/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer * Copyright (C) 2005 Maciej W. Rozycki * Copyright (C) 2006 Ralf Baechle ([email protected]) * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved. */ #ifndef __ASM_UASM_H #define __ASM_UASM_H #include <linux/types.h> #ifdef CONFIG_EXPORT_UASM #include <linux/export.h> #define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym) #else #define UASM_EXPORT_SYMBOL(sym) #endif #define Ip_u1u2u3(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u2u1u3(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u3u2u1(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u3u1u2(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c) #define Ip_u1u2s3(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2s3u1(op) \ void uasm_i##op(u32 **buf, unsigned int a, signed int b, unsigned int c) #define Ip_s3s1s2(op) \ void uasm_i##op(u32 **buf, int a, int b, int c) #define Ip_u2u1s3(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, signed int c) #define Ip_u2u1msbu3(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b, unsigned int c, \ unsigned int d) #define Ip_u1u2(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b) #define Ip_u2u1(op) \ void uasm_i##op(u32 **buf, unsigned int a, unsigned int b) #define Ip_u1s2(op) \ void uasm_i##op(u32 **buf, unsigned int a, signed int b) #define Ip_u1(op) void uasm_i##op(u32 **buf, unsigned int a) #define Ip_0(op) void uasm_i##op(u32 **buf) Ip_u2u1s3(_addiu); Ip_u3u1u2(_addu); Ip_u3u1u2(_and); Ip_u2u1u3(_andi); Ip_u1u2s3(_bbit0); Ip_u1u2s3(_bbit1); Ip_u1u2s3(_beq); Ip_u1u2s3(_beql); Ip_u1s2(_bgez); Ip_u1s2(_bgezl); Ip_u1s2(_bgtz); Ip_u1s2(_blez); Ip_u1s2(_bltz); Ip_u1s2(_bltzl); Ip_u1u2s3(_bne); Ip_u1(_break); Ip_u2s3u1(_cache); Ip_u1u2(_cfc1); Ip_u2u1(_cfcmsa); Ip_u1u2(_ctc1); Ip_u2u1(_ctcmsa); Ip_u2u1s3(_daddiu); Ip_u3u1u2(_daddu); Ip_u1u2(_ddivu); Ip_u3u1u2(_ddivu_r6); Ip_u1(_di); Ip_u2u1msbu3(_dins); Ip_u2u1msbu3(_dinsm); Ip_u2u1msbu3(_dinsu); Ip_u1u2(_divu); Ip_u3u1u2(_divu_r6); Ip_u1u2u3(_dmfc0); Ip_u3u1u2(_dmodu); Ip_u1u2u3(_dmtc0); Ip_u1u2(_dmultu); Ip_u3u1u2(_dmulu); Ip_u2u1u3(_drotr); Ip_u2u1u3(_drotr32); Ip_u2u1(_dsbh); Ip_u2u1(_dshd); Ip_u2u1u3(_dsll); Ip_u2u1u3(_dsll32); Ip_u3u2u1(_dsllv); Ip_u2u1u3(_dsra); Ip_u2u1u3(_dsra32); Ip_u3u2u1(_dsrav); Ip_u2u1u3(_dsrl); Ip_u2u1u3(_dsrl32); Ip_u3u2u1(_dsrlv); Ip_u3u1u2(_dsubu); Ip_0(_eret); Ip_u2u1msbu3(_ext); Ip_u2u1msbu3(_ins); Ip_u1(_j); Ip_u1(_jal); Ip_u2u1(_jalr); Ip_u1(_jr); Ip_u2s3u1(_lb); Ip_u2s3u1(_lbu); Ip_u2s3u1(_ld); Ip_u3u1u2(_ldx); Ip_u2s3u1(_lh); Ip_u2s3u1(_lhu); Ip_u2s3u1(_ll); Ip_u2s3u1(_lld); Ip_u1s2(_lui); Ip_u2s3u1(_lw); Ip_u2s3u1(_lwu); Ip_u3u1u2(_lwx); Ip_u1u2u3(_mfc0); Ip_u1u2u3(_mfhc0); Ip_u1(_mfhi); Ip_u1(_mflo); Ip_u3u1u2(_modu); Ip_u3u1u2(_movn); Ip_u3u1u2(_movz); Ip_u1u2u3(_mtc0); Ip_u1u2u3(_mthc0); Ip_u1(_mthi); Ip_u1(_mtlo); Ip_u3u1u2(_mul); Ip_u1u2(_multu); Ip_u3u1u2(_mulu); Ip_u3u1u2(_muhu); Ip_u3u1u2(_nor); Ip_u3u1u2(_or); Ip_u2u1u3(_ori); Ip_u2s3u1(_pref); Ip_0(_rfe); Ip_u2u1u3(_rotr); Ip_u2s3u1(_sb); Ip_u2s3u1(_sc); Ip_u2s3u1(_scd); Ip_u2s3u1(_sd); Ip_u3u1u2(_seleqz); Ip_u3u1u2(_selnez); Ip_u2s3u1(_sh); Ip_u2u1u3(_sll); Ip_u3u2u1(_sllv); 
Ip_s3s1s2(_slt); Ip_u2u1s3(_slti); Ip_u2u1s3(_sltiu); Ip_u3u1u2(_sltu); Ip_u2u1u3(_sra); Ip_u3u2u1(_srav); Ip_u2u1u3(_srl); Ip_u3u2u1(_srlv); Ip_u3u1u2(_subu); Ip_u2s3u1(_sw); Ip_u1(_sync); Ip_u1(_syscall); Ip_0(_tlbp); Ip_0(_tlbr); Ip_0(_tlbwi); Ip_0(_tlbwr); Ip_u1(_wait); Ip_u2u1(_wsbh); Ip_u3u1u2(_xor); Ip_u2u1u3(_xori); Ip_u2u1(_yield); Ip_u1u2(_ldpte); Ip_u2u1u3(_lddir); /* Handle labels. */ struct uasm_label { u32 *addr; int lab; }; void uasm_build_label(struct uasm_label **lab, u32 *addr, int lid); int uasm_in_compat_space_p(long addr); int uasm_rel_hi(long val); int uasm_rel_lo(long val); void UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr); void UASM_i_LA(u32 **buf, unsigned int rs, long addr); #define UASM_L_LA(lb) \ static inline void uasm_l##lb(struct uasm_label **lab, u32 *addr) \ { \ uasm_build_label(lab, addr, label##lb); \ } /* convenience macros for instructions */ #ifdef CONFIG_64BIT # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_daddiu(buf, rs, rt, val) # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_daddu(buf, rs, rt, rd) # define UASM_i_LL(buf, rs, rt, off) uasm_i_lld(buf, rs, rt, off) # define UASM_i_LW(buf, rs, rt, off) uasm_i_ld(buf, rs, rt, off) # define UASM_i_LWX(buf, rs, rt, rd) uasm_i_ldx(buf, rs, rt, rd) # define UASM_i_MFC0(buf, rt, rd...) uasm_i_dmfc0(buf, rt, rd) # define UASM_i_MTC0(buf, rt, rd...) uasm_i_dmtc0(buf, rt, rd) # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_drotr(buf, rs, rt, sh) # define UASM_i_SC(buf, rs, rt, off) uasm_i_scd(buf, rs, rt, off) # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_dsll(buf, rs, rt, sh) # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_dsra(buf, rs, rt, sh) # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_dsrl(buf, rs, rt, sh) # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_dsrl_safe(buf, rs, rt, sh) # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_dsubu(buf, rs, rt, rd) # define UASM_i_SW(buf, rs, rt, off) uasm_i_sd(buf, rs, rt, off) #else # define UASM_i_ADDIU(buf, rs, rt, val) uasm_i_addiu(buf, rs, rt, val) # define UASM_i_ADDU(buf, rs, rt, rd) uasm_i_addu(buf, rs, rt, rd) # define UASM_i_LL(buf, rs, rt, off) uasm_i_ll(buf, rs, rt, off) # define UASM_i_LW(buf, rs, rt, off) uasm_i_lw(buf, rs, rt, off) # define UASM_i_LWX(buf, rs, rt, rd) uasm_i_lwx(buf, rs, rt, rd) # define UASM_i_MFC0(buf, rt, rd...) uasm_i_mfc0(buf, rt, rd) # define UASM_i_MTC0(buf, rt, rd...) 
uasm_i_mtc0(buf, rt, rd) # define UASM_i_ROTR(buf, rs, rt, sh) uasm_i_rotr(buf, rs, rt, sh) # define UASM_i_SC(buf, rs, rt, off) uasm_i_sc(buf, rs, rt, off) # define UASM_i_SLL(buf, rs, rt, sh) uasm_i_sll(buf, rs, rt, sh) # define UASM_i_SRA(buf, rs, rt, sh) uasm_i_sra(buf, rs, rt, sh) # define UASM_i_SRL(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) # define UASM_i_SRL_SAFE(buf, rs, rt, sh) uasm_i_srl(buf, rs, rt, sh) # define UASM_i_SUBU(buf, rs, rt, rd) uasm_i_subu(buf, rs, rt, rd) # define UASM_i_SW(buf, rs, rt, off) uasm_i_sw(buf, rs, rt, off) #endif #define uasm_i_b(buf, off) uasm_i_beq(buf, 0, 0, off) #define uasm_i_beqz(buf, rs, off) uasm_i_beq(buf, rs, 0, off) #define uasm_i_beqzl(buf, rs, off) uasm_i_beql(buf, rs, 0, off) #define uasm_i_bnez(buf, rs, off) uasm_i_bne(buf, rs, 0, off) #define uasm_i_bnezl(buf, rs, off) uasm_i_bnel(buf, rs, 0, off) #define uasm_i_ehb(buf) uasm_i_sll(buf, 0, 0, 3) #define uasm_i_move(buf, a, b) UASM_i_ADDU(buf, a, 0, b) #ifdef CONFIG_CPU_NOP_WORKAROUNDS #define uasm_i_nop(buf) uasm_i_or(buf, 1, 1, 0) #else #define uasm_i_nop(buf) uasm_i_sll(buf, 0, 0, 0) #endif #define uasm_i_ssnop(buf) uasm_i_sll(buf, 0, 0, 1) static inline void uasm_i_drotr_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) uasm_i_drotr(p, a1, a2, a3); else uasm_i_drotr32(p, a1, a2, a3 - 32); } static inline void uasm_i_dsll_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) uasm_i_dsll(p, a1, a2, a3); else uasm_i_dsll32(p, a1, a2, a3 - 32); } static inline void uasm_i_dsrl_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) uasm_i_dsrl(p, a1, a2, a3); else uasm_i_dsrl32(p, a1, a2, a3 - 32); } static inline void uasm_i_dsra_safe(u32 **p, unsigned int a1, unsigned int a2, unsigned int a3) { if (a3 < 32) uasm_i_dsra(p, a1, a2, a3); else uasm_i_dsra32(p, a1, a2, a3 - 32); } /* Handle relocations. */ struct uasm_reloc { u32 *addr; unsigned int type; int lab; }; /* This is zero so we can use zeroed label arrays. */ #define UASM_LABEL_INVALID 0 void uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid); void uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab); void uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off); void uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off); void uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first, u32 *end, u32 *target); int uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr); /* Convenience functions for labeled branches. */ void uasm_il_b(u32 **p, struct uasm_reloc **r, int lid); void uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid); void uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg, unsigned int bit, int lid); void uasm_il_beq(u32 **p, struct uasm_reloc **r, unsigned int r1, unsigned int r2, int lid); void uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); void uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1, unsigned int reg2, int lid); void uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid); #endif /* __ASM_UASM_H */
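Illustrative usage note (not part of uasm.h): each uasm_i_* emitter encodes one instruction at *buf and post-increments the pointer, so a handler is generated simply by calling the emitters in program order. The sketch below is a hedged example only — the scratch buffer, the wrapper function and the raw MIPS register numbers (2 for $v0, 31 for $ra) are assumptions invented for illustration; only the emitter calls themselves come from the declarations above.

/* Hedged sketch: emit a "return 42" stub into a caller-owned buffer. */
static u32 example_buf[8];                       /* assumption: scratch buffer  */

static void example_emit_return_42(void)         /* assumption: example wrapper */
{
        u32 *p = example_buf;

        uasm_i_addiu(&p, 2, 0, 42);     /* addiu $v0, $zero, 42 */
        uasm_i_jr(&p, 31);              /* jr    $ra            */
        uasm_i_nop(&p);                 /* fill the delay slot  */
}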
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Joshua Henderson <[email protected]>
 * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
 */
#ifndef _ASM_MACH_PIC32_H
#define _ASM_MACH_PIC32_H

#include <linux/io.h>

/*
 * PIC32 register offsets for SET/CLR/INV where supported.
 */
#define PIC32_CLR(_reg)         ((_reg) + 0x04)
#define PIC32_SET(_reg)         ((_reg) + 0x08)
#define PIC32_INV(_reg)         ((_reg) + 0x0C)

/*
 * PIC32 Base Register Offsets
 */
#define PIC32_BASE_CONFIG       0x1f800000
#define PIC32_BASE_OSC          0x1f801200
#define PIC32_BASE_RESET        0x1f801240
#define PIC32_BASE_PPS          0x1f801400
#define PIC32_BASE_UART         0x1f822000
#define PIC32_BASE_PORT         0x1f860000
#define PIC32_BASE_DEVCFG2      0x1fc4ff44

/*
 * Register unlock sequence required for some register access.
 */
void pic32_syskey_unlock_debug(const char *fn, const ulong ln);
#define pic32_syskey_unlock()   \
        pic32_syskey_unlock_debug(__func__, __LINE__)

#endif /* _ASM_MACH_PIC32_H */
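A hedged illustration of the SET/CLR aliases defined above (nothing below is part of this header): writing a mask to a register's +0x08 or +0x04 shadow sets or clears only those bits, avoiding a read-modify-write. The ioremapped port base, the 0x30 LATA offset and the pin number are assumptions made purely for the example.

/* Hedged sketch: pulse an output pin via the hardware SET/CLR aliases. */
static void __iomem *example_port_base; /* assumption: ioremap of PIC32_BASE_PORT */

static void example_pulse_ra3(void)
{
        void __iomem *lata = example_port_base + 0x30;  /* assumption: LATA offset */

        writel(1U << 3, PIC32_SET(lata));       /* set bit 3 without RMW   */
        writel(1U << 3, PIC32_CLR(lata));       /* clear bit 3 without RMW */
}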
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2019, Intel Corporation. */ #ifndef _ICE_FLOW_H_ #define _ICE_FLOW_H_ #include "ice_flex_type.h" #include "ice_parser.h" #define ICE_FLOW_ENTRY_HANDLE_INVAL 0 #define ICE_FLOW_FLD_OFF_INVAL 0xffff /* Generate flow hash field from flow field type(s) */ #define ICE_FLOW_HASH_ETH \ (BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_DA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_ETH_SA)) #define ICE_FLOW_HASH_IPV4 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA)) #define ICE_FLOW_HASH_IPV6 \ (BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA)) #define ICE_FLOW_HASH_TCP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT)) #define ICE_FLOW_HASH_UDP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT)) #define ICE_FLOW_HASH_SCTP_PORT \ (BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) | \ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT)) #define ICE_HASH_INVALID 0 #define ICE_HASH_TCP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_TCP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_TCP_PORT) #define ICE_HASH_UDP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_UDP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_UDP_PORT) #define ICE_HASH_SCTP_IPV4 (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_HASH_SCTP_IPV6 (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_SCTP_PORT) #define ICE_FLOW_HASH_GTP_C_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID)) #define ICE_FLOW_HASH_GTP_C_IPV4_TEID \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_C_TEID) #define ICE_FLOW_HASH_GTP_C_IPV6_TEID \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_C_TEID) #define ICE_FLOW_HASH_GTP_U_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID)) #define ICE_FLOW_HASH_GTP_U_IPV4_TEID \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_TEID) #define ICE_FLOW_HASH_GTP_U_IPV6_TEID \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_TEID) #define ICE_FLOW_HASH_GTP_U_EH_TEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID)) #define ICE_FLOW_HASH_GTP_U_EH_QFI \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_QFI)) #define ICE_FLOW_HASH_GTP_U_IPV4_EH \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ ICE_FLOW_HASH_GTP_U_EH_QFI) #define ICE_FLOW_HASH_GTP_U_IPV6_EH \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_EH_TEID | \ ICE_FLOW_HASH_GTP_U_EH_QFI) #define ICE_FLOW_HASH_GTP_U_UP \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID)) #define ICE_FLOW_HASH_GTP_U_DWN \ (BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID)) #define ICE_FLOW_HASH_GTP_U_IPV4_UP \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_UP) #define ICE_FLOW_HASH_GTP_U_IPV6_UP \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_UP) #define ICE_FLOW_HASH_GTP_U_IPV4_DWN \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_GTP_U_DWN) #define ICE_FLOW_HASH_GTP_U_IPV6_DWN \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_GTP_U_DWN) #define ICE_FLOW_HASH_PPPOE_SESS_ID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID)) #define ICE_FLOW_HASH_PPPOE_SESS_ID_ETH \ (ICE_FLOW_HASH_ETH | ICE_FLOW_HASH_PPPOE_SESS_ID) #define ICE_FLOW_HASH_PPPOE_TCP_ID \ (ICE_FLOW_HASH_TCP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) #define ICE_FLOW_HASH_PPPOE_UDP_ID \ (ICE_FLOW_HASH_UDP_PORT | ICE_FLOW_HASH_PPPOE_SESS_ID) #define ICE_FLOW_HASH_PFCP_SEID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_PFCP_SEID)) #define ICE_FLOW_HASH_PFCP_IPV4_SEID \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_PFCP_SEID) #define ICE_FLOW_HASH_PFCP_IPV6_SEID \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_PFCP_SEID) #define ICE_FLOW_HASH_L2TPV3_SESS_ID \ (BIT_ULL(ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID)) #define 
ICE_FLOW_HASH_L2TPV3_IPV4_SESS_ID \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_L2TPV3_SESS_ID) #define ICE_FLOW_HASH_L2TPV3_IPV6_SESS_ID \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_L2TPV3_SESS_ID) #define ICE_FLOW_HASH_ESP_SPI \ (BIT_ULL(ICE_FLOW_FIELD_IDX_ESP_SPI)) #define ICE_FLOW_HASH_ESP_IPV4_SPI \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_ESP_SPI) #define ICE_FLOW_HASH_ESP_IPV6_SPI \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_ESP_SPI) #define ICE_FLOW_HASH_AH_SPI \ (BIT_ULL(ICE_FLOW_FIELD_IDX_AH_SPI)) #define ICE_FLOW_HASH_AH_IPV4_SPI \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_AH_SPI) #define ICE_FLOW_HASH_AH_IPV6_SPI \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_AH_SPI) #define ICE_FLOW_HASH_NAT_T_ESP_SPI \ (BIT_ULL(ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI)) #define ICE_FLOW_HASH_NAT_T_ESP_IPV4_SPI \ (ICE_FLOW_HASH_IPV4 | ICE_FLOW_HASH_NAT_T_ESP_SPI) #define ICE_FLOW_HASH_NAT_T_ESP_IPV6_SPI \ (ICE_FLOW_HASH_IPV6 | ICE_FLOW_HASH_NAT_T_ESP_SPI) /* Protocol header fields within a packet segment. A segment consists of one or * more protocol headers that make up a logical group of protocol headers. Each * logical group of protocol headers encapsulates or is encapsulated using/by * tunneling or encapsulation protocols for network virtualization such as GRE, * VxLAN, etc. */ enum ice_flow_seg_hdr { ICE_FLOW_SEG_HDR_NONE = 0x00000000, ICE_FLOW_SEG_HDR_ETH = 0x00000001, ICE_FLOW_SEG_HDR_VLAN = 0x00000002, ICE_FLOW_SEG_HDR_IPV4 = 0x00000004, ICE_FLOW_SEG_HDR_IPV6 = 0x00000008, ICE_FLOW_SEG_HDR_ARP = 0x00000010, ICE_FLOW_SEG_HDR_ICMP = 0x00000020, ICE_FLOW_SEG_HDR_TCP = 0x00000040, ICE_FLOW_SEG_HDR_UDP = 0x00000080, ICE_FLOW_SEG_HDR_SCTP = 0x00000100, ICE_FLOW_SEG_HDR_GRE = 0x00000200, ICE_FLOW_SEG_HDR_GTPC = 0x00000400, ICE_FLOW_SEG_HDR_GTPC_TEID = 0x00000800, ICE_FLOW_SEG_HDR_GTPU_IP = 0x00001000, ICE_FLOW_SEG_HDR_GTPU_EH = 0x00002000, ICE_FLOW_SEG_HDR_GTPU_DWN = 0x00004000, ICE_FLOW_SEG_HDR_GTPU_UP = 0x00008000, ICE_FLOW_SEG_HDR_PPPOE = 0x00010000, ICE_FLOW_SEG_HDR_PFCP_NODE = 0x00020000, ICE_FLOW_SEG_HDR_PFCP_SESSION = 0x00040000, ICE_FLOW_SEG_HDR_L2TPV3 = 0x00080000, ICE_FLOW_SEG_HDR_ESP = 0x00100000, ICE_FLOW_SEG_HDR_AH = 0x00200000, ICE_FLOW_SEG_HDR_NAT_T_ESP = 0x00400000, ICE_FLOW_SEG_HDR_ETH_NON_IP = 0x00800000, /* The following is an additive bit for ICE_FLOW_SEG_HDR_IPV4 and * ICE_FLOW_SEG_HDR_IPV6 which include the IPV4 other PTYPEs */ ICE_FLOW_SEG_HDR_IPV_OTHER = 0x20000000, }; /* These segments all have the same PTYPES, but are otherwise distinguished by * the value of the gtp_eh_pdu and gtp_eh_pdu_link flags: * * gtp_eh_pdu gtp_eh_pdu_link * ICE_FLOW_SEG_HDR_GTPU_IP 0 0 * ICE_FLOW_SEG_HDR_GTPU_EH 1 don't care * ICE_FLOW_SEG_HDR_GTPU_DWN 1 0 * ICE_FLOW_SEG_HDR_GTPU_UP 1 1 */ #define ICE_FLOW_SEG_HDR_GTPU (ICE_FLOW_SEG_HDR_GTPU_IP | \ ICE_FLOW_SEG_HDR_GTPU_EH | \ ICE_FLOW_SEG_HDR_GTPU_DWN | \ ICE_FLOW_SEG_HDR_GTPU_UP) #define ICE_FLOW_SEG_HDR_PFCP (ICE_FLOW_SEG_HDR_PFCP_NODE | \ ICE_FLOW_SEG_HDR_PFCP_SESSION) enum ice_flow_field { /* L2 */ ICE_FLOW_FIELD_IDX_ETH_DA, ICE_FLOW_FIELD_IDX_ETH_SA, ICE_FLOW_FIELD_IDX_S_VLAN, ICE_FLOW_FIELD_IDX_C_VLAN, ICE_FLOW_FIELD_IDX_ETH_TYPE, /* L3 */ ICE_FLOW_FIELD_IDX_IPV4_DSCP, ICE_FLOW_FIELD_IDX_IPV6_DSCP, ICE_FLOW_FIELD_IDX_IPV4_TTL, ICE_FLOW_FIELD_IDX_IPV4_PROT, ICE_FLOW_FIELD_IDX_IPV6_TTL, ICE_FLOW_FIELD_IDX_IPV6_PROT, ICE_FLOW_FIELD_IDX_IPV4_SA, ICE_FLOW_FIELD_IDX_IPV4_DA, ICE_FLOW_FIELD_IDX_IPV6_SA, ICE_FLOW_FIELD_IDX_IPV6_DA, /* L4 */ ICE_FLOW_FIELD_IDX_TCP_SRC_PORT, ICE_FLOW_FIELD_IDX_TCP_DST_PORT, ICE_FLOW_FIELD_IDX_UDP_SRC_PORT, ICE_FLOW_FIELD_IDX_UDP_DST_PORT, 
ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT, ICE_FLOW_FIELD_IDX_SCTP_DST_PORT, ICE_FLOW_FIELD_IDX_TCP_FLAGS, /* ARP */ ICE_FLOW_FIELD_IDX_ARP_SIP, ICE_FLOW_FIELD_IDX_ARP_DIP, ICE_FLOW_FIELD_IDX_ARP_SHA, ICE_FLOW_FIELD_IDX_ARP_DHA, ICE_FLOW_FIELD_IDX_ARP_OP, /* ICMP */ ICE_FLOW_FIELD_IDX_ICMP_TYPE, ICE_FLOW_FIELD_IDX_ICMP_CODE, /* GRE */ ICE_FLOW_FIELD_IDX_GRE_KEYID, /* GTPC_TEID */ ICE_FLOW_FIELD_IDX_GTPC_TEID, /* GTPU_IP */ ICE_FLOW_FIELD_IDX_GTPU_IP_TEID, /* GTPU_EH */ ICE_FLOW_FIELD_IDX_GTPU_EH_TEID, ICE_FLOW_FIELD_IDX_GTPU_EH_QFI, /* GTPU_UP */ ICE_FLOW_FIELD_IDX_GTPU_UP_TEID, /* GTPU_DWN */ ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID, /* PPPoE */ ICE_FLOW_FIELD_IDX_PPPOE_SESS_ID, /* PFCP */ ICE_FLOW_FIELD_IDX_PFCP_SEID, /* L2TPv3 */ ICE_FLOW_FIELD_IDX_L2TPV3_SESS_ID, /* ESP */ ICE_FLOW_FIELD_IDX_ESP_SPI, /* AH */ ICE_FLOW_FIELD_IDX_AH_SPI, /* NAT_T ESP */ ICE_FLOW_FIELD_IDX_NAT_T_ESP_SPI, /* The total number of enums must not exceed 64 */ ICE_FLOW_FIELD_IDX_MAX }; #define ICE_FLOW_HASH_FLD_IPV4_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_SA) #define ICE_FLOW_HASH_FLD_IPV6_SA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_SA) #define ICE_FLOW_HASH_FLD_IPV4_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV4_DA) #define ICE_FLOW_HASH_FLD_IPV6_DA BIT_ULL(ICE_FLOW_FIELD_IDX_IPV6_DA) #define ICE_FLOW_HASH_FLD_TCP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_SRC_PORT) #define ICE_FLOW_HASH_FLD_TCP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_TCP_DST_PORT) #define ICE_FLOW_HASH_FLD_UDP_SRC_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_SRC_PORT) #define ICE_FLOW_HASH_FLD_UDP_DST_PORT BIT_ULL(ICE_FLOW_FIELD_IDX_UDP_DST_PORT) #define ICE_FLOW_HASH_FLD_SCTP_SRC_PORT \ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_SRC_PORT) #define ICE_FLOW_HASH_FLD_SCTP_DST_PORT \ BIT_ULL(ICE_FLOW_FIELD_IDX_SCTP_DST_PORT) #define ICE_FLOW_HASH_FLD_GTPC_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPC_TEID) #define ICE_FLOW_HASH_FLD_GTPU_IP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_IP_TEID) #define ICE_FLOW_HASH_FLD_GTPU_EH_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_EH_TEID) #define ICE_FLOW_HASH_FLD_GTPU_UP_TEID BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_UP_TEID) #define ICE_FLOW_HASH_FLD_GTPU_DWN_TEID \ BIT_ULL(ICE_FLOW_FIELD_IDX_GTPU_DWN_TEID) /* Flow headers and fields for AVF support */ enum ice_flow_avf_hdr_field { /* Values 0 - 28 are reserved for future use */ ICE_AVF_FLOW_FIELD_INVALID = 0, ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP = 29, ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP, ICE_AVF_FLOW_FIELD_IPV4_UDP, ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK, ICE_AVF_FLOW_FIELD_IPV4_TCP, ICE_AVF_FLOW_FIELD_IPV4_SCTP, ICE_AVF_FLOW_FIELD_IPV4_OTHER, ICE_AVF_FLOW_FIELD_FRAG_IPV4, /* Values 37-38 are reserved */ ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP = 39, ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP, ICE_AVF_FLOW_FIELD_IPV6_UDP, ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK, ICE_AVF_FLOW_FIELD_IPV6_TCP, ICE_AVF_FLOW_FIELD_IPV6_SCTP, ICE_AVF_FLOW_FIELD_IPV6_OTHER, ICE_AVF_FLOW_FIELD_FRAG_IPV6, ICE_AVF_FLOW_FIELD_RSVD47, ICE_AVF_FLOW_FIELD_FCOE_OX, ICE_AVF_FLOW_FIELD_FCOE_RX, ICE_AVF_FLOW_FIELD_FCOE_OTHER, /* Values 51-62 are reserved */ ICE_AVF_FLOW_FIELD_L2_PAYLOAD = 63, ICE_AVF_FLOW_FIELD_MAX }; /* Supported RSS offloads This macro is defined to support * VIRTCHNL_OP_GET_RSS_HENA_CAPS ops. PF driver sends the RSS hardware * capabilities to the caller of this ops. 
*/ #define ICE_DEFAULT_RSS_HENA ( \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_SCTP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV4) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_SCTP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_OTHER) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_FRAG_IPV6) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV4_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV4_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_IPV6_TCP_SYN_NO_ACK) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_UNICAST_IPV6_UDP) | \ BIT_ULL(ICE_AVF_FLOW_FIELD_MULTICAST_IPV6_UDP)) enum ice_rss_cfg_hdr_type { ICE_RSS_OUTER_HEADERS, /* take outer headers as inputset. */ ICE_RSS_INNER_HEADERS, /* take inner headers as inputset. */ /* take inner headers as inputset for packet with outer ipv4. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV4, /* take inner headers as inputset for packet with outer ipv6. */ ICE_RSS_INNER_HEADERS_W_OUTER_IPV6, /* take outer headers first then inner headers as inputset */ ICE_RSS_ANY_HEADERS }; struct ice_vsi; struct ice_rss_hash_cfg { u32 addl_hdrs; /* protocol header fields */ u64 hash_flds; /* hash bit field (ICE_FLOW_HASH_*) to configure */ enum ice_rss_cfg_hdr_type hdr_type; /* to specify inner or outer */ bool symm; /* symmetric or asymmetric hash */ }; enum ice_flow_dir { ICE_FLOW_RX = 0x02, }; enum ice_flow_priority { ICE_FLOW_PRIO_LOW, ICE_FLOW_PRIO_NORMAL, ICE_FLOW_PRIO_HIGH }; #define ICE_FLOW_SEG_SINGLE 1 #define ICE_FLOW_SEG_MAX 2 #define ICE_FLOW_SEG_RAW_FLD_MAX 2 #define ICE_FLOW_SW_FIELD_VECTOR_MAX 48 #define ICE_FLOW_FV_EXTRACT_SZ 2 #define ICE_FLOW_SET_HDRS(seg, val) ((seg)->hdrs |= (u32)(val)) struct ice_flow_seg_xtrct { u8 prot_id; /* Protocol ID of extracted header field */ u16 off; /* Starting offset of the field in header in bytes */ u8 idx; /* Index of FV entry used */ u8 disp; /* Displacement of field in bits fr. FV entry's start */ u16 mask; /* Mask for field */ }; enum ice_flow_fld_match_type { ICE_FLOW_FLD_TYPE_REG, /* Value, mask */ ICE_FLOW_FLD_TYPE_RANGE, /* Value, mask, last (upper bound) */ ICE_FLOW_FLD_TYPE_PREFIX, /* IP address, prefix, size of prefix */ ICE_FLOW_FLD_TYPE_SIZE, /* Value, mask, size of match */ }; struct ice_flow_fld_loc { /* Describe offsets of field information relative to the beginning of * input buffer provided when adding flow entries. 
*/ u16 val; /* Offset where the value is located */ u16 mask; /* Offset where the mask/prefix value is located */ u16 last; /* Length or offset where the upper value is located */ }; struct ice_flow_fld_info { enum ice_flow_fld_match_type type; /* Location where to retrieve data from an input buffer */ struct ice_flow_fld_loc src; /* Location where to put the data into the final entry buffer */ struct ice_flow_fld_loc entry; struct ice_flow_seg_xtrct xtrct; }; struct ice_flow_seg_fld_raw { struct ice_flow_fld_info info; u16 off; /* Offset from the start of the segment */ }; struct ice_flow_seg_info { u32 hdrs; /* Bitmask indicating protocol headers present */ u64 match; /* Bitmask indicating header fields to be matched */ u64 range; /* Bitmask indicating header fields matched as ranges */ struct ice_flow_fld_info fields[ICE_FLOW_FIELD_IDX_MAX]; u8 raws_cnt; /* Number of raw fields to be matched */ struct ice_flow_seg_fld_raw raws[ICE_FLOW_SEG_RAW_FLD_MAX]; }; /* This structure describes a flow entry, and is tracked only in this file */ struct ice_flow_entry { struct list_head l_entry; u64 id; struct ice_flow_prof *prof; enum ice_flow_priority priority; u16 vsi_handle; }; #define ICE_FLOW_ENTRY_HNDL(e) ((u64)(uintptr_t)e) #define ICE_FLOW_ENTRY_PTR(h) ((struct ice_flow_entry *)(uintptr_t)(h)) struct ice_flow_prof { struct list_head l_entry; u64 id; enum ice_flow_dir dir; u8 segs_cnt; /* Keep track of flow entries associated with this flow profile */ struct mutex entries_lock; struct list_head entries; struct ice_flow_seg_info segs[ICE_FLOW_SEG_MAX]; /* software VSI handles referenced by this flow profile */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); bool symm; /* Symmetric Hash for RSS */ }; struct ice_rss_cfg { struct list_head l_entry; /* bitmap of VSIs added to the RSS entry */ DECLARE_BITMAP(vsis, ICE_MAX_VSI); struct ice_rss_hash_cfg hash; }; int ice_flow_add_prof(struct ice_hw *hw, enum ice_block blk, enum ice_flow_dir dir, struct ice_flow_seg_info *segs, u8 segs_cnt, bool symm, struct ice_flow_prof **prof); int ice_flow_rem_prof(struct ice_hw *hw, enum ice_block blk, u64 prof_id); int ice_flow_set_parser_prof(struct ice_hw *hw, u16 dest_vsi, u16 fdir_vsi, struct ice_parser_profile *prof, enum ice_block blk); int ice_flow_add_entry(struct ice_hw *hw, enum ice_block blk, u64 prof_id, u64 entry_id, u16 vsi, enum ice_flow_priority prio, void *data, u64 *entry_h); int ice_flow_rem_entry(struct ice_hw *hw, enum ice_block blk, u64 entry_h); void ice_flow_set_fld(struct ice_flow_seg_info *seg, enum ice_flow_field fld, u16 val_loc, u16 mask_loc, u16 last_loc, bool range); void ice_flow_add_fld_raw(struct ice_flow_seg_info *seg, u16 off, u8 len, u16 val_loc, u16 mask_loc); int ice_flow_rem_vsi_prof(struct ice_hw *hw, u16 vsi_handle, u64 prof_id); void ice_rem_vsi_rss_list(struct ice_hw *hw, u16 vsi_handle); int ice_replay_rss_cfg(struct ice_hw *hw, u16 vsi_handle); int ice_set_rss_cfg_symm(struct ice_hw *hw, struct ice_vsi *vsi, bool symm); int ice_add_avf_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, u64 hashed_flds); int ice_rem_vsi_rss_cfg(struct ice_hw *hw, u16 vsi_handle); int ice_add_rss_cfg(struct ice_hw *hw, struct ice_vsi *vsi, const struct ice_rss_hash_cfg *cfg); int ice_rem_rss_cfg(struct ice_hw *hw, u16 vsi_handle, const struct ice_rss_hash_cfg *cfg); u64 ice_get_rss_cfg(struct ice_hw *hw, u16 vsi_handle, u32 hdrs, bool *symm); #endif /* _ICE_FLOW_H_ */
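A hedged usage sketch (not part of ice_flow.h): a caller wanting a symmetric 4-tuple RSS hash for TCP over IPv4 fills an ice_rss_hash_cfg with the segment-header and hash-field masks defined above and passes it to ice_add_rss_cfg(). The wrapper function name is an assumption, and the hw/vsi pointers are assumed to come from the driver's existing context.

/* Hedged sketch: enable symmetric RSS over IPv4 addresses + TCP ports. */
static int example_enable_tcp4_rss(struct ice_hw *hw, struct ice_vsi *vsi)
{
        struct ice_rss_hash_cfg cfg = {
                .addl_hdrs = ICE_FLOW_SEG_HDR_IPV4 | ICE_FLOW_SEG_HDR_TCP,
                .hash_flds = ICE_HASH_TCP_IPV4,         /* IPv4 SA/DA + TCP ports */
                .hdr_type  = ICE_RSS_OUTER_HEADERS,     /* hash on outer headers  */
                .symm      = true,                      /* symmetric hash         */
        };

        return ice_add_rss_cfg(hw, vsi, &cfg);
}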
// SPDX-License-Identifier: GPL-2.0-only /* * Copied from arch/arm64/kernel/cpufeature.c * * Copyright (C) 2015 ARM Ltd. * Copyright (C) 2017 SiFive */ #include <linux/acpi.h> #include <linux/bitmap.h> #include <linux/cpu.h> #include <linux/cpuhotplug.h> #include <linux/ctype.h> #include <linux/log2.h> #include <linux/memory.h> #include <linux/module.h> #include <linux/of.h> #include <asm/acpi.h> #include <asm/alternative.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> #include <asm/hwcap.h> #include <asm/text-patching.h> #include <asm/hwprobe.h> #include <asm/processor.h> #include <asm/sbi.h> #include <asm/vector.h> #include <asm/vendor_extensions.h> #define NUM_ALPHA_EXTS ('z' - 'a' + 1) static bool any_cpu_has_zicboz; unsigned long elf_hwcap __read_mostly; /* Host ISA bitmap */ static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly; /* Per-cpu ISA extensions. */ struct riscv_isainfo hart_isa[NR_CPUS]; /** * riscv_isa_extension_base() - Get base extension word * * @isa_bitmap: ISA bitmap to use * Return: base extension word as unsigned long value * * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. */ unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap) { if (!isa_bitmap) return riscv_isa[0]; return isa_bitmap[0]; } EXPORT_SYMBOL_GPL(riscv_isa_extension_base); /** * __riscv_isa_extension_available() - Check whether given extension * is available or not * * @isa_bitmap: ISA bitmap to use * @bit: bit position of the desired extension * Return: true or false * * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. */ bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit) { const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa; if (bit >= RISCV_ISA_EXT_MAX) return false; return test_bit(bit, bmap) ? 
true : false; } EXPORT_SYMBOL_GPL(__riscv_isa_extension_available); static int riscv_ext_zicbom_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { if (!riscv_cbom_block_size) { pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n"); return -EINVAL; } if (!is_power_of_2(riscv_cbom_block_size)) { pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n"); return -EINVAL; } return 0; } static int riscv_ext_zicboz_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { if (!riscv_cboz_block_size) { pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n"); return -EINVAL; } if (!is_power_of_2(riscv_cboz_block_size)) { pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n"); return -EINVAL; } any_cpu_has_zicboz = true; return 0; } static int riscv_ext_zca_depends(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA)) return 0; return -EPROBE_DEFER; } static int riscv_ext_zcd_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_d)) return 0; return -EPROBE_DEFER; } static int riscv_ext_zcf_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { if (IS_ENABLED(CONFIG_64BIT)) return -EINVAL; if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_ZCA) && __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_f)) return 0; return -EPROBE_DEFER; } static int riscv_ext_svadu_validate(const struct riscv_isa_ext_data *data, const unsigned long *isa_bitmap) { /* SVADE has already been detected, use SVADE only */ if (__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_SVADE)) return -EOPNOTSUPP; return 0; } static const unsigned int riscv_zk_bundled_exts[] = { RISCV_ISA_EXT_ZBKB, RISCV_ISA_EXT_ZBKC, RISCV_ISA_EXT_ZBKX, RISCV_ISA_EXT_ZKND, RISCV_ISA_EXT_ZKNE, RISCV_ISA_EXT_ZKR, RISCV_ISA_EXT_ZKT, }; static const unsigned int riscv_zkn_bundled_exts[] = { RISCV_ISA_EXT_ZBKB, RISCV_ISA_EXT_ZBKC, RISCV_ISA_EXT_ZBKX, RISCV_ISA_EXT_ZKND, RISCV_ISA_EXT_ZKNE, RISCV_ISA_EXT_ZKNH, }; static const unsigned int riscv_zks_bundled_exts[] = { RISCV_ISA_EXT_ZBKB, RISCV_ISA_EXT_ZBKC, RISCV_ISA_EXT_ZKSED, RISCV_ISA_EXT_ZKSH }; #define RISCV_ISA_EXT_ZVKN \ RISCV_ISA_EXT_ZVKNED, \ RISCV_ISA_EXT_ZVKNHB, \ RISCV_ISA_EXT_ZVKB, \ RISCV_ISA_EXT_ZVKT static const unsigned int riscv_zvkn_bundled_exts[] = { RISCV_ISA_EXT_ZVKN }; static const unsigned int riscv_zvknc_bundled_exts[] = { RISCV_ISA_EXT_ZVKN, RISCV_ISA_EXT_ZVBC }; static const unsigned int riscv_zvkng_bundled_exts[] = { RISCV_ISA_EXT_ZVKN, RISCV_ISA_EXT_ZVKG }; #define RISCV_ISA_EXT_ZVKS \ RISCV_ISA_EXT_ZVKSED, \ RISCV_ISA_EXT_ZVKSH, \ RISCV_ISA_EXT_ZVKB, \ RISCV_ISA_EXT_ZVKT static const unsigned int riscv_zvks_bundled_exts[] = { RISCV_ISA_EXT_ZVKS }; static const unsigned int riscv_zvksc_bundled_exts[] = { RISCV_ISA_EXT_ZVKS, RISCV_ISA_EXT_ZVBC }; static const unsigned int riscv_zvksg_bundled_exts[] = { RISCV_ISA_EXT_ZVKS, RISCV_ISA_EXT_ZVKG }; static const unsigned int riscv_zvbb_exts[] = { RISCV_ISA_EXT_ZVKB }; #define RISCV_ISA_EXT_ZVE64F_IMPLY_LIST \ RISCV_ISA_EXT_ZVE64X, \ RISCV_ISA_EXT_ZVE32F, \ RISCV_ISA_EXT_ZVE32X #define RISCV_ISA_EXT_ZVE64D_IMPLY_LIST \ RISCV_ISA_EXT_ZVE64F, \ RISCV_ISA_EXT_ZVE64F_IMPLY_LIST #define 
RISCV_ISA_EXT_V_IMPLY_LIST \ RISCV_ISA_EXT_ZVE64D, \ RISCV_ISA_EXT_ZVE64D_IMPLY_LIST static const unsigned int riscv_zve32f_exts[] = { RISCV_ISA_EXT_ZVE32X }; static const unsigned int riscv_zve64f_exts[] = { RISCV_ISA_EXT_ZVE64F_IMPLY_LIST }; static const unsigned int riscv_zve64d_exts[] = { RISCV_ISA_EXT_ZVE64D_IMPLY_LIST }; static const unsigned int riscv_v_exts[] = { RISCV_ISA_EXT_V_IMPLY_LIST }; static const unsigned int riscv_zve64x_exts[] = { RISCV_ISA_EXT_ZVE32X, RISCV_ISA_EXT_ZVE64X }; /* * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V * privileged ISA, the existence of the CSRs is implied by any extension which * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the * existence of the CSR, and treat it as a subset of those other extensions. */ static const unsigned int riscv_xlinuxenvcfg_exts[] = { RISCV_ISA_EXT_XLINUXENVCFG }; /* * Zc* spec states that: * - C always implies Zca * - C+F implies Zcf (RV32 only) * - C+D implies Zcd * * These extensions will be enabled and then validated depending on the * availability of F/D RV32. */ static const unsigned int riscv_c_exts[] = { RISCV_ISA_EXT_ZCA, RISCV_ISA_EXT_ZCF, RISCV_ISA_EXT_ZCD, }; /* * The canonical order of ISA extension names in the ISA string is defined in * chapter 27 of the unprivileged specification. * * Ordinarily, for in-kernel data structures, this order is unimportant but * isa_ext_arr defines the order of the ISA string in /proc/cpuinfo. * * The specification uses vague wording, such as should, when it comes to * ordering, so for our purposes the following rules apply: * * 1. All multi-letter extensions must be separated from other extensions by an * underscore. * * 2. Additional standard extensions (starting with 'Z') must be sorted after * single-letter extensions and before any higher-privileged extensions. * * 3. The first letter following the 'Z' conventionally indicates the most * closely related alphabetical extension category, IMAFDQLCBKJTPVH. * If multiple 'Z' extensions are named, they must be ordered first by * category, then alphabetically within a category. * * 3. Standard supervisor-level extensions (starting with 'S') must be listed * after standard unprivileged extensions. If multiple supervisor-level * extensions are listed, they must be ordered alphabetically. * * 4. Standard machine-level extensions (starting with 'Zxm') must be listed * after any lower-privileged, standard extensions. If multiple * machine-level extensions are listed, they must be ordered * alphabetically. * * 5. Non-standard extensions (starting with 'X') must be listed after all * standard extensions. If multiple non-standard extensions are listed, they * must be ordered alphabetically. * * An example string following the order is: * rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux * * New entries to this struct should follow the ordering rules described above. 
*/ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i), __RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m), __RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a), __RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f), __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), __RISCV_ISA_EXT_SUPERSET(c, RISCV_ISA_EXT_c, riscv_c_exts), __RISCV_ISA_EXT_SUPERSET(v, RISCV_ISA_EXT_v, riscv_v_exts), __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h), __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts, riscv_ext_zicbom_validate), __RISCV_ISA_EXT_SUPERSET_VALIDATE(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts, riscv_ext_zicboz_validate), __RISCV_ISA_EXT_DATA(ziccrse, RISCV_ISA_EXT_ZICCRSE), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI), __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL), __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), __RISCV_ISA_EXT_DATA(zimop, RISCV_ISA_EXT_ZIMOP), __RISCV_ISA_EXT_DATA(zabha, RISCV_ISA_EXT_ZABHA), __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), __RISCV_ISA_EXT_DATA(zawrs, RISCV_ISA_EXT_ZAWRS), __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), __RISCV_ISA_EXT_DATA(zca, RISCV_ISA_EXT_ZCA), __RISCV_ISA_EXT_DATA_VALIDATE(zcb, RISCV_ISA_EXT_ZCB, riscv_ext_zca_depends), __RISCV_ISA_EXT_DATA_VALIDATE(zcd, RISCV_ISA_EXT_ZCD, riscv_ext_zcd_validate), __RISCV_ISA_EXT_DATA_VALIDATE(zcf, RISCV_ISA_EXT_ZCF, riscv_ext_zcf_validate), __RISCV_ISA_EXT_DATA_VALIDATE(zcmop, RISCV_ISA_EXT_ZCMOP, riscv_ext_zca_depends), __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA), __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC), __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB), __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC), __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX), __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS), __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts), __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts), __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND), __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE), __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH), __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR), __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts), __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT), __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED), __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH), __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO), __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts), __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC), __RISCV_ISA_EXT_SUPERSET(zve32f, RISCV_ISA_EXT_ZVE32F, riscv_zve32f_exts), __RISCV_ISA_EXT_DATA(zve32x, RISCV_ISA_EXT_ZVE32X), __RISCV_ISA_EXT_SUPERSET(zve64d, RISCV_ISA_EXT_ZVE64D, riscv_zve64d_exts), __RISCV_ISA_EXT_SUPERSET(zve64f, RISCV_ISA_EXT_ZVE64F, riscv_zve64f_exts), __RISCV_ISA_EXT_SUPERSET(zve64x, RISCV_ISA_EXT_ZVE64X, riscv_zve64x_exts), __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG), __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts), __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts), __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED), 
__RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts), __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA), __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB), __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts), __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts), __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED), __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH), __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), __RISCV_ISA_EXT_DATA(smmpm, RISCV_ISA_EXT_SMMPM), __RISCV_ISA_EXT_SUPERSET(smnpm, RISCV_ISA_EXT_SMNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), __RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF), __RISCV_ISA_EXT_SUPERSET(ssnpm, RISCV_ISA_EXT_SSNPM, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC), __RISCV_ISA_EXT_DATA(svade, RISCV_ISA_EXT_SVADE), __RISCV_ISA_EXT_DATA_VALIDATE(svadu, RISCV_ISA_EXT_SVADU, riscv_ext_svadu_validate), __RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL), __RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT), __RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT), __RISCV_ISA_EXT_DATA(svvptc, RISCV_ISA_EXT_SVVPTC), }; const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); static void riscv_isa_set_ext(const struct riscv_isa_ext_data *ext, unsigned long *bitmap) { if (ext->id != RISCV_ISA_EXT_INVALID) set_bit(ext->id, bitmap); for (int i = 0; i < ext->subset_ext_size; i++) { if (ext->subset_ext_ids[i] != RISCV_ISA_EXT_INVALID) set_bit(ext->subset_ext_ids[i], bitmap); } } static const struct riscv_isa_ext_data *riscv_get_isa_ext_data(unsigned int ext_id) { for (int i = 0; i < riscv_isa_ext_count; i++) { if (riscv_isa_ext[i].id == ext_id) return &riscv_isa_ext[i]; } return NULL; } /* * "Resolve" a source ISA bitmap into one that matches kernel configuration as * well as correct extension dependencies. Some extensions depends on specific * kernel configuration to be usable (V needs CONFIG_RISCV_ISA_V for instance) * and this function will actually validate all the extensions provided in * source_isa into the resolved_isa based on extensions validate() callbacks. 
*/ static void __init riscv_resolve_isa(unsigned long *source_isa, unsigned long *resolved_isa, unsigned long *this_hwcap, unsigned long *isa2hwcap) { bool loop; const struct riscv_isa_ext_data *ext; DECLARE_BITMAP(prev_resolved_isa, RISCV_ISA_EXT_MAX); int max_loop_count = riscv_isa_ext_count, ret; unsigned int bit; do { loop = false; if (max_loop_count-- < 0) { pr_err("Failed to reach a stable ISA state\n"); return; } bitmap_copy(prev_resolved_isa, resolved_isa, RISCV_ISA_EXT_MAX); for_each_set_bit(bit, source_isa, RISCV_ISA_EXT_MAX) { ext = riscv_get_isa_ext_data(bit); if (ext && ext->validate) { ret = ext->validate(ext, resolved_isa); if (ret == -EPROBE_DEFER) { loop = true; continue; } else if (ret) { /* Disable the extension entirely */ clear_bit(bit, source_isa); continue; } } set_bit(bit, resolved_isa); /* No need to keep it in source isa now that it is enabled */ clear_bit(bit, source_isa); /* Single letter extensions get set in hwcap */ if (bit < RISCV_ISA_EXT_BASE) *this_hwcap |= isa2hwcap[bit]; } } while (loop && memcmp(prev_resolved_isa, resolved_isa, sizeof(prev_resolved_isa))); } static void __init match_isa_ext(const char *name, const char *name_end, unsigned long *bitmap) { for (int i = 0; i < riscv_isa_ext_count; i++) { const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; if ((name_end - name == strlen(ext->name)) && !strncasecmp(name, ext->name, name_end - name)) { riscv_isa_set_ext(ext, bitmap); break; } } } static void __init riscv_parse_isa_string(const char *isa, unsigned long *bitmap) { /* * For all possible cpus, we have already validated in * the boot process that they at least contain "rv" and * whichever of "32"/"64" this kernel supports, and so this * section can be skipped. */ isa += 4; while (*isa) { const char *ext = isa++; const char *ext_end = isa; bool ext_err = false; switch (*ext) { case 'x': case 'X': if (acpi_disabled) pr_warn_once("Vendor extensions are ignored in riscv,isa. Use riscv,isa-extensions instead."); /* * To skip an extension, we find its end. * As multi-letter extensions must be split from other multi-letter * extensions with an "_", the end of a multi-letter extension will * either be the null character or the "_" at the start of the next * multi-letter extension. */ for (; *isa && *isa != '_'; ++isa) ; ext_err = true; break; case 's': /* * Workaround for invalid single-letter 's' & 'u' (QEMU). * No need to set the bit in riscv_isa as 's' & 'u' are * not valid ISA extensions. It works unless the first * multi-letter extension in the ISA string begins with * "Su" and is not prefixed with an underscore. */ if (ext[-1] != '_' && ext[1] == 'u') { ++isa; ext_err = true; break; } fallthrough; case 'S': case 'z': case 'Z': /* * Before attempting to parse the extension itself, we find its end. * As multi-letter extensions must be split from other multi-letter * extensions with an "_", the end of a multi-letter extension will * either be the null character or the "_" at the start of the next * multi-letter extension. * * Next, as the extensions version is currently ignored, we * eliminate that portion. This is done by parsing backwards from * the end of the extension, removing any numbers. This may be a * major or minor number however, so the process is repeated if a * minor number was found. * * ext_end is intended to represent the first character *after* the * name portion of an extension, but will be decremented to the last * character itself while eliminating the extensions version number. * A simple re-increment solves this problem. 
*/ for (; *isa && *isa != '_'; ++isa) if (unlikely(!isalnum(*isa))) ext_err = true; ext_end = isa; if (unlikely(ext_err)) break; if (!isdigit(ext_end[-1])) break; while (isdigit(*--ext_end)) ; if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) { ++ext_end; break; } while (isdigit(*--ext_end)) ; ++ext_end; break; default: /* * Things are a little easier for single-letter extensions, as they * are parsed forwards. * * After checking that our starting position is valid, we need to * ensure that, when isa was incremented at the start of the loop, * that it arrived at the start of the next extension. * * If we are already on a non-digit, there is nothing to do. Either * we have a multi-letter extension's _, or the start of an * extension. * * Otherwise we have found the current extension's major version * number. Parse past it, and a subsequent p/minor version number * if present. The `p` extension must not appear immediately after * a number, so there is no fear of missing it. * */ if (unlikely(!isalpha(*ext))) { ext_err = true; break; } if (!isdigit(*isa)) break; while (isdigit(*++isa)) ; if (tolower(*isa) != 'p') break; if (!isdigit(*++isa)) { --isa; break; } while (isdigit(*++isa)) ; break; } /* * The parser expects that at the start of an iteration isa points to the * first character of the next extension. As we stop parsing an extension * on meeting a non-alphanumeric character, an extra increment is needed * where the succeeding extension is a multi-letter prefixed with an "_". */ if (*isa == '_') ++isa; if (unlikely(ext_err)) continue; match_isa_ext(ext, ext_end, bitmap); } } static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap) { struct device_node *node; const char *isa; int rc; struct acpi_table_header *rhct; acpi_status status; unsigned int cpu; u64 boot_vendorid; u64 boot_archid; if (!acpi_disabled) { status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct); if (ACPI_FAILURE(status)) return; } boot_vendorid = riscv_get_mvendorid(); boot_archid = riscv_get_marchid(); for_each_possible_cpu(cpu) { struct riscv_isainfo *isainfo = &hart_isa[cpu]; unsigned long this_hwcap = 0; DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; if (acpi_disabled) { node = of_cpu_device_node_get(cpu); if (!node) { pr_warn("Unable to find cpu node\n"); continue; } rc = of_property_read_string(node, "riscv,isa", &isa); of_node_put(node); if (rc) { pr_warn("Unable to find \"riscv,isa\" devicetree entry\n"); continue; } } else { rc = acpi_get_riscv_isa(rhct, cpu, &isa); if (rc < 0) { pr_warn("Unable to get ISA for the hart - %d\n", cpu); continue; } } riscv_parse_isa_string(isa, source_isa); /* * These ones were as they were part of the base ISA when the * port & dt-bindings were upstreamed, and so can be set * unconditionally where `i` is in riscv,isa on DT systems. */ if (acpi_disabled) { set_bit(RISCV_ISA_EXT_ZICSR, source_isa); set_bit(RISCV_ISA_EXT_ZIFENCEI, source_isa); set_bit(RISCV_ISA_EXT_ZICNTR, source_isa); set_bit(RISCV_ISA_EXT_ZIHPM, source_isa); } /* * "V" in ISA strings is ambiguous in practice: it should mean * just the standard V-1.0 but vendors aren't well behaved. * Many vendors with T-Head CPU cores which implement the 0.7.1 * version of the vector specification put "v" into their DTs. * CPU cores with the ratified spec will contain non-zero * marchid. 
*/ if (acpi_disabled && boot_vendorid == THEAD_VENDOR_ID && boot_archid == 0x0) { this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v]; clear_bit(RISCV_ISA_EXT_v, source_isa); } riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); /* * All "okay" hart should have same isa. Set HWCAP based on * common capabilities of every "okay" hart, in case they don't * have. */ if (elf_hwcap) elf_hwcap &= this_hwcap; else elf_hwcap = this_hwcap; if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); else bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); } if (!acpi_disabled && rhct) acpi_put_table((struct acpi_table_header *)rhct); } static void __init riscv_fill_cpu_vendor_ext(struct device_node *cpu_node, int cpu) { if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) return; for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; for (int j = 0; j < ext_list->ext_data_count; j++) { const struct riscv_isa_ext_data ext = ext_list->ext_data[j]; struct riscv_isavendorinfo *isavendorinfo = &ext_list->per_hart_isa_bitmap[cpu]; if (of_property_match_string(cpu_node, "riscv,isa-extensions", ext.property) < 0) continue; /* * Assume that subset extensions are all members of the * same vendor. */ if (ext.subset_ext_size) for (int k = 0; k < ext.subset_ext_size; k++) set_bit(ext.subset_ext_ids[k], isavendorinfo->isa); set_bit(ext.id, isavendorinfo->isa); } } } /* * Populate all_harts_isa_bitmap for each vendor with all of the extensions that * are shared across CPUs for that vendor. */ static void __init riscv_fill_vendor_ext_list(int cpu) { if (!IS_ENABLED(CONFIG_RISCV_ISA_VENDOR_EXT)) return; for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) { struct riscv_isa_vendor_ext_data_list *ext_list = riscv_isa_vendor_ext_list[i]; if (!ext_list->is_initialized) { bitmap_copy(ext_list->all_harts_isa_bitmap.isa, ext_list->per_hart_isa_bitmap[cpu].isa, RISCV_ISA_VENDOR_EXT_MAX); ext_list->is_initialized = true; } else { bitmap_and(ext_list->all_harts_isa_bitmap.isa, ext_list->all_harts_isa_bitmap.isa, ext_list->per_hart_isa_bitmap[cpu].isa, RISCV_ISA_VENDOR_EXT_MAX); } } } static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) { unsigned int cpu; for_each_possible_cpu(cpu) { unsigned long this_hwcap = 0; struct device_node *cpu_node; struct riscv_isainfo *isainfo = &hart_isa[cpu]; DECLARE_BITMAP(source_isa, RISCV_ISA_EXT_MAX) = { 0 }; cpu_node = of_cpu_device_node_get(cpu); if (!cpu_node) { pr_warn("Unable to find cpu node\n"); continue; } if (!of_property_present(cpu_node, "riscv,isa-extensions")) { of_node_put(cpu_node); continue; } for (int i = 0; i < riscv_isa_ext_count; i++) { const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; if (of_property_match_string(cpu_node, "riscv,isa-extensions", ext->property) < 0) continue; riscv_isa_set_ext(ext, source_isa); } riscv_resolve_isa(source_isa, isainfo->isa, &this_hwcap, isa2hwcap); riscv_fill_cpu_vendor_ext(cpu_node, cpu); of_node_put(cpu_node); /* * All "okay" harts should have same isa. Set HWCAP based on * common capabilities of every "okay" hart, in case they don't. 
*/ if (elf_hwcap) elf_hwcap &= this_hwcap; else elf_hwcap = this_hwcap; if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); else bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX); riscv_fill_vendor_ext_list(cpu); } if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX)) return -ENOENT; return 0; } #ifdef CONFIG_RISCV_ISA_FALLBACK bool __initdata riscv_isa_fallback = true; #else bool __initdata riscv_isa_fallback; static int __init riscv_isa_fallback_setup(char *__unused) { riscv_isa_fallback = true; return 1; } early_param("riscv_isa_fallback", riscv_isa_fallback_setup); #endif void __init riscv_fill_hwcap(void) { char print_str[NUM_ALPHA_EXTS + 1]; unsigned long isa2hwcap[26] = {0}; int i, j; isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I; isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M; isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A; isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F; isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D; isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C; isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V; if (!acpi_disabled) { riscv_fill_hwcap_from_isa_string(isa2hwcap); } else { int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap); if (ret && riscv_isa_fallback) { pr_info("Falling back to deprecated \"riscv,isa\"\n"); riscv_fill_hwcap_from_isa_string(isa2hwcap); } } /* * We don't support systems with F but without D, so mask those out * here. */ if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) { pr_info("This kernel does not support systems with F but not D\n"); elf_hwcap &= ~COMPAT_HWCAP_ISA_F; } if (__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZVE32X)) { /* * This cannot fail when called on the boot hart */ riscv_v_setup_vsize(); } if (elf_hwcap & COMPAT_HWCAP_ISA_V) { /* * ISA string in device tree might have 'v' flag, but * CONFIG_RISCV_ISA_V is disabled in kernel. * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled. */ if (!IS_ENABLED(CONFIG_RISCV_ISA_V)) elf_hwcap &= ~COMPAT_HWCAP_ISA_V; } memset(print_str, 0, sizeof(print_str)); for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++) if (riscv_isa[0] & BIT_MASK(i)) print_str[j++] = (char)('a' + i); pr_info("riscv: base ISA extensions %s\n", print_str); memset(print_str, 0, sizeof(print_str)); for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++) if (elf_hwcap & BIT_MASK(i)) print_str[j++] = (char)('a' + i); pr_info("riscv: ELF capabilities %s\n", print_str); } unsigned long riscv_get_elf_hwcap(void) { unsigned long hwcap; hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1)); if (!riscv_v_vstate_ctrl_user_allowed()) hwcap &= ~COMPAT_HWCAP_ISA_V; return hwcap; } void __init riscv_user_isa_enable(void) { if (riscv_has_extension_unlikely(RISCV_ISA_EXT_ZICBOZ)) current->thread.envcfg |= ENVCFG_CBZE; else if (any_cpu_has_zicboz) pr_warn("Zicboz disabled as it is unavailable on some harts\n"); } #ifdef CONFIG_RISCV_ALTERNATIVE /* * Alternative patch sites consider 48 bits when determining when to patch * the old instruction sequence with the new. These bits are broken into a * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the * patch site is for an erratum, identified by the 32-bit patch ID. When * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures * further break down patch ID into two 16-bit numbers. The lower 16 bits * are the cpufeature ID and the upper 16 bits are used for a value specific * to the cpufeature and patch site. If the upper 16 bits are zero, then it * implies no specific value is specified. 
cpufeatures that want to control * patching on a per-site basis will provide non-zero values and implement * checks here. The checks return true when patching should be done, and * false otherwise. */ static bool riscv_cpufeature_patch_check(u16 id, u16 value) { if (!value) return true; switch (id) { case RISCV_ISA_EXT_ZICBOZ: /* * Zicboz alternative applications provide the maximum * supported block size order, or zero when it doesn't * matter. If the current block size exceeds the maximum, * then the alternative cannot be applied. */ return riscv_cboz_block_size <= (1U << value); } return false; } void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned int stage) { struct alt_entry *alt; void *oldptr, *altptr; u16 id, value, vendor; if (stage == RISCV_ALTERNATIVES_EARLY_BOOT) return; for (alt = begin; alt < end; alt++) { id = PATCH_ID_CPUFEATURE_ID(alt->patch_id); vendor = PATCH_ID_CPUFEATURE_ID(alt->vendor_id); /* * Any alternative with a patch_id that is less than * RISCV_ISA_EXT_MAX is interpreted as a standard extension. * * Any alternative with patch_id that is greater than or equal * to RISCV_VENDOR_EXT_ALTERNATIVES_BASE is interpreted as a * vendor extension. */ if (id < RISCV_ISA_EXT_MAX) { /* * This patch should be treated as errata so skip * processing here. */ if (alt->vendor_id != 0) continue; if (!__riscv_isa_extension_available(NULL, id)) continue; value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id); if (!riscv_cpufeature_patch_check(id, value)) continue; } else if (id >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE) { if (!__riscv_isa_vendor_extension_available(VENDOR_EXT_ALL_CPUS, vendor, id - RISCV_VENDOR_EXT_ALTERNATIVES_BASE)) continue; } else { WARN(1, "This extension id:%d is not in ISA extension list", id); continue; } oldptr = ALT_OLD_PTR(alt); altptr = ALT_ALT_PTR(alt); mutex_lock(&text_mutex); patch_text_nosync(oldptr, altptr, alt->alt_len); riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr); mutex_unlock(&text_mutex); } } #endif
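For context, a hedged sketch of how the bitmaps built above are typically consumed (the function name and the choice of Zbb are assumptions for illustration; __riscv_isa_extension_available() and hart_isa[] are the ones defined in this file):

/* Hedged sketch: query the resolved ISA bitmaps. */
static bool example_zbb_usable(int cpu)
{
        /* NULL selects the system-wide bitmap: capabilities common to all harts */
        if (!__riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZBB))
                return false;

        /* Per-hart view of the same extension, e.g. for diagnostics */
        return __riscv_isa_extension_available(hart_isa[cpu].isa, RISCV_ISA_EXT_ZBB);
}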
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. * Copyright (C) 2016 T-Platforms. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * BSD LICENSE * * Copyright (C) 2016 Advanced Micro Devices, Inc. All Rights Reserved. * Copyright (C) 2016 T-Platforms. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copy * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of AMD Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
* * AMD PCIe NTB Linux driver * * Contact Information: * Xiangliang Yu <[email protected]> */ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/acpi.h> #include <linux/pci.h> #include <linux/random.h> #include <linux/slab.h> #include <linux/ntb.h> #include "ntb_hw_amd.h" #define NTB_NAME "ntb_hw_amd" #define NTB_DESC "AMD(R) PCI-E Non-Transparent Bridge Driver" #define NTB_VER "1.0" MODULE_DESCRIPTION(NTB_DESC); MODULE_VERSION(NTB_VER); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("AMD Inc."); static const struct file_operations amd_ntb_debugfs_info; static struct dentry *debugfs_dir; static int ndev_mw_to_bar(struct amd_ntb_dev *ndev, int idx) { if (idx < 0 || idx > ndev->mw_count) return -EINVAL; return ndev->dev_data->mw_idx << idx; } static int amd_ntb_mw_count(struct ntb_dev *ntb, int pidx) { if (pidx != NTB_DEF_PEER_IDX) return -EINVAL; return ntb_ndev(ntb)->mw_count; } static int amd_ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int idx, resource_size_t *addr_align, resource_size_t *size_align, resource_size_t *size_max) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); int bar; if (pidx != NTB_DEF_PEER_IDX) return -EINVAL; bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar; if (addr_align) *addr_align = SZ_4K; if (size_align) *size_align = 1; if (size_max) *size_max = pci_resource_len(ndev->ntb.pdev, bar); return 0; } static int amd_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int idx, dma_addr_t addr, resource_size_t size) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); unsigned long xlat_reg, limit_reg = 0; resource_size_t mw_size; void __iomem *mmio, *peer_mmio; u64 base_addr, limit, reg_val; int bar; if (pidx != NTB_DEF_PEER_IDX) return -EINVAL; bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar; mw_size = pci_resource_len(ntb->pdev, bar); /* make sure the range fits in the usable mw size */ if (size > mw_size) return -EINVAL; mmio = ndev->self_mmio; peer_mmio = ndev->peer_mmio; base_addr = pci_resource_start(ntb->pdev, bar); if (bar != 1) { xlat_reg = AMD_BAR23XLAT_OFFSET + ((bar - 2) << 2); limit_reg = AMD_BAR23LMT_OFFSET + ((bar - 2) << 2); /* Set the limit if supported */ limit = size; /* set and verify setting the translation address */ write64(addr, peer_mmio + xlat_reg); reg_val = read64(peer_mmio + xlat_reg); if (reg_val != addr) { write64(0, peer_mmio + xlat_reg); return -EIO; } /* set and verify setting the limit */ write64(limit, peer_mmio + limit_reg); reg_val = read64(peer_mmio + limit_reg); if (reg_val != limit) { write64(base_addr, mmio + limit_reg); write64(0, peer_mmio + xlat_reg); return -EIO; } } else { xlat_reg = AMD_BAR1XLAT_OFFSET; limit_reg = AMD_BAR1LMT_OFFSET; /* Set the limit if supported */ limit = size; /* set and verify setting the translation address */ write64(addr, peer_mmio + xlat_reg); reg_val = read64(peer_mmio + xlat_reg); if (reg_val != addr) { write64(0, peer_mmio + xlat_reg); return -EIO; } /* set and verify setting the limit */ writel(limit, peer_mmio + limit_reg); reg_val = readl(peer_mmio + limit_reg); if (reg_val != limit) { writel(base_addr, mmio + limit_reg); writel(0, peer_mmio + xlat_reg); return -EIO; } } return 0; } static int amd_ntb_get_link_status(struct amd_ntb_dev *ndev) { struct pci_dev *pdev = NULL; struct pci_dev *pci_swds = NULL; struct pci_dev *pci_swus = NULL; u32 stat; int rc; if (ndev->ntb.topo == NTB_TOPO_SEC) { /* Locate the pointer to Downstream Switch for this device */ pci_swds = 
pci_upstream_bridge(ndev->ntb.pdev); if (pci_swds) { /* * Locate the pointer to Upstream Switch for * the Downstream Switch. */ pci_swus = pci_upstream_bridge(pci_swds); if (pci_swus) { rc = pcie_capability_read_dword(pci_swus, PCI_EXP_LNKCTL, &stat); if (rc) return 0; } else { return 0; } } else { return 0; } } else if (ndev->ntb.topo == NTB_TOPO_PRI) { /* * For NTB primary, we simply read the Link Status and control * register of the NTB device itself. */ pdev = ndev->ntb.pdev; rc = pcie_capability_read_dword(pdev, PCI_EXP_LNKCTL, &stat); if (rc) return 0; } else { /* Catch all for everything else */ return 0; } ndev->lnk_sta = stat; return 1; } static int amd_link_is_up(struct amd_ntb_dev *ndev) { int ret; /* * We consider the link to be up under two conditions: * * - When a link-up event is received. This is indicated by * AMD_LINK_UP_EVENT set in peer_sta. * - When driver on both sides of the link have been loaded. * This is indicated by bit 1 being set in the peer * SIDEINFO register. * * This function should return 1 when the latter of the above * two conditions is true. * * Now consider the sequence of events - Link-Up event occurs, * then the peer side driver loads. In this case, we would have * received LINK_UP event and bit 1 of peer SIDEINFO is also * set. What happens now if the link goes down? Bit 1 of * peer SIDEINFO remains set, but LINK_DOWN bit is set in * peer_sta. So we should return 0 from this function. Not only * that, we clear bit 1 of peer SIDEINFO to 0, since the peer * side driver did not even get a chance to clear it before * the link went down. This can be the case of surprise link * removal. * * LINK_UP event will always occur before the peer side driver * gets loaded the very first time. So there can be a case when * the LINK_UP event has occurred, but the peer side driver hasn't * yet loaded. We return 0 in that case. * * There is also a special case when the primary side driver is * unloaded and then loaded again. Since there is no change in * the status of NTB secondary in this case, there is no Link-Up * or Link-Down notification received. We recognize this condition * with peer_sta being set to 0. * * If bit 1 of peer SIDEINFO register is not set, then we * simply return 0 irrespective of the link up or down status * set in peer_sta. */ ret = amd_poll_link(ndev); if (ret) { /* * We need to check the below only for NTB primary. For NTB * secondary, simply checking the result of PSIDE_INFO * register will suffice. 
*/ if (ndev->ntb.topo == NTB_TOPO_PRI) { if ((ndev->peer_sta & AMD_LINK_UP_EVENT) || (ndev->peer_sta == 0)) return ret; else if (ndev->peer_sta & AMD_LINK_DOWN_EVENT) { /* Clear peer sideinfo register */ amd_clear_side_info_reg(ndev, true); return 0; } } else { /* NTB_TOPO_SEC */ return ret; } } return 0; } static u64 amd_ntb_link_is_up(struct ntb_dev *ntb, enum ntb_speed *speed, enum ntb_width *width) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); int ret = 0; if (amd_link_is_up(ndev)) { if (speed) *speed = NTB_LNK_STA_SPEED(ndev->lnk_sta); if (width) *width = NTB_LNK_STA_WIDTH(ndev->lnk_sta); dev_dbg(&ntb->pdev->dev, "link is up.\n"); ret = 1; } else { if (speed) *speed = NTB_SPEED_NONE; if (width) *width = NTB_WIDTH_NONE; dev_dbg(&ntb->pdev->dev, "link is down.\n"); } return ret; } static int amd_ntb_link_enable(struct ntb_dev *ntb, enum ntb_speed max_speed, enum ntb_width max_width) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; /* Enable event interrupt */ ndev->int_mask &= ~AMD_EVENT_INTMASK; writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; dev_dbg(&ntb->pdev->dev, "Enabling Link.\n"); return 0; } static int amd_ntb_link_disable(struct ntb_dev *ntb) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; /* Disable event interrupt */ ndev->int_mask |= AMD_EVENT_INTMASK; writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); if (ndev->ntb.topo == NTB_TOPO_SEC) return -EINVAL; dev_dbg(&ntb->pdev->dev, "Disabling Link.\n"); return 0; } static int amd_ntb_peer_mw_count(struct ntb_dev *ntb) { /* The same as for inbound MWs */ return ntb_ndev(ntb)->mw_count; } static int amd_ntb_peer_mw_get_addr(struct ntb_dev *ntb, int idx, phys_addr_t *base, resource_size_t *size) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); int bar; bar = ndev_mw_to_bar(ndev, idx); if (bar < 0) return bar; if (base) *base = pci_resource_start(ndev->ntb.pdev, bar); if (size) *size = pci_resource_len(ndev->ntb.pdev, bar); return 0; } static u64 amd_ntb_db_valid_mask(struct ntb_dev *ntb) { return ntb_ndev(ntb)->db_valid_mask; } static int amd_ntb_db_vector_count(struct ntb_dev *ntb) { return ntb_ndev(ntb)->db_count; } static u64 amd_ntb_db_vector_mask(struct ntb_dev *ntb, int db_vector) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); if (db_vector < 0 || db_vector > ndev->db_count) return 0; return ntb_ndev(ntb)->db_valid_mask & (1ULL << db_vector); } static u64 amd_ntb_db_read(struct ntb_dev *ntb) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; return (u64)readw(mmio + AMD_DBSTAT_OFFSET); } static int amd_ntb_db_clear(struct ntb_dev *ntb, u64 db_bits) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; writew((u16)db_bits, mmio + AMD_DBSTAT_OFFSET); return 0; } static int amd_ntb_db_set_mask(struct ntb_dev *ntb, u64 db_bits) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; unsigned long flags; if (db_bits & ~ndev->db_valid_mask) return -EINVAL; spin_lock_irqsave(&ndev->db_mask_lock, flags); ndev->db_mask |= db_bits; writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET); spin_unlock_irqrestore(&ndev->db_mask_lock, flags); return 0; } static int amd_ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; unsigned long flags; if (db_bits & ~ndev->db_valid_mask) return -EINVAL; spin_lock_irqsave(&ndev->db_mask_lock, flags); ndev->db_mask &= ~db_bits;
writew((u16)ndev->db_mask, mmio + AMD_DBMASK_OFFSET); spin_unlock_irqrestore(&ndev->db_mask_lock, flags); return 0; } static int amd_ntb_peer_db_set(struct ntb_dev *ntb, u64 db_bits) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; writew((u16)db_bits, mmio + AMD_DBREQ_OFFSET); return 0; } static int amd_ntb_spad_count(struct ntb_dev *ntb) { return ntb_ndev(ntb)->spad_count; } static u32 amd_ntb_spad_read(struct ntb_dev *ntb, int idx) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; if (idx < 0 || idx >= ndev->spad_count) return 0; offset = ndev->self_spad + (idx << 2); return readl(mmio + AMD_SPAD_OFFSET + offset); } static int amd_ntb_spad_write(struct ntb_dev *ntb, int idx, u32 val) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; if (idx < 0 || idx >= ndev->spad_count) return -EINVAL; offset = ndev->self_spad + (idx << 2); writel(val, mmio + AMD_SPAD_OFFSET + offset); return 0; } static u32 amd_ntb_peer_spad_read(struct ntb_dev *ntb, int pidx, int sidx) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; if (sidx < 0 || sidx >= ndev->spad_count) return -EINVAL; offset = ndev->peer_spad + (sidx << 2); return readl(mmio + AMD_SPAD_OFFSET + offset); } static int amd_ntb_peer_spad_write(struct ntb_dev *ntb, int pidx, int sidx, u32 val) { struct amd_ntb_dev *ndev = ntb_ndev(ntb); void __iomem *mmio = ndev->self_mmio; u32 offset; if (sidx < 0 || sidx >= ndev->spad_count) return -EINVAL; offset = ndev->peer_spad + (sidx << 2); writel(val, mmio + AMD_SPAD_OFFSET + offset); return 0; } static const struct ntb_dev_ops amd_ntb_ops = { .mw_count = amd_ntb_mw_count, .mw_get_align = amd_ntb_mw_get_align, .mw_set_trans = amd_ntb_mw_set_trans, .peer_mw_count = amd_ntb_peer_mw_count, .peer_mw_get_addr = amd_ntb_peer_mw_get_addr, .link_is_up = amd_ntb_link_is_up, .link_enable = amd_ntb_link_enable, .link_disable = amd_ntb_link_disable, .db_valid_mask = amd_ntb_db_valid_mask, .db_vector_count = amd_ntb_db_vector_count, .db_vector_mask = amd_ntb_db_vector_mask, .db_read = amd_ntb_db_read, .db_clear = amd_ntb_db_clear, .db_set_mask = amd_ntb_db_set_mask, .db_clear_mask = amd_ntb_db_clear_mask, .peer_db_set = amd_ntb_peer_db_set, .spad_count = amd_ntb_spad_count, .spad_read = amd_ntb_spad_read, .spad_write = amd_ntb_spad_write, .peer_spad_read = amd_ntb_peer_spad_read, .peer_spad_write = amd_ntb_peer_spad_write, }; static void amd_ack_smu(struct amd_ntb_dev *ndev, u32 bit) { void __iomem *mmio = ndev->self_mmio; int reg; reg = readl(mmio + AMD_SMUACK_OFFSET); reg |= bit; writel(reg, mmio + AMD_SMUACK_OFFSET); } static void amd_handle_event(struct amd_ntb_dev *ndev, int vec) { void __iomem *mmio = ndev->self_mmio; struct device *dev = &ndev->ntb.pdev->dev; u32 status; status = readl(mmio + AMD_INTSTAT_OFFSET); if (!(status & AMD_EVENT_INTMASK)) return; dev_dbg(dev, "status = 0x%x and vec = %d\n", status, vec); status &= AMD_EVENT_INTMASK; switch (status) { case AMD_PEER_FLUSH_EVENT: ndev->peer_sta |= AMD_PEER_FLUSH_EVENT; dev_info(dev, "Flush is done.\n"); break; case AMD_PEER_RESET_EVENT: case AMD_LINK_DOWN_EVENT: ndev->peer_sta |= status; if (status == AMD_LINK_DOWN_EVENT) ndev->peer_sta &= ~AMD_LINK_UP_EVENT; amd_ack_smu(ndev, status); /* link down first */ ntb_link_event(&ndev->ntb); /* polling peer status */ schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); break; case AMD_PEER_D3_EVENT: case AMD_PEER_PMETO_EVENT: case 
AMD_LINK_UP_EVENT: ndev->peer_sta |= status; if (status == AMD_LINK_UP_EVENT) ndev->peer_sta &= ~AMD_LINK_DOWN_EVENT; else if (status == AMD_PEER_D3_EVENT) ndev->peer_sta &= ~AMD_PEER_D0_EVENT; amd_ack_smu(ndev, status); /* link down */ ntb_link_event(&ndev->ntb); break; case AMD_PEER_D0_EVENT: mmio = ndev->peer_mmio; status = readl(mmio + AMD_PMESTAT_OFFSET); /* check if this is WAKEUP event */ if (status & 0x1) dev_info(dev, "Wakeup is done.\n"); ndev->peer_sta |= AMD_PEER_D0_EVENT; ndev->peer_sta &= ~AMD_PEER_D3_EVENT; amd_ack_smu(ndev, AMD_PEER_D0_EVENT); /* start a timer to poll link status */ schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); break; default: dev_info(dev, "event status = 0x%x.\n", status); break; } /* Clear the interrupt status */ writel(status, mmio + AMD_INTSTAT_OFFSET); } static void amd_handle_db_event(struct amd_ntb_dev *ndev, int vec) { struct device *dev = &ndev->ntb.pdev->dev; u64 status; status = amd_ntb_db_read(&ndev->ntb); dev_dbg(dev, "status = 0x%llx and vec = %d\n", status, vec); /* * Since we had reserved highest order bit of DB for signaling peer of * a special event, this is the only status bit we should be concerned * here now. */ if (status & BIT(ndev->db_last_bit)) { ntb_db_clear(&ndev->ntb, BIT(ndev->db_last_bit)); /* send link down event notification */ ntb_link_event(&ndev->ntb); /* * If we are here, that means the peer has signalled a special * event which notifies that the peer driver has been * un-loaded for some reason. Since there is a chance that the * peer will load its driver again sometime, we schedule link * polling routine. */ schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); } } static irqreturn_t ndev_interrupt(struct amd_ntb_dev *ndev, int vec) { dev_dbg(&ndev->ntb.pdev->dev, "vec %d\n", vec); if (vec > (AMD_DB_CNT - 1) || (ndev->msix_vec_count == 1)) amd_handle_event(ndev, vec); if (vec < AMD_DB_CNT) { amd_handle_db_event(ndev, vec); ntb_db_event(&ndev->ntb, vec); } return IRQ_HANDLED; } static irqreturn_t ndev_vec_isr(int irq, void *dev) { struct amd_ntb_vec *nvec = dev; return ndev_interrupt(nvec->ndev, nvec->num); } static irqreturn_t ndev_irq_isr(int irq, void *dev) { struct amd_ntb_dev *ndev = dev; return ndev_interrupt(ndev, irq - ndev->ntb.pdev->irq); } static int ndev_init_isr(struct amd_ntb_dev *ndev, int msix_min, int msix_max) { struct pci_dev *pdev; int rc, i, msix_count, node; pdev = ndev->ntb.pdev; node = dev_to_node(&pdev->dev); ndev->db_mask = ndev->db_valid_mask; /* Try to set up msix irq */ ndev->vec = kcalloc_node(msix_max, sizeof(*ndev->vec), GFP_KERNEL, node); if (!ndev->vec) goto err_msix_vec_alloc; ndev->msix = kcalloc_node(msix_max, sizeof(*ndev->msix), GFP_KERNEL, node); if (!ndev->msix) goto err_msix_alloc; for (i = 0; i < msix_max; ++i) ndev->msix[i].entry = i; msix_count = pci_enable_msix_range(pdev, ndev->msix, msix_min, msix_max); if (msix_count < 0) goto err_msix_enable; /* NOTE: Disable MSIX if msix count is less than 16 because of * hardware limitation. 
*/ if (msix_count < msix_min) { pci_disable_msix(pdev); goto err_msix_enable; } for (i = 0; i < msix_count; ++i) { ndev->vec[i].ndev = ndev; ndev->vec[i].num = i; rc = request_irq(ndev->msix[i].vector, ndev_vec_isr, 0, "ndev_vec_isr", &ndev->vec[i]); if (rc) goto err_msix_request; } dev_dbg(&pdev->dev, "Using msix interrupts\n"); ndev->db_count = msix_min; ndev->msix_vec_count = msix_max; return 0; err_msix_request: while (i-- > 0) free_irq(ndev->msix[i].vector, &ndev->vec[i]); pci_disable_msix(pdev); err_msix_enable: kfree(ndev->msix); err_msix_alloc: kfree(ndev->vec); err_msix_vec_alloc: ndev->msix = NULL; ndev->vec = NULL; /* Try to set up msi irq */ rc = pci_enable_msi(pdev); if (rc) goto err_msi_enable; rc = request_irq(pdev->irq, ndev_irq_isr, 0, "ndev_irq_isr", ndev); if (rc) goto err_msi_request; dev_dbg(&pdev->dev, "Using msi interrupts\n"); ndev->db_count = 1; ndev->msix_vec_count = 1; return 0; err_msi_request: pci_disable_msi(pdev); err_msi_enable: /* Try to set up intx irq */ pci_intx(pdev, 1); rc = request_irq(pdev->irq, ndev_irq_isr, IRQF_SHARED, "ndev_irq_isr", ndev); if (rc) goto err_intx_request; dev_dbg(&pdev->dev, "Using intx interrupts\n"); ndev->db_count = 1; ndev->msix_vec_count = 1; return 0; err_intx_request: return rc; } static void ndev_deinit_isr(struct amd_ntb_dev *ndev) { struct pci_dev *pdev; void __iomem *mmio = ndev->self_mmio; int i; pdev = ndev->ntb.pdev; /* Mask all doorbell interrupts */ ndev->db_mask = ndev->db_valid_mask; writel(ndev->db_mask, mmio + AMD_DBMASK_OFFSET); if (ndev->msix) { i = ndev->msix_vec_count; while (i--) free_irq(ndev->msix[i].vector, &ndev->vec[i]); pci_disable_msix(pdev); kfree(ndev->msix); kfree(ndev->vec); } else { free_irq(pdev->irq, ndev); if (pci_dev_msi_enabled(pdev)) pci_disable_msi(pdev); else pci_intx(pdev, 0); } } static ssize_t ndev_debugfs_read(struct file *filp, char __user *ubuf, size_t count, loff_t *offp) { struct amd_ntb_dev *ndev; void __iomem *mmio; char *buf; size_t buf_size; ssize_t ret, off; union { u64 v64; u32 v32; u16 v16; } u; ndev = filp->private_data; mmio = ndev->self_mmio; buf_size = min(count, 0x800ul); buf = kmalloc(buf_size, GFP_KERNEL); if (!buf) return -ENOMEM; off = 0; off += scnprintf(buf + off, buf_size - off, "NTB Device Information:\n"); off += scnprintf(buf + off, buf_size - off, "Connection Topology -\t%s\n", ntb_topo_string(ndev->ntb.topo)); off += scnprintf(buf + off, buf_size - off, "LNK STA -\t\t%#06x\n", ndev->lnk_sta); if (!amd_link_is_up(ndev)) { off += scnprintf(buf + off, buf_size - off, "Link Status -\t\tDown\n"); } else { off += scnprintf(buf + off, buf_size - off, "Link Status -\t\tUp\n"); off += scnprintf(buf + off, buf_size - off, "Link Speed -\t\tPCI-E Gen %u\n", NTB_LNK_STA_SPEED(ndev->lnk_sta)); off += scnprintf(buf + off, buf_size - off, "Link Width -\t\tx%u\n", NTB_LNK_STA_WIDTH(ndev->lnk_sta)); } off += scnprintf(buf + off, buf_size - off, "Memory Window Count -\t%u\n", ndev->mw_count); off += scnprintf(buf + off, buf_size - off, "Scratchpad Count -\t%u\n", ndev->spad_count); off += scnprintf(buf + off, buf_size - off, "Doorbell Count -\t%u\n", ndev->db_count); off += scnprintf(buf + off, buf_size - off, "MSIX Vector Count -\t%u\n", ndev->msix_vec_count); off += scnprintf(buf + off, buf_size - off, "Doorbell Valid Mask -\t%#llx\n", ndev->db_valid_mask); u.v32 = readl(ndev->self_mmio + AMD_DBMASK_OFFSET); off += scnprintf(buf + off, buf_size - off, "Doorbell Mask -\t\t\t%#06x\n", u.v32); u.v32 = readl(mmio + AMD_DBSTAT_OFFSET); off += scnprintf(buf + off, buf_size - 
off, "Doorbell Bell -\t\t\t%#06x\n", u.v32); off += scnprintf(buf + off, buf_size - off, "\nNTB Incoming XLAT:\n"); u.v64 = read64(mmio + AMD_BAR1XLAT_OFFSET); off += scnprintf(buf + off, buf_size - off, "XLAT1 -\t\t%#018llx\n", u.v64); u.v64 = read64(ndev->self_mmio + AMD_BAR23XLAT_OFFSET); off += scnprintf(buf + off, buf_size - off, "XLAT23 -\t\t%#018llx\n", u.v64); u.v64 = read64(ndev->self_mmio + AMD_BAR45XLAT_OFFSET); off += scnprintf(buf + off, buf_size - off, "XLAT45 -\t\t%#018llx\n", u.v64); u.v32 = readl(mmio + AMD_BAR1LMT_OFFSET); off += scnprintf(buf + off, buf_size - off, "LMT1 -\t\t\t%#06x\n", u.v32); u.v64 = read64(ndev->self_mmio + AMD_BAR23LMT_OFFSET); off += scnprintf(buf + off, buf_size - off, "LMT23 -\t\t\t%#018llx\n", u.v64); u.v64 = read64(ndev->self_mmio + AMD_BAR45LMT_OFFSET); off += scnprintf(buf + off, buf_size - off, "LMT45 -\t\t\t%#018llx\n", u.v64); ret = simple_read_from_buffer(ubuf, count, offp, buf, off); kfree(buf); return ret; } static void ndev_init_debugfs(struct amd_ntb_dev *ndev) { if (!debugfs_dir) { ndev->debugfs_dir = NULL; ndev->debugfs_info = NULL; } else { ndev->debugfs_dir = debugfs_create_dir(pci_name(ndev->ntb.pdev), debugfs_dir); ndev->debugfs_info = debugfs_create_file("info", S_IRUSR, ndev->debugfs_dir, ndev, &amd_ntb_debugfs_info); } } static void ndev_deinit_debugfs(struct amd_ntb_dev *ndev) { debugfs_remove_recursive(ndev->debugfs_dir); } static inline void ndev_init_struct(struct amd_ntb_dev *ndev, struct pci_dev *pdev) { ndev->ntb.pdev = pdev; ndev->ntb.topo = NTB_TOPO_NONE; ndev->ntb.ops = &amd_ntb_ops; ndev->int_mask = AMD_EVENT_INTMASK; spin_lock_init(&ndev->db_mask_lock); } static int amd_poll_link(struct amd_ntb_dev *ndev) { void __iomem *mmio = ndev->peer_mmio; u32 reg; reg = readl(mmio + AMD_SIDEINFO_OFFSET); reg &= AMD_SIDE_READY; dev_dbg(&ndev->ntb.pdev->dev, "%s: reg_val = 0x%x.\n", __func__, reg); ndev->cntl_sta = reg; amd_ntb_get_link_status(ndev); return ndev->cntl_sta; } static void amd_link_hb(struct work_struct *work) { struct amd_ntb_dev *ndev = hb_ndev(work); if (amd_poll_link(ndev)) ntb_link_event(&ndev->ntb); if (!amd_link_is_up(ndev)) schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); } static int amd_init_isr(struct amd_ntb_dev *ndev) { return ndev_init_isr(ndev, AMD_DB_CNT, AMD_MSIX_VECTOR_CNT); } static void amd_set_side_info_reg(struct amd_ntb_dev *ndev, bool peer) { void __iomem *mmio = NULL; unsigned int reg; if (peer) mmio = ndev->peer_mmio; else mmio = ndev->self_mmio; reg = readl(mmio + AMD_SIDEINFO_OFFSET); if (!(reg & AMD_SIDE_READY)) { reg |= AMD_SIDE_READY; writel(reg, mmio + AMD_SIDEINFO_OFFSET); } } static void amd_clear_side_info_reg(struct amd_ntb_dev *ndev, bool peer) { void __iomem *mmio = NULL; unsigned int reg; if (peer) mmio = ndev->peer_mmio; else mmio = ndev->self_mmio; reg = readl(mmio + AMD_SIDEINFO_OFFSET); if (reg & AMD_SIDE_READY) { reg &= ~AMD_SIDE_READY; writel(reg, mmio + AMD_SIDEINFO_OFFSET); readl(mmio + AMD_SIDEINFO_OFFSET); } } static void amd_init_side_info(struct amd_ntb_dev *ndev) { void __iomem *mmio = ndev->self_mmio; u32 ntb_ctl; amd_set_side_info_reg(ndev, false); ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL); writel(ntb_ctl, mmio + AMD_CNTL_OFFSET); } static void amd_deinit_side_info(struct amd_ntb_dev *ndev) { void __iomem *mmio = ndev->self_mmio; u32 ntb_ctl; amd_clear_side_info_reg(ndev, false); ntb_ctl = readl(mmio + AMD_CNTL_OFFSET); ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL); writel(ntb_ctl, mmio + AMD_CNTL_OFFSET); } static 
int amd_init_ntb(struct amd_ntb_dev *ndev) { void __iomem *mmio = ndev->self_mmio; ndev->mw_count = ndev->dev_data->mw_count; ndev->spad_count = AMD_SPADS_CNT; ndev->db_count = AMD_DB_CNT; switch (ndev->ntb.topo) { case NTB_TOPO_PRI: case NTB_TOPO_SEC: ndev->spad_count >>= 1; if (ndev->ntb.topo == NTB_TOPO_PRI) { ndev->self_spad = 0; ndev->peer_spad = 0x20; } else { ndev->self_spad = 0x20; ndev->peer_spad = 0; } INIT_DELAYED_WORK(&ndev->hb_timer, amd_link_hb); schedule_delayed_work(&ndev->hb_timer, AMD_LINK_HB_TIMEOUT); break; default: dev_err(&ndev->ntb.pdev->dev, "AMD NTB does not support B2B mode.\n"); return -EINVAL; } /* Mask event interrupts */ writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); return 0; } static enum ntb_topo amd_get_topo(struct amd_ntb_dev *ndev) { void __iomem *mmio = ndev->self_mmio; u32 info; info = readl(mmio + AMD_SIDEINFO_OFFSET); if (info & AMD_SIDE_MASK) return NTB_TOPO_SEC; else return NTB_TOPO_PRI; } static int amd_init_dev(struct amd_ntb_dev *ndev) { void __iomem *mmio = ndev->self_mmio; struct pci_dev *pdev; int rc = 0; pdev = ndev->ntb.pdev; ndev->ntb.topo = amd_get_topo(ndev); dev_dbg(&pdev->dev, "AMD NTB topo is %s\n", ntb_topo_string(ndev->ntb.topo)); rc = amd_init_ntb(ndev); if (rc) return rc; rc = amd_init_isr(ndev); if (rc) { dev_err(&pdev->dev, "fail to init isr.\n"); return rc; } ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; /* * We reserve the highest order bit of the DB register which will * be used to notify peer when the driver on this side is being * un-loaded. */ ndev->db_last_bit = find_last_bit((unsigned long *)&ndev->db_valid_mask, hweight64(ndev->db_valid_mask)); writew((u16)~BIT(ndev->db_last_bit), mmio + AMD_DBMASK_OFFSET); /* * Since now there is one less bit to account for, the DB count * and DB mask should be adjusted accordingly. 
*/ ndev->db_count -= 1; ndev->db_valid_mask = BIT_ULL(ndev->db_count) - 1; /* Enable Link-Up and Link-Down event interrupts */ ndev->int_mask &= ~(AMD_LINK_UP_EVENT | AMD_LINK_DOWN_EVENT); writel(ndev->int_mask, mmio + AMD_INTMASK_OFFSET); return 0; } static void amd_deinit_dev(struct amd_ntb_dev *ndev) { cancel_delayed_work_sync(&ndev->hb_timer); ndev_deinit_isr(ndev); } static int amd_ntb_init_pci(struct amd_ntb_dev *ndev, struct pci_dev *pdev) { int rc; pci_set_drvdata(pdev, ndev); rc = pci_enable_device(pdev); if (rc) goto err_pci_enable; rc = pci_request_regions(pdev, NTB_NAME); if (rc) goto err_pci_regions; pci_set_master(pdev); rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (rc) { rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (rc) goto err_dma_mask; dev_warn(&pdev->dev, "Cannot DMA highmem\n"); } ndev->self_mmio = pci_iomap(pdev, 0, 0); if (!ndev->self_mmio) { rc = -EIO; goto err_dma_mask; } ndev->peer_mmio = ndev->self_mmio + AMD_PEER_OFFSET; return 0; err_dma_mask: pci_release_regions(pdev); err_pci_regions: pci_disable_device(pdev); err_pci_enable: pci_set_drvdata(pdev, NULL); return rc; } static void amd_ntb_deinit_pci(struct amd_ntb_dev *ndev) { struct pci_dev *pdev = ndev->ntb.pdev; pci_iounmap(pdev, ndev->self_mmio); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static int amd_ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct amd_ntb_dev *ndev; int rc, node; node = dev_to_node(&pdev->dev); ndev = kzalloc_node(sizeof(*ndev), GFP_KERNEL, node); if (!ndev) { rc = -ENOMEM; goto err_ndev; } ndev->dev_data = (struct ntb_dev_data *)id->driver_data; ndev_init_struct(ndev, pdev); rc = amd_ntb_init_pci(ndev, pdev); if (rc) goto err_init_pci; rc = amd_init_dev(ndev); if (rc) goto err_init_dev; /* write side info */ amd_init_side_info(ndev); amd_poll_link(ndev); ndev_init_debugfs(ndev); rc = ntb_register_device(&ndev->ntb); if (rc) goto err_register; dev_info(&pdev->dev, "NTB device registered.\n"); return 0; err_register: ndev_deinit_debugfs(ndev); amd_deinit_dev(ndev); err_init_dev: amd_ntb_deinit_pci(ndev); err_init_pci: kfree(ndev); err_ndev: return rc; } static void amd_ntb_pci_remove(struct pci_dev *pdev) { struct amd_ntb_dev *ndev = pci_get_drvdata(pdev); /* * Clear the READY bit in SIDEINFO register before sending DB event * to the peer. This will make sure that when the peer handles the * DB event, it correctly reads this bit as being 0. 
*/ amd_deinit_side_info(ndev); ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit)); ntb_unregister_device(&ndev->ntb); ndev_deinit_debugfs(ndev); amd_deinit_dev(ndev); amd_ntb_deinit_pci(ndev); kfree(ndev); } static void amd_ntb_pci_shutdown(struct pci_dev *pdev) { struct amd_ntb_dev *ndev = pci_get_drvdata(pdev); /* Send link down notification */ ntb_link_event(&ndev->ntb); amd_deinit_side_info(ndev); ntb_peer_db_set(&ndev->ntb, BIT_ULL(ndev->db_last_bit)); ntb_unregister_device(&ndev->ntb); ndev_deinit_debugfs(ndev); amd_deinit_dev(ndev); amd_ntb_deinit_pci(ndev); kfree(ndev); } static const struct file_operations amd_ntb_debugfs_info = { .owner = THIS_MODULE, .open = simple_open, .read = ndev_debugfs_read, }; static const struct ntb_dev_data dev_data[] = { { /* for device 145b */ .mw_count = 3, .mw_idx = 1, }, { /* for device 148b */ .mw_count = 2, .mw_idx = 2, }, }; static const struct pci_device_id amd_ntb_pci_tbl[] = { { PCI_VDEVICE(AMD, 0x145b), (kernel_ulong_t)&dev_data[0] }, { PCI_VDEVICE(AMD, 0x148b), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(AMD, 0x14c0), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(AMD, 0x14c3), (kernel_ulong_t)&dev_data[1] }, { PCI_VDEVICE(HYGON, 0x145b), (kernel_ulong_t)&dev_data[0] }, { 0, } }; MODULE_DEVICE_TABLE(pci, amd_ntb_pci_tbl); static struct pci_driver amd_ntb_pci_driver = { .name = KBUILD_MODNAME, .id_table = amd_ntb_pci_tbl, .probe = amd_ntb_pci_probe, .remove = amd_ntb_pci_remove, .shutdown = amd_ntb_pci_shutdown, }; static int __init amd_ntb_pci_driver_init(void) { int ret; pr_info("%s %s\n", NTB_DESC, NTB_VER); if (debugfs_initialized()) debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); ret = pci_register_driver(&amd_ntb_pci_driver); if (ret) debugfs_remove_recursive(debugfs_dir); return ret; } module_init(amd_ntb_pci_driver_init); static void __exit amd_ntb_pci_driver_exit(void) { pci_unregister_driver(&amd_ntb_pci_driver); debugfs_remove_recursive(debugfs_dir); } module_exit(amd_ntb_pci_driver_exit);
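/*
 * Standalone userspace sketch (not part of the driver above) of the doorbell
 * bookkeeping done in amd_init_dev(): the highest valid doorbell bit is
 * reserved for signalling the peer about a driver unload, so the usable
 * doorbell count and valid mask each shrink by one bit. A doorbell count of
 * 16 is assumed here purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int db_count = 16;				/* assumed hardware doorbell count */
	uint64_t db_valid_mask = (1ULL << db_count) - 1;	/* 0xffff */
	unsigned int db_last_bit = db_count - 1;		/* bit 15 is reserved */

	db_count -= 1;						/* one less usable doorbell */
	db_valid_mask = (1ULL << db_count) - 1;			/* 0x7fff */

	printf("reserved bit %u, usable doorbells %u, mask %#llx\n",
	       db_last_bit, db_count, (unsigned long long)db_valid_mask);
	return 0;
}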
// SPDX-License-Identifier: GPL-2.0 /* * ip22-hpc.c: Routines for generic manipulation of the HPC controllers. * * Copyright (C) 1996 David S. Miller ([email protected]) * Copyright (C) 1998 Ralf Baechle */ #include <linux/export.h> #include <linux/init.h> #include <linux/types.h> #include <asm/io.h> #include <asm/sgi/hpc3.h> #include <asm/sgi/ioc.h> #include <asm/sgi/ip22.h> struct hpc3_regs *hpc3c0, *hpc3c1; EXPORT_SYMBOL(hpc3c0); EXPORT_SYMBOL(hpc3c1); struct sgioc_regs *sgioc; EXPORT_SYMBOL(sgioc); /* We need software copies of these because they are write only. */ u8 sgi_ioc_reset, sgi_ioc_write; extern char *system_type; void __init sgihpc_init(void) { /* ioremap can't fail */ hpc3c0 = (struct hpc3_regs *) ioremap(HPC3_CHIP0_BASE, sizeof(struct hpc3_regs)); hpc3c1 = (struct hpc3_regs *) ioremap(HPC3_CHIP1_BASE, sizeof(struct hpc3_regs)); /* IOC lives in PBUS PIO channel 6 */ sgioc = (struct sgioc_regs *)hpc3c0->pbus_extregs[6]; hpc3c0->pbus_piocfg[6][0] |= HPC3_PIOCFG_DS16; if (ip22_is_fullhouse()) { /* Full House comes with INT2 which lives in PBUS PIO * channel 4 */ sgint = (struct sgint_regs *)hpc3c0->pbus_extregs[4]; system_type = "SGI Indigo2"; } else { /* Guiness comes with INT3 which is part of IOC */ sgint = &sgioc->int3; system_type = "SGI Indy"; } sgi_ioc_reset = (SGIOC_RESET_PPORT | SGIOC_RESET_KBDMOUSE | SGIOC_RESET_EISA | SGIOC_RESET_ISDN | SGIOC_RESET_LC0OFF); sgi_ioc_write = (SGIOC_WRITE_EASEL | SGIOC_WRITE_NTHRESH | SGIOC_WRITE_TPSPEED | SGIOC_WRITE_EPSEL | SGIOC_WRITE_U0AMODE | SGIOC_WRITE_U1AMODE); sgioc->reset = sgi_ioc_reset; sgioc->write = sgi_ioc_write; }
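/*
 * Standalone userspace sketch (not part of the file above) of the pattern
 * behind sgi_ioc_reset and sgi_ioc_write: because the IOC registers are
 * write-only, a software shadow copy is kept and every update is a
 * read-modify-write on the shadow followed by a plain write to the
 * hardware. Names and bit values here are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t ioc_shadow;		/* software copy of the write-only register */
static uint8_t ioc_hw;			/* stands in for the real register */

static void ioc_update(uint8_t set, uint8_t clear)
{
	ioc_shadow = (ioc_shadow & ~clear) | set;
	ioc_hw = ioc_shadow;		/* only writes ever reach the hardware */
}

int main(void)
{
	ioc_update(0x05, 0x00);		/* set bits 0 and 2 */
	ioc_update(0x00, 0x01);		/* clear bit 0 without reading hardware */
	printf("shadow=%#x hw=%#x\n", ioc_shadow, ioc_hw);
	return 0;
}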
// SPDX-License-Identifier: GPL-2.0-only OR MIT /* * Device Tree Source for AM62 SoC Family * * Copyright (C) 2020-2024 Texas Instruments Incorporated - https://www.ti.com/ */ #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/interrupt-controller/irq.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/soc/ti,sci_pm_domain.h> #include "k3-pinctrl.h" / { model = "Texas Instruments K3 AM625 SoC"; compatible = "ti,am625"; interrupt-parent = <&gic500>; #address-cells = <2>; #size-cells = <2>; chosen { }; firmware { optee { compatible = "linaro,optee-tz"; method = "smc"; }; psci: psci { compatible = "arm,psci-1.0"; method = "smc"; }; }; a53_timer0: timer-cl0-cpu0 { compatible = "arm,armv8-timer"; interrupts = <GIC_PPI 13 IRQ_TYPE_LEVEL_LOW>, /* cntpsirq */ <GIC_PPI 14 IRQ_TYPE_LEVEL_LOW>, /* cntpnsirq */ <GIC_PPI 11 IRQ_TYPE_LEVEL_LOW>, /* cntvirq */ <GIC_PPI 10 IRQ_TYPE_LEVEL_LOW>; /* cnthpirq */ }; pmu: pmu { compatible = "arm,cortex-a53-pmu"; interrupts = <GIC_PPI 7 IRQ_TYPE_LEVEL_HIGH>; }; cbass_main: bus@f0000 { bootph-all; compatible = "simple-bus"; #address-cells = <2>; #size-cells = <2>; ranges = <0x00 0x000f0000 0x00 0x000f0000 0x00 0x00030000>, /* Main MMRs */ <0x00 0x00420000 0x00 0x00420000 0x00 0x00001000>, /* ESM0 */ <0x00 0x00600000 0x00 0x00600000 0x00 0x00001100>, /* GPIO */ <0x00 0x00703000 0x00 0x00703000 0x00 0x00000200>, /* USB0 debug trace */ <0x00 0x0070c000 0x00 0x0070c000 0x00 0x00000200>, /* USB1 debug trace */ <0x00 0x00a40000 0x00 0x00a40000 0x00 0x00000800>, /* Timesync router */ <0x00 0x01000000 0x00 0x01000000 0x00 0x01b28400>, /* First peripheral window */ <0x00 0x08000000 0x00 0x08000000 0x00 0x00200000>, /* Main CPSW */ <0x00 0x0e000000 0x00 0x0e000000 0x00 0x01d20000>, /* Second peripheral window */ <0x00 0x0fd00000 0x00 0x0fd00000 0x00 0x00020000>, /* GPU */ <0x00 0x20000000 0x00 0x20000000 0x00 0x0a008000>, /* Third peripheral window */ <0x00 0x30040000 0x00 0x30040000 0x00 0x00080000>, /* PRUSS-M */ <0x00 0x30101000 0x00 0x30101000 0x00 0x00010100>, /* CSI window */ <0x00 0x30200000 0x00 0x30200000 0x00 0x00010000>, /* DSS */ <0x00 0x31000000 0x00 0x31000000 0x00 0x00050000>, /* USB0 DWC3 Core window */ <0x00 0x31100000 0x00 0x31100000 0x00 0x00050000>, /* USB1 DWC3 Core window */ <0x00 0x3b000000 0x00 0x3b000000 0x00 0x00000400>, /* GPMC0_CFG */ <0x00 0x40900000 0x00 0x40900000 0x00 0x00030000>, /* SA3UL */ <0x00 0x43600000 0x00 0x43600000 0x00 0x00010000>, /* SA3 sproxy data */ <0x00 0x44043000 0x00 0x44043000 0x00 0x00000fe0>, /* TI SCI DEBUG */ <0x00 0x44860000 0x00 0x44860000 0x00 0x00040000>, /* SA3 sproxy config */ <0x00 0x48000000 0x00 0x48000000 0x00 0x06400000>, /* DMSS */ <0x00 0x50000000 0x00 0x50000000 0x00 0x08000000>, /* GPMC0 DATA */ <0x00 0x60000000 0x00 0x60000000 0x00 0x08000000>, /* FSS0 DAT1 */ <0x00 0x70000000 0x00 0x70000000 0x00 0x00010000>, /* OCSRAM */ <0x01 0x00000000 0x01 0x00000000 0x00 0x00310000>, /* A53 PERIPHBASE */ <0x05 0x00000000 0x05 0x00000000 0x01 0x00000000>, /* FSS0 DAT3 */ /* MCU Domain Range */ <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>, /* Wakeup Domain Range */ <0x00 0x00b00000 0x00 0x00b00000 0x00 0x00002400>, /* VTM */ <0x00 0x2b000000 0x00 0x2b000000 0x00 0x00300400>, <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>; cbass_mcu: bus@4000000 { bootph-all; compatible = "simple-bus"; #address-cells = <2>; #size-cells = <2>; ranges = <0x00 0x04000000 0x00 0x04000000 0x00 0x01ff1400>; /* Peripheral window */ }; cbass_wakeup: bus@b00000 { bootph-all; compatible = "simple-bus"; 
#address-cells = <2>; #size-cells = <2>; ranges = <0x00 0x00b00000 0x00 0x00b00000 0x00 0x00002400>, /* VTM */ <0x00 0x2b000000 0x00 0x2b000000 0x00 0x00300400>, /* Peripheral Window */ <0x00 0x43000000 0x00 0x43000000 0x00 0x00020000>; }; }; dss_vp1_clk: clock-divider-oldi { compatible = "fixed-factor-clock"; clocks = <&k3_clks 186 0>; #clock-cells = <0>; clock-div = <7>; clock-mult = <1>; }; #include "k3-am62-thermal.dtsi" }; /* Now include the peripherals for each bus segment */ #include "k3-am62-main.dtsi" #include "k3-am62-mcu.dtsi" #include "k3-am62-wakeup.dtsi"
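/*
 * Standalone sketch (not part of the device tree above): dss_vp1_clk is a
 * "fixed-factor-clock" with clock-mult = 1 and clock-div = 7, i.e. its rate
 * is the parent rate scaled by mult/div (one seventh of <&k3_clks 186 0>).
 * The parent rate used below is an arbitrary example, not a real board value.
 */
#include <stdio.h>

static unsigned long long fixed_factor_rate(unsigned long long parent_rate,
					    unsigned int mult, unsigned int div)
{
	return parent_rate * mult / div;
}

int main(void)
{
	/* e.g. a 1.54 GHz OLDI serial clock would give a 220 MHz VP1 clock */
	printf("%llu\n", fixed_factor_rate(1540000000ULL, 1, 7));
	return 0;
}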
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright 2015-2017 Google, Inc */ #ifndef __LINUX_USB_PD_BDO_H #define __LINUX_USB_PD_BDO_H /* BDO : BIST Data Object */ #define BDO_MODE_RECV (0 << 28) #define BDO_MODE_TRANSMIT (1 << 28) #define BDO_MODE_COUNTERS (2 << 28) #define BDO_MODE_CARRIER0 (3 << 28) #define BDO_MODE_CARRIER1 (4 << 28) #define BDO_MODE_CARRIER2 (5 << 28) #define BDO_MODE_CARRIER3 (6 << 28) #define BDO_MODE_EYE (7 << 28) #define BDO_MODE_TESTDATA (8U << 28) #define BDO_MODE_MASK(mode) ((mode) & 0xf0000000) #endif
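/*
 * Standalone userspace sketch (not part of the header above): a BIST Data
 * Object keeps its mode in bits 31:28, which is what the BDO_MODE_* values
 * and BDO_MODE_MASK() express. The macros are repeated locally so the
 * example compiles on its own; the low-order payload bits are arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

#define BDO_MODE_CARRIER2	(5U << 28)
#define BDO_MODE_TESTDATA	(8U << 28)
#define BDO_MODE_MASK(mode)	((mode) & 0xf0000000U)

int main(void)
{
	uint32_t bdo = BDO_MODE_TESTDATA | 0x1234;	/* mode in the top nibble */

	printf("test data mode: %d, carrier2 mode: %d\n",
	       BDO_MODE_MASK(bdo) == BDO_MODE_TESTDATA,
	       BDO_MODE_MASK(bdo) == BDO_MODE_CARRIER2);
	return 0;
}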
// SPDX-License-Identifier: MIT /* * Copyright © 2014-2019 Intel Corporation */ #include <linux/debugfs.h> #include <linux/string_helpers.h> #include "gt/intel_gt.h" #include "i915_drv.h" #include "i915_irq.h" #include "i915_memcpy.h" #include "intel_guc_capture.h" #include "intel_guc_log.h" #include "intel_guc_print.h" #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GUC) #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_2M #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_16M #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M #elif defined(CONFIG_DRM_I915_DEBUG_GEM) #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_1M #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_2M #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M #else #define GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE SZ_8K #define GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE SZ_64K #define GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE SZ_1M #endif static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log); struct guc_log_section { u32 max; u32 flag; u32 default_val; const char *name; }; static void _guc_log_init_sizes(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); static const struct guc_log_section sections[GUC_LOG_SECTIONS_LIMIT] = { { GUC_LOG_CRASH_MASK >> GUC_LOG_CRASH_SHIFT, GUC_LOG_LOG_ALLOC_UNITS, GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE, "crash dump" }, { GUC_LOG_DEBUG_MASK >> GUC_LOG_DEBUG_SHIFT, GUC_LOG_LOG_ALLOC_UNITS, GUC_LOG_DEFAULT_DEBUG_BUFFER_SIZE, "debug", }, { GUC_LOG_CAPTURE_MASK >> GUC_LOG_CAPTURE_SHIFT, GUC_LOG_CAPTURE_ALLOC_UNITS, GUC_LOG_DEFAULT_CAPTURE_BUFFER_SIZE, "capture", } }; int i; for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) log->sizes[i].bytes = sections[i].default_val; /* If debug size > 1MB then bump default crash size to keep the same units */ if (log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes >= SZ_1M && GUC_LOG_DEFAULT_CRASH_BUFFER_SIZE < SZ_1M) log->sizes[GUC_LOG_SECTIONS_CRASH].bytes = SZ_1M; /* Prepare the GuC API structure fields: */ for (i = 0; i < GUC_LOG_SECTIONS_LIMIT; i++) { /* Convert to correct units */ if ((log->sizes[i].bytes % SZ_1M) == 0) { log->sizes[i].units = SZ_1M; log->sizes[i].flag = sections[i].flag; } else { log->sizes[i].units = SZ_4K; log->sizes[i].flag = 0; } if (!IS_ALIGNED(log->sizes[i].bytes, log->sizes[i].units)) guc_err(guc, "Mis-aligned log %s size: 0x%X vs 0x%X!\n", sections[i].name, log->sizes[i].bytes, log->sizes[i].units); log->sizes[i].count = log->sizes[i].bytes / log->sizes[i].units; if (!log->sizes[i].count) { guc_err(guc, "Zero log %s size!\n", sections[i].name); } else { /* Size is +1 unit */ log->sizes[i].count--; } /* Clip to field size */ if (log->sizes[i].count > sections[i].max) { guc_err(guc, "log %s size too large: %d vs %d!\n", sections[i].name, log->sizes[i].count + 1, sections[i].max + 1); log->sizes[i].count = sections[i].max; } } if (log->sizes[GUC_LOG_SECTIONS_CRASH].units != log->sizes[GUC_LOG_SECTIONS_DEBUG].units) { guc_err(guc, "Unit mismatch for crash and debug sections: %d vs %d!\n", log->sizes[GUC_LOG_SECTIONS_CRASH].units, log->sizes[GUC_LOG_SECTIONS_DEBUG].units); log->sizes[GUC_LOG_SECTIONS_CRASH].units = log->sizes[GUC_LOG_SECTIONS_DEBUG].units; log->sizes[GUC_LOG_SECTIONS_CRASH].count = 0; } log->sizes_initialised = true; } static void guc_log_init_sizes(struct intel_guc_log *log) { if (log->sizes_initialised) return; _guc_log_init_sizes(log); } static u32 intel_guc_log_section_size_crash(struct intel_guc_log *log) { guc_log_init_sizes(log); return log->sizes[GUC_LOG_SECTIONS_CRASH].bytes; } static u32 intel_guc_log_section_size_debug(struct intel_guc_log *log) { 
guc_log_init_sizes(log); return log->sizes[GUC_LOG_SECTIONS_DEBUG].bytes; } u32 intel_guc_log_section_size_capture(struct intel_guc_log *log) { guc_log_init_sizes(log); return log->sizes[GUC_LOG_SECTIONS_CAPTURE].bytes; } static u32 intel_guc_log_size(struct intel_guc_log *log) { /* * GuC Log buffer Layout: * * NB: Ordering must follow "enum guc_log_buffer_type". * * +===============================+ 00B * | Debug state header | * +-------------------------------+ 32B * | Crash dump state header | * +-------------------------------+ 64B * | Capture state header | * +-------------------------------+ 96B * | | * +===============================+ PAGE_SIZE (4KB) * | Debug logs | * +===============================+ + DEBUG_SIZE * | Crash Dump logs | * +===============================+ + CRASH_SIZE * | Capture logs | * +===============================+ + CAPTURE_SIZE */ return PAGE_SIZE + intel_guc_log_section_size_crash(log) + intel_guc_log_section_size_debug(log) + intel_guc_log_section_size_capture(log); } /** * DOC: GuC firmware log * * Firmware log is enabled by setting i915.guc_log_level to the positive level. * Log data is printed out via reading debugfs i915_guc_log_dump. Reading from * i915_guc_load_status will print out firmware loading status and scratch * registers value. */ static int guc_action_flush_log_complete(struct intel_guc *guc) { u32 action[] = { INTEL_GUC_ACTION_LOG_BUFFER_FILE_FLUSH_COMPLETE, GUC_DEBUG_LOG_BUFFER }; return intel_guc_send_nb(guc, action, ARRAY_SIZE(action), 0); } static int guc_action_flush_log(struct intel_guc *guc) { u32 action[] = { INTEL_GUC_ACTION_FORCE_LOG_BUFFER_FLUSH, 0 }; return intel_guc_send(guc, action, ARRAY_SIZE(action)); } static int guc_action_control_log(struct intel_guc *guc, bool enable, bool default_logging, u32 verbosity) { u32 action[] = { INTEL_GUC_ACTION_UK_LOG_ENABLE_LOGGING, (enable ? GUC_LOG_CONTROL_LOGGING_ENABLED : 0) | (verbosity << GUC_LOG_CONTROL_VERBOSITY_SHIFT) | (default_logging ? GUC_LOG_CONTROL_DEFAULT_LOGGING : 0) }; GEM_BUG_ON(verbosity > GUC_LOG_VERBOSITY_MAX); return intel_guc_send(guc, action, ARRAY_SIZE(action)); } /* * Sub buffer switch callback. Called whenever relay has to switch to a new * sub buffer, relay stays on the same sub buffer if 0 is returned. */ static int subbuf_start_callback(struct rchan_buf *buf, void *subbuf, void *prev_subbuf, size_t prev_padding) { /* * Use no-overwrite mode by default, where relay will stop accepting * new data if there are no empty sub buffers left. * There is no strict synchronization enforced by relay between Consumer * and Producer. In overwrite mode, there is a possibility of getting * inconsistent/garbled data, the producer could be writing on to the * same sub buffer from which Consumer is reading. This can't be avoided * unless Consumer is fast enough and can always run in tandem with * Producer. */ if (relay_buf_full(buf)) return 0; return 1; } /* * file_create() callback. Creates relay file in debugfs. */ static struct dentry *create_buf_file_callback(const char *filename, struct dentry *parent, umode_t mode, struct rchan_buf *buf, int *is_global) { struct dentry *buf_file; /* * This to enable the use of a single buffer for the relay channel and * correspondingly have a single file exposed to User, through which * it can collect the logs in order without any post-processing. * Need to set 'is_global' even if parent is NULL for early logging. 
*/ *is_global = 1; if (!parent) return NULL; buf_file = debugfs_create_file(filename, mode, parent, buf, &relay_file_operations); if (IS_ERR(buf_file)) return NULL; return buf_file; } /* * file_remove() default callback. Removes relay file in debugfs. */ static int remove_buf_file_callback(struct dentry *dentry) { debugfs_remove(dentry); return 0; } /* relay channel callbacks */ static const struct rchan_callbacks relay_callbacks = { .subbuf_start = subbuf_start_callback, .create_buf_file = create_buf_file_callback, .remove_buf_file = remove_buf_file_callback, }; static void guc_move_to_next_buf(struct intel_guc_log *log) { /* * Make sure the updates made in the sub buffer are visible when * Consumer sees the following update to offset inside the sub buffer. */ smp_wmb(); /* All data has been written, so now move the offset of sub buffer. */ relay_reserve(log->relay.channel, log->vma->obj->base.size - intel_guc_log_section_size_capture(log)); /* Switch to the next sub buffer */ relay_flush(log->relay.channel); } static void *guc_get_write_buffer(struct intel_guc_log *log) { /* * Just get the base address of a new sub buffer and copy data into it * ourselves. NULL will be returned in no-overwrite mode, if all sub * buffers are full. Could have used the relay_write() to indirectly * copy the data, but that would have been bit convoluted, as we need to * write to only certain locations inside a sub buffer which cannot be * done without using relay_reserve() along with relay_write(). So its * better to use relay_reserve() alone. */ return relay_reserve(log->relay.channel, 0); } bool intel_guc_check_log_buf_overflow(struct intel_guc_log *log, enum guc_log_buffer_type type, unsigned int full_cnt) { unsigned int prev_full_cnt = log->stats[type].sampled_overflow; bool overflow = false; if (full_cnt != prev_full_cnt) { overflow = true; log->stats[type].overflow = full_cnt; log->stats[type].sampled_overflow += full_cnt - prev_full_cnt; if (full_cnt < prev_full_cnt) { /* buffer_full_cnt is a 4 bit counter */ log->stats[type].sampled_overflow += 16; } guc_notice_ratelimited(log_to_guc(log), "log buffer overflow\n"); } return overflow; } unsigned int intel_guc_get_log_buffer_size(struct intel_guc_log *log, enum guc_log_buffer_type type) { switch (type) { case GUC_DEBUG_LOG_BUFFER: return intel_guc_log_section_size_debug(log); case GUC_CRASH_DUMP_LOG_BUFFER: return intel_guc_log_section_size_crash(log); case GUC_CAPTURE_LOG_BUFFER: return intel_guc_log_section_size_capture(log); default: MISSING_CASE(type); } return 0; } size_t intel_guc_get_log_buffer_offset(struct intel_guc_log *log, enum guc_log_buffer_type type) { enum guc_log_buffer_type i; size_t offset = PAGE_SIZE;/* for the log_buffer_states */ for (i = GUC_DEBUG_LOG_BUFFER; i < GUC_MAX_LOG_BUFFER; ++i) { if (i == type) break; offset += intel_guc_get_log_buffer_size(log, i); } return offset; } static void _guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); unsigned int buffer_size, read_offset, write_offset, bytes_to_copy, full_cnt; struct guc_log_buffer_state *log_buf_state, *log_buf_snapshot_state; struct guc_log_buffer_state log_buf_state_local; enum guc_log_buffer_type type; void *src_data, *dst_data; bool new_overflow; mutex_lock(&log->relay.lock); if (guc_WARN_ON(guc, !intel_guc_log_relay_created(log))) goto out_unlock; /* Get the pointer to shared GuC log buffer */ src_data = log->buf_addr; log_buf_state = src_data; /* Get the pointer to local buffer to store the logs */ 
log_buf_snapshot_state = dst_data = guc_get_write_buffer(log); if (unlikely(!log_buf_snapshot_state)) { /* * Used rate limited to avoid deluge of messages, logs might be * getting consumed by User at a slow rate. */ guc_err_ratelimited(guc, "no sub-buffer to copy general logs\n"); log->relay.full_count++; goto out_unlock; } /* Actual logs are present from the 2nd page */ src_data += PAGE_SIZE; dst_data += PAGE_SIZE; /* For relay logging, we exclude error state capture */ for (type = GUC_DEBUG_LOG_BUFFER; type <= GUC_CRASH_DUMP_LOG_BUFFER; type++) { /* * Make a copy of the state structure, inside GuC log buffer * (which is uncached mapped), on the stack to avoid reading * from it multiple times. */ memcpy(&log_buf_state_local, log_buf_state, sizeof(struct guc_log_buffer_state)); buffer_size = intel_guc_get_log_buffer_size(log, type); read_offset = log_buf_state_local.read_ptr; write_offset = log_buf_state_local.sampled_write_ptr; full_cnt = log_buf_state_local.buffer_full_cnt; /* Bookkeeping stuff */ log->stats[type].flush += log_buf_state_local.flush_to_file; new_overflow = intel_guc_check_log_buf_overflow(log, type, full_cnt); /* Update the state of shared log buffer */ log_buf_state->read_ptr = write_offset; log_buf_state->flush_to_file = 0; log_buf_state++; /* First copy the state structure in snapshot buffer */ memcpy(log_buf_snapshot_state, &log_buf_state_local, sizeof(struct guc_log_buffer_state)); /* * The write pointer could have been updated by GuC firmware, * after sending the flush interrupt to Host, for consistency * set write pointer value to same value of sampled_write_ptr * in the snapshot buffer. */ log_buf_snapshot_state->write_ptr = write_offset; log_buf_snapshot_state++; /* Now copy the actual logs. */ if (unlikely(new_overflow)) { /* copy the whole buffer in case of overflow */ read_offset = 0; write_offset = buffer_size; } else if (unlikely((read_offset > buffer_size) || (write_offset > buffer_size))) { guc_err(guc, "invalid log buffer state\n"); /* copy whole buffer as offsets are unreliable */ read_offset = 0; write_offset = buffer_size; } /* Just copy the newly written data */ if (read_offset > write_offset) { i915_memcpy_from_wc(dst_data, src_data, write_offset); bytes_to_copy = buffer_size - read_offset; } else { bytes_to_copy = write_offset - read_offset; } i915_memcpy_from_wc(dst_data + read_offset, src_data + read_offset, bytes_to_copy); src_data += buffer_size; dst_data += buffer_size; } guc_move_to_next_buf(log); out_unlock: mutex_unlock(&log->relay.lock); } static void copy_debug_logs_work(struct work_struct *work) { struct intel_guc_log *log = container_of(work, struct intel_guc_log, relay.flush_work); guc_log_copy_debuglogs_for_relay(log); } static int guc_log_relay_map(struct intel_guc_log *log) { lockdep_assert_held(&log->relay.lock); if (!log->vma || !log->buf_addr) return -ENODEV; /* * WC vmalloc mapping of log buffer pages was done at * GuC Log Init time, but lets keep a ref for book-keeping */ i915_gem_object_get(log->vma->obj); log->relay.buf_in_use = true; return 0; } static void guc_log_relay_unmap(struct intel_guc_log *log) { lockdep_assert_held(&log->relay.lock); i915_gem_object_put(log->vma->obj); log->relay.buf_in_use = false; } void intel_guc_log_init_early(struct intel_guc_log *log) { mutex_init(&log->relay.lock); INIT_WORK(&log->relay.flush_work, copy_debug_logs_work); log->relay.started = false; } static int guc_log_relay_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = 
guc_to_i915(guc); struct rchan *guc_log_relay_chan; size_t n_subbufs, subbuf_size; int ret; lockdep_assert_held(&log->relay.lock); GEM_BUG_ON(!log->vma); /* * Keep the size of sub buffers same as shared log buffer * but GuC log-events excludes the error-state-capture logs */ subbuf_size = log->vma->size - intel_guc_log_section_size_capture(log); /* * Store up to 8 snapshots, which is large enough to buffer sufficient * boot time logs and provides enough leeway to User, in terms of * latency, for consuming the logs from relay. Also doesn't take * up too much memory. */ n_subbufs = 8; if (!guc->dbgfs_node) return -ENOENT; guc_log_relay_chan = relay_open("guc_log", guc->dbgfs_node, subbuf_size, n_subbufs, &relay_callbacks, i915); if (!guc_log_relay_chan) { guc_err(guc, "Couldn't create relay channel for logging\n"); ret = -ENOMEM; return ret; } GEM_BUG_ON(guc_log_relay_chan->subbuf_size < subbuf_size); log->relay.channel = guc_log_relay_chan; return 0; } static void guc_log_relay_destroy(struct intel_guc_log *log) { lockdep_assert_held(&log->relay.lock); relay_close(log->relay.channel); log->relay.channel = NULL; } static void guc_log_copy_debuglogs_for_relay(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_i915(guc); intel_wakeref_t wakeref; _guc_log_copy_debuglogs_for_relay(log); /* * Generally device is expected to be active only at this * time, so get/put should be really quick. */ with_intel_runtime_pm(&i915->runtime_pm, wakeref) guc_action_flush_log_complete(guc); } static u32 __get_default_log_level(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_i915(guc); /* A negative value means "use platform/config default" */ if (i915->params.guc_log_level < 0) { return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_NON_VERBOSE; } if (i915->params.guc_log_level > GUC_LOG_LEVEL_MAX) { guc_warn(guc, "Log verbosity param out of range: %d > %d!\n", i915->params.guc_log_level, GUC_LOG_LEVEL_MAX); return (IS_ENABLED(CONFIG_DRM_I915_DEBUG) || IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) ? 
GUC_LOG_LEVEL_MAX : GUC_LOG_LEVEL_DISABLED; } GEM_BUG_ON(i915->params.guc_log_level < GUC_LOG_LEVEL_DISABLED); GEM_BUG_ON(i915->params.guc_log_level > GUC_LOG_LEVEL_MAX); return i915->params.guc_log_level; } int intel_guc_log_create(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct i915_vma *vma; void *vaddr; u32 guc_log_size; int ret; GEM_BUG_ON(log->vma); guc_log_size = intel_guc_log_size(log); vma = intel_guc_allocate_vma(guc, guc_log_size); if (IS_ERR(vma)) { ret = PTR_ERR(vma); goto err; } log->vma = vma; /* * Create a WC (Uncached for read) vmalloc mapping up front immediate access to * data from memory during critical events such as error capture */ vaddr = i915_gem_object_pin_map_unlocked(log->vma->obj, I915_MAP_WC); if (IS_ERR(vaddr)) { ret = PTR_ERR(vaddr); i915_vma_unpin_and_release(&log->vma, 0); goto err; } log->buf_addr = vaddr; log->level = __get_default_log_level(log); guc_dbg(guc, "guc_log_level=%d (%s, verbose:%s, verbosity:%d)\n", log->level, str_enabled_disabled(log->level), str_yes_no(GUC_LOG_LEVEL_IS_VERBOSE(log->level)), GUC_LOG_LEVEL_TO_VERBOSITY(log->level)); return 0; err: guc_err(guc, "Failed to allocate or map log buffer %pe\n", ERR_PTR(ret)); return ret; } void intel_guc_log_destroy(struct intel_guc_log *log) { log->buf_addr = NULL; i915_vma_unpin_and_release(&log->vma, I915_VMA_RELEASE_MAP); } int intel_guc_log_set_level(struct intel_guc_log *log, u32 level) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_i915(guc); intel_wakeref_t wakeref; int ret = 0; BUILD_BUG_ON(GUC_LOG_VERBOSITY_MIN != 0); GEM_BUG_ON(!log->vma); /* * GuC is recognizing log levels starting from 0 to max, we're using 0 * as indication that logging should be disabled. */ if (level < GUC_LOG_LEVEL_DISABLED || level > GUC_LOG_LEVEL_MAX) return -EINVAL; mutex_lock(&i915->drm.struct_mutex); if (log->level == level) goto out_unlock; with_intel_runtime_pm(&i915->runtime_pm, wakeref) ret = guc_action_control_log(guc, GUC_LOG_LEVEL_IS_VERBOSE(level), GUC_LOG_LEVEL_IS_ENABLED(level), GUC_LOG_LEVEL_TO_VERBOSITY(level)); if (ret) { guc_dbg(guc, "guc_log_control action failed %pe\n", ERR_PTR(ret)); goto out_unlock; } log->level = level; out_unlock: mutex_unlock(&i915->drm.struct_mutex); return ret; } bool intel_guc_log_relay_created(const struct intel_guc_log *log) { return log->buf_addr; } int intel_guc_log_relay_open(struct intel_guc_log *log) { int ret; if (!log->vma) return -ENODEV; mutex_lock(&log->relay.lock); if (intel_guc_log_relay_created(log)) { ret = -EEXIST; goto out_unlock; } /* * We require SSE 4.1 for fast reads from the GuC log buffer and * it should be present on the chipsets supporting GuC based * submissions. */ if (!i915_has_memcpy_from_wc()) { ret = -ENXIO; goto out_unlock; } ret = guc_log_relay_create(log); if (ret) goto out_unlock; ret = guc_log_relay_map(log); if (ret) goto out_relay; mutex_unlock(&log->relay.lock); return 0; out_relay: guc_log_relay_destroy(log); out_unlock: mutex_unlock(&log->relay.lock); return ret; } int intel_guc_log_relay_start(struct intel_guc_log *log) { if (log->relay.started) return -EEXIST; /* * When GuC is logging without us relaying to userspace, we're ignoring * the flush notification. This means that we need to unconditionally * flush on relay enabling, since GuC only notifies us once. 
*/ queue_work(system_highpri_wq, &log->relay.flush_work); log->relay.started = true; return 0; } void intel_guc_log_relay_flush(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); intel_wakeref_t wakeref; if (!log->relay.started) return; /* * Before initiating the forceful flush, wait for any pending/ongoing * flush to complete otherwise forceful flush may not actually happen. */ flush_work(&log->relay.flush_work); with_intel_runtime_pm(guc_to_gt(guc)->uncore->rpm, wakeref) guc_action_flush_log(guc); /* GuC would have updated log buffer by now, so copy it */ guc_log_copy_debuglogs_for_relay(log); } /* * Stops the relay log. Called from intel_guc_log_relay_close(), so no * possibility of race with start/flush since relay_write cannot race * relay_close. */ static void guc_log_relay_stop(struct intel_guc_log *log) { struct intel_guc *guc = log_to_guc(log); struct drm_i915_private *i915 = guc_to_i915(guc); if (!log->relay.started) return; intel_synchronize_irq(i915); flush_work(&log->relay.flush_work); log->relay.started = false; } void intel_guc_log_relay_close(struct intel_guc_log *log) { guc_log_relay_stop(log); mutex_lock(&log->relay.lock); GEM_BUG_ON(!intel_guc_log_relay_created(log)); guc_log_relay_unmap(log); guc_log_relay_destroy(log); mutex_unlock(&log->relay.lock); } void intel_guc_log_handle_flush_event(struct intel_guc_log *log) { if (log->relay.started) queue_work(system_highpri_wq, &log->relay.flush_work); } static const char * stringify_guc_log_type(enum guc_log_buffer_type type) { switch (type) { case GUC_DEBUG_LOG_BUFFER: return "DEBUG"; case GUC_CRASH_DUMP_LOG_BUFFER: return "CRASH"; case GUC_CAPTURE_LOG_BUFFER: return "CAPTURE"; default: MISSING_CASE(type); } return ""; } /** * intel_guc_log_info - dump information about GuC log relay * @log: the GuC log * @p: the &drm_printer * * Pretty printer for GuC log info */ void intel_guc_log_info(struct intel_guc_log *log, struct drm_printer *p) { enum guc_log_buffer_type type; if (!intel_guc_log_relay_created(log)) { drm_puts(p, "GuC log relay not created\n"); return; } drm_puts(p, "GuC logging stats:\n"); drm_printf(p, "\tRelay full count: %u\n", log->relay.full_count); for (type = GUC_DEBUG_LOG_BUFFER; type < GUC_MAX_LOG_BUFFER; type++) { drm_printf(p, "\t%s:\tflush count %10u, overflow count %10u\n", stringify_guc_log_type(type), log->stats[type].flush, log->stats[type].sampled_overflow); } } /** * intel_guc_log_dump - dump the contents of the GuC log * @log: the GuC log * @p: the &drm_printer * @dump_load_err: dump the log saved on GuC load error * * Pretty printer for the GuC log */ int intel_guc_log_dump(struct intel_guc_log *log, struct drm_printer *p, bool dump_load_err) { struct intel_guc *guc = log_to_guc(log); struct intel_uc *uc = container_of(guc, struct intel_uc, guc); struct drm_i915_gem_object *obj = NULL; void *map; u32 *page; int i, j; if (!intel_guc_is_supported(guc)) return -ENODEV; if (dump_load_err) obj = uc->load_err_log; else if (guc->log.vma) obj = guc->log.vma->obj; if (!obj) return 0; page = (u32 *)__get_free_page(GFP_KERNEL); if (!page) return -ENOMEM; intel_guc_dump_time_info(guc, p); map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC); if (IS_ERR(map)) { guc_dbg(guc, "Failed to pin log object: %pe\n", map); drm_puts(p, "(log data unaccessible)\n"); free_page((unsigned long)page); return PTR_ERR(map); } for (i = 0; i < obj->base.size; i += PAGE_SIZE) { if (!i915_memcpy_from_wc(page, map + i, PAGE_SIZE)) memcpy(page, map + i, PAGE_SIZE); for (j = 0; j < PAGE_SIZE / sizeof(u32); j 
+= 4) drm_printf(p, "0x%08x 0x%08x 0x%08x 0x%08x\n", *(page + j + 0), *(page + j + 1), *(page + j + 2), *(page + j + 3)); } drm_puts(p, "\n"); i915_gem_object_unpin_map(obj); free_page((unsigned long)page); return 0; }
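/*
 * Hedged usage sketch, not part of the original file: one plausible way a
 * debug path could feed intel_guc_log_dump() a printer built with
 * drm_info_printer() from <drm/drm_print.h>. Treat the helper name and the
 * calling context as assumptions for illustration only.
 */
static void example_dump_guc_log(struct intel_guc *guc)
{
	struct drm_i915_private *i915 = guc_to_i915(guc);
	struct drm_printer p = drm_info_printer(i915->drm.dev);
	int err;

	/* Dump the live log buffer; pass true instead for the load-error log. */
	err = intel_guc_log_dump(&guc->log, &p, false);
	if (err)
		drm_printf(&p, "GuC log dump failed: %d\n", err);
}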
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Broadcom Corporation * */ #include <linux/module.h> #include <linux/init.h> #include <linux/input.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/keyboard.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> #include <asm/irq.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/serio.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #define IPROC_TS_NAME "iproc-ts" #define PEN_DOWN_STATUS 1 #define PEN_UP_STATUS 0 #define X_MIN 0 #define Y_MIN 0 #define X_MAX 0xFFF #define Y_MAX 0xFFF /* Value given by controller for invalid coordinate. */ #define INVALID_COORD 0xFFFFFFFF /* Register offsets */ #define REGCTL1 0x00 #define REGCTL2 0x04 #define INTERRUPT_THRES 0x08 #define INTERRUPT_MASK 0x0c #define INTERRUPT_STATUS 0x10 #define CONTROLLER_STATUS 0x14 #define FIFO_DATA 0x18 #define FIFO_DATA_X_Y_MASK 0xFFFF #define ANALOG_CONTROL 0x1c #define AUX_DATA 0x20 #define DEBOUNCE_CNTR_STAT 0x24 #define SCAN_CNTR_STAT 0x28 #define REM_CNTR_STAT 0x2c #define SETTLING_TIMER_STAT 0x30 #define SPARE_REG 0x34 #define SOFT_BYPASS_CONTROL 0x38 #define SOFT_BYPASS_DATA 0x3c /* Bit values for INTERRUPT_MASK and INTERRUPT_STATUS regs */ #define TS_PEN_INTR_MASK BIT(0) #define TS_FIFO_INTR_MASK BIT(2) /* Bit values for CONTROLLER_STATUS reg1 */ #define TS_PEN_DOWN BIT(0) /* Shift values for control reg1 */ #define SCANNING_PERIOD_SHIFT 24 #define DEBOUNCE_TIMEOUT_SHIFT 16 #define SETTLING_TIMEOUT_SHIFT 8 #define TOUCH_TIMEOUT_SHIFT 0 /* Shift values for coordinates from fifo */ #define X_COORD_SHIFT 0 #define Y_COORD_SHIFT 16 /* Bit values for REGCTL2 */ #define TS_CONTROLLER_EN_BIT BIT(16) #define TS_CONTROLLER_AVGDATA_SHIFT 8 #define TS_CONTROLLER_AVGDATA_MASK (0x7 << TS_CONTROLLER_AVGDATA_SHIFT) #define TS_CONTROLLER_PWR_LDO BIT(5) #define TS_CONTROLLER_PWR_ADC BIT(4) #define TS_CONTROLLER_PWR_BGP BIT(3) #define TS_CONTROLLER_PWR_TS BIT(2) #define TS_WIRE_MODE_BIT BIT(1) #define dbg_reg(dev, priv, reg) \ do { \ u32 val; \ regmap_read(priv->regmap, reg, &val); \ dev_dbg(dev, "%20s= 0x%08x\n", #reg, val); \ } while (0) struct tsc_param { /* Each step is 1024 us. Valid 1-256 */ u32 scanning_period; /* Each step is 512 us. Valid 0-255 */ u32 debounce_timeout; /* * The settling duration (in ms) is the amount of time the tsc * waits to allow the voltage to settle after turning on the * drivers in detection mode. Valid values: 0-11 * 0 = 0.008 ms * 1 = 0.01 ms * 2 = 0.02 ms * 3 = 0.04 ms * 4 = 0.08 ms * 5 = 0.16 ms * 6 = 0.32 ms * 7 = 0.64 ms * 8 = 1.28 ms * 9 = 2.56 ms * 10 = 5.12 ms * 11 = 10.24 ms */ u32 settling_timeout; /* touch timeout in sample counts */ u32 touch_timeout; /* * Number of data samples which are averaged before a final data point * is placed into the FIFO */ u32 average_data; /* FIFO threshold */ u32 fifo_threshold; /* Optional standard touchscreen properties. */ u32 max_x; u32 max_y; u32 fuzz_x; u32 fuzz_y; bool invert_x; bool invert_y; }; struct iproc_ts_priv { struct platform_device *pdev; struct input_dev *idev; struct regmap *regmap; struct clk *tsc_clk; int pen_status; struct tsc_param cfg_params; }; /* * Set default values the same as hardware reset values * except for fifo_threshold with is set to 1. 
*/ static const struct tsc_param iproc_default_config = { .scanning_period = 0x5, /* 1 to 256 */ .debounce_timeout = 0x28, /* 0 to 255 */ .settling_timeout = 0x7, /* 0 to 11 */ .touch_timeout = 0xa, /* 0 to 255 */ .average_data = 5, /* entry 5 = 32 pts */ .fifo_threshold = 1, /* 0 to 31 */ .max_x = X_MAX, .max_y = Y_MAX, }; static void ts_reg_dump(struct iproc_ts_priv *priv) { struct device *dev = &priv->pdev->dev; dbg_reg(dev, priv, REGCTL1); dbg_reg(dev, priv, REGCTL2); dbg_reg(dev, priv, INTERRUPT_THRES); dbg_reg(dev, priv, INTERRUPT_MASK); dbg_reg(dev, priv, INTERRUPT_STATUS); dbg_reg(dev, priv, CONTROLLER_STATUS); dbg_reg(dev, priv, FIFO_DATA); dbg_reg(dev, priv, ANALOG_CONTROL); dbg_reg(dev, priv, AUX_DATA); dbg_reg(dev, priv, DEBOUNCE_CNTR_STAT); dbg_reg(dev, priv, SCAN_CNTR_STAT); dbg_reg(dev, priv, REM_CNTR_STAT); dbg_reg(dev, priv, SETTLING_TIMER_STAT); dbg_reg(dev, priv, SPARE_REG); dbg_reg(dev, priv, SOFT_BYPASS_CONTROL); dbg_reg(dev, priv, SOFT_BYPASS_DATA); } static irqreturn_t iproc_touchscreen_interrupt(int irq, void *data) { struct platform_device *pdev = data; struct iproc_ts_priv *priv = platform_get_drvdata(pdev); u32 intr_status; u32 raw_coordinate; u16 x; u16 y; int i; bool needs_sync = false; regmap_read(priv->regmap, INTERRUPT_STATUS, &intr_status); intr_status &= TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK; if (intr_status == 0) return IRQ_NONE; /* Clear all interrupt status bits, write-1-clear */ regmap_write(priv->regmap, INTERRUPT_STATUS, intr_status); /* Pen up/down */ if (intr_status & TS_PEN_INTR_MASK) { regmap_read(priv->regmap, CONTROLLER_STATUS, &priv->pen_status); if (priv->pen_status & TS_PEN_DOWN) priv->pen_status = PEN_DOWN_STATUS; else priv->pen_status = PEN_UP_STATUS; input_report_key(priv->idev, BTN_TOUCH, priv->pen_status); needs_sync = true; dev_dbg(&priv->pdev->dev, "pen up-down (%d)\n", priv->pen_status); } /* coordinates in FIFO exceed the threshold */ if (intr_status & TS_FIFO_INTR_MASK) { for (i = 0; i < priv->cfg_params.fifo_threshold; i++) { regmap_read(priv->regmap, FIFO_DATA, &raw_coordinate); if (raw_coordinate == INVALID_COORD) continue; /* * The x and y coordinate are 16 bits each * with the x in the lower 16 bits and y in the * upper 16 bits. */ x = (raw_coordinate >> X_COORD_SHIFT) & FIFO_DATA_X_Y_MASK; y = (raw_coordinate >> Y_COORD_SHIFT) & FIFO_DATA_X_Y_MASK; /* We only want to retain the 12 msb of the 16 */ x = (x >> 4) & 0x0FFF; y = (y >> 4) & 0x0FFF; /* Adjust x y according to LCD tsc mount angle. 
*/ if (priv->cfg_params.invert_x) x = priv->cfg_params.max_x - x; if (priv->cfg_params.invert_y) y = priv->cfg_params.max_y - y; input_report_abs(priv->idev, ABS_X, x); input_report_abs(priv->idev, ABS_Y, y); needs_sync = true; dev_dbg(&priv->pdev->dev, "xy (0x%x 0x%x)\n", x, y); } } if (needs_sync) input_sync(priv->idev); return IRQ_HANDLED; } static int iproc_ts_start(struct input_dev *idev) { u32 val; u32 mask; int error; struct iproc_ts_priv *priv = input_get_drvdata(idev); /* Enable clock */ error = clk_prepare_enable(priv->tsc_clk); if (error) { dev_err(&priv->pdev->dev, "%s clk_prepare_enable failed %d\n", __func__, error); return error; } /* * Interrupt is generated when: * FIFO reaches the int_th value, and pen event(up/down) */ val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK; regmap_update_bits(priv->regmap, INTERRUPT_MASK, val, val); val = priv->cfg_params.fifo_threshold; regmap_write(priv->regmap, INTERRUPT_THRES, val); /* Initialize control reg1 */ val = 0; val |= priv->cfg_params.scanning_period << SCANNING_PERIOD_SHIFT; val |= priv->cfg_params.debounce_timeout << DEBOUNCE_TIMEOUT_SHIFT; val |= priv->cfg_params.settling_timeout << SETTLING_TIMEOUT_SHIFT; val |= priv->cfg_params.touch_timeout << TOUCH_TIMEOUT_SHIFT; regmap_write(priv->regmap, REGCTL1, val); /* Try to clear all interrupt status */ val = TS_FIFO_INTR_MASK | TS_PEN_INTR_MASK; regmap_update_bits(priv->regmap, INTERRUPT_STATUS, val, val); /* Initialize control reg2 */ val = TS_CONTROLLER_EN_BIT | TS_WIRE_MODE_BIT; val |= priv->cfg_params.average_data << TS_CONTROLLER_AVGDATA_SHIFT; mask = (TS_CONTROLLER_AVGDATA_MASK); mask |= (TS_CONTROLLER_PWR_LDO | /* PWR up LDO */ TS_CONTROLLER_PWR_ADC | /* PWR up ADC */ TS_CONTROLLER_PWR_BGP | /* PWR up BGP */ TS_CONTROLLER_PWR_TS); /* PWR up TS */ mask |= val; regmap_update_bits(priv->regmap, REGCTL2, mask, val); ts_reg_dump(priv); return 0; } static void iproc_ts_stop(struct input_dev *dev) { u32 val; struct iproc_ts_priv *priv = input_get_drvdata(dev); /* * Disable FIFO int_th and pen event(up/down)Interrupts only * as the interrupt mask register is shared between ADC, TS and * flextimer. 
*/ val = TS_PEN_INTR_MASK | TS_FIFO_INTR_MASK; regmap_update_bits(priv->regmap, INTERRUPT_MASK, val, 0); /* Only power down touch screen controller */ val = TS_CONTROLLER_PWR_TS; regmap_update_bits(priv->regmap, REGCTL2, val, val); clk_disable(priv->tsc_clk); } static int iproc_get_tsc_config(struct device *dev, struct iproc_ts_priv *priv) { struct device_node *np = dev->of_node; u32 val; priv->cfg_params = iproc_default_config; if (!np) return 0; if (of_property_read_u32(np, "scanning_period", &val) >= 0) { if (val < 1 || val > 256) { dev_err(dev, "scanning_period (%u) must be [1-256]\n", val); return -EINVAL; } priv->cfg_params.scanning_period = val; } if (of_property_read_u32(np, "debounce_timeout", &val) >= 0) { if (val > 255) { dev_err(dev, "debounce_timeout (%u) must be [0-255]\n", val); return -EINVAL; } priv->cfg_params.debounce_timeout = val; } if (of_property_read_u32(np, "settling_timeout", &val) >= 0) { if (val > 11) { dev_err(dev, "settling_timeout (%u) must be [0-11]\n", val); return -EINVAL; } priv->cfg_params.settling_timeout = val; } if (of_property_read_u32(np, "touch_timeout", &val) >= 0) { if (val > 255) { dev_err(dev, "touch_timeout (%u) must be [0-255]\n", val); return -EINVAL; } priv->cfg_params.touch_timeout = val; } if (of_property_read_u32(np, "average_data", &val) >= 0) { if (val > 8) { dev_err(dev, "average_data (%u) must be [0-8]\n", val); return -EINVAL; } priv->cfg_params.average_data = val; } if (of_property_read_u32(np, "fifo_threshold", &val) >= 0) { if (val > 31) { dev_err(dev, "fifo_threshold (%u)) must be [0-31]\n", val); return -EINVAL; } priv->cfg_params.fifo_threshold = val; } /* Parse optional properties. */ of_property_read_u32(np, "touchscreen-size-x", &priv->cfg_params.max_x); of_property_read_u32(np, "touchscreen-size-y", &priv->cfg_params.max_y); of_property_read_u32(np, "touchscreen-fuzz-x", &priv->cfg_params.fuzz_x); of_property_read_u32(np, "touchscreen-fuzz-y", &priv->cfg_params.fuzz_y); priv->cfg_params.invert_x = of_property_read_bool(np, "touchscreen-inverted-x"); priv->cfg_params.invert_y = of_property_read_bool(np, "touchscreen-inverted-y"); return 0; } static int iproc_ts_probe(struct platform_device *pdev) { struct iproc_ts_priv *priv; struct input_dev *idev; int irq; int error; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; /* touchscreen controller memory mapped regs via syscon*/ priv->regmap = syscon_regmap_lookup_by_phandle(pdev->dev.of_node, "ts_syscon"); if (IS_ERR(priv->regmap)) { error = PTR_ERR(priv->regmap); dev_err(&pdev->dev, "unable to map I/O memory:%d\n", error); return error; } priv->tsc_clk = devm_clk_get(&pdev->dev, "tsc_clk"); if (IS_ERR(priv->tsc_clk)) { error = PTR_ERR(priv->tsc_clk); dev_err(&pdev->dev, "failed getting clock tsc_clk: %d\n", error); return error; } priv->pdev = pdev; error = iproc_get_tsc_config(&pdev->dev, priv); if (error) { dev_err(&pdev->dev, "get_tsc_config failed: %d\n", error); return error; } idev = devm_input_allocate_device(&pdev->dev); if (!idev) { dev_err(&pdev->dev, "failed to allocate input device\n"); return -ENOMEM; } priv->idev = idev; priv->pen_status = PEN_UP_STATUS; /* Set input device info */ idev->name = IPROC_TS_NAME; idev->dev.parent = &pdev->dev; idev->id.bustype = BUS_HOST; idev->id.vendor = SERIO_UNKNOWN; idev->id.product = 0; idev->id.version = 0; idev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, idev->keybit); input_set_abs_params(idev, ABS_X, X_MIN, priv->cfg_params.max_x, priv->cfg_params.fuzz_x, 0); 
input_set_abs_params(idev, ABS_Y, Y_MIN, priv->cfg_params.max_y, priv->cfg_params.fuzz_y, 0); idev->open = iproc_ts_start; idev->close = iproc_ts_stop; input_set_drvdata(idev, priv); platform_set_drvdata(pdev, priv); /* get interrupt */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; error = devm_request_irq(&pdev->dev, irq, iproc_touchscreen_interrupt, IRQF_SHARED, IPROC_TS_NAME, pdev); if (error) return error; error = input_register_device(priv->idev); if (error) { dev_err(&pdev->dev, "failed to register input device: %d\n", error); return error; } return 0; } static const struct of_device_id iproc_ts_of_match[] = { {.compatible = "brcm,iproc-touchscreen", }, { }, }; MODULE_DEVICE_TABLE(of, iproc_ts_of_match); static struct platform_driver iproc_ts_driver = { .probe = iproc_ts_probe, .driver = { .name = IPROC_TS_NAME, .of_match_table = iproc_ts_of_match, }, }; module_platform_driver(iproc_ts_driver); MODULE_DESCRIPTION("IPROC Touchscreen driver"); MODULE_AUTHOR("Broadcom"); MODULE_LICENSE("GPL v2");
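/*
 * Illustrative, self-contained restatement of the coordinate decode done in
 * iproc_touchscreen_interrupt(): x sits in bits [15:0] of the FIFO word, y in
 * bits [31:16], and only the 12 MSBs of each 16-bit sample are kept before the
 * optional axis inversion. This helper is not part of the driver; it only
 * mirrors that math so it can be checked in isolation.
 */
#include <stdint.h>
#include <stdio.h>

static void decode_iproc_fifo_word(uint32_t raw, uint16_t max_x, uint16_t max_y,
				   int invert_x, int invert_y,
				   uint16_t *x_out, uint16_t *y_out)
{
	uint16_t x = (raw >> 0) & 0xFFFF;	/* X_COORD_SHIFT / FIFO_DATA_X_Y_MASK */
	uint16_t y = (raw >> 16) & 0xFFFF;	/* Y_COORD_SHIFT / FIFO_DATA_X_Y_MASK */

	/* Retain only the 12 most significant bits of each 16-bit sample. */
	x = (x >> 4) & 0x0FFF;
	y = (y >> 4) & 0x0FFF;

	/* Axis inversion, as selected by touchscreen-inverted-x/y. */
	if (invert_x)
		x = max_x - x;
	if (invert_y)
		y = max_y - y;

	*x_out = x;
	*y_out = y;
}

int main(void)
{
	uint16_t x, y;

	decode_iproc_fifo_word(0x8000a000, 0xFFF, 0xFFF, 0, 0, &x, &y);
	printf("x=0x%03x y=0x%03x\n", x, y);	/* expected: x=0xa00 y=0x800 */
	return 0;
}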
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) Rockchip Electronics Co.Ltd * Author: * Algea Cao <[email protected]> */ #ifndef __DW_HDMI_QP_H__ #define __DW_HDMI_QP_H__ #include <linux/bits.h> /* Main Unit Registers */ #define CORE_ID 0x0 #define VER_NUMBER 0x4 #define VER_TYPE 0x8 #define CONFIG_REG 0xc #define CONFIG_CEC BIT(28) #define CONFIG_AUD_UD BIT(23) #define CORE_TIMESTAMP_HHMM 0x14 #define CORE_TIMESTAMP_MMDD 0x18 #define CORE_TIMESTAMP_YYYY 0x1c /* Reset Manager Registers */ #define GLOBAL_SWRESET_REQUEST 0x40 #define EARCRX_CMDC_SWINIT_P BIT(27) #define AVP_DATAPATH_PACKET_AUDIO_SWINIT_P BIT(10) #define GLOBAL_SWDISABLE 0x44 #define CEC_SWDISABLE BIT(17) #define AVP_DATAPATH_PACKET_AUDIO_SWDISABLE BIT(10) #define AVP_DATAPATH_VIDEO_SWDISABLE BIT(6) #define RESET_MANAGER_CONFIG0 0x48 #define RESET_MANAGER_STATUS0 0x50 #define RESET_MANAGER_STATUS1 0x54 #define RESET_MANAGER_STATUS2 0x58 /* Timer Base Registers */ #define TIMER_BASE_CONFIG0 0x80 #define TIMER_BASE_STATUS0 0x84 /* CMU Registers */ #define CMU_CONFIG0 0xa0 #define CMU_CONFIG1 0xa4 #define CMU_CONFIG2 0xa8 #define CMU_CONFIG3 0xac #define CMU_STATUS 0xb0 #define DISPLAY_CLK_MONITOR 0x3f #define DISPLAY_CLK_LOCKED 0X15 #define EARC_BPCLK_OFF BIT(9) #define AUDCLK_OFF BIT(7) #define LINKQPCLK_OFF BIT(5) #define VIDQPCLK_OFF BIT(3) #define IPI_CLK_OFF BIT(1) #define CMU_IPI_CLK_FREQ 0xb4 #define CMU_VIDQPCLK_FREQ 0xb8 #define CMU_LINKQPCLK_FREQ 0xbc #define CMU_AUDQPCLK_FREQ 0xc0 #define CMU_EARC_BPCLK_FREQ 0xc4 /* I2CM Registers */ #define I2CM_SM_SCL_CONFIG0 0xe0 #define I2CM_FM_SCL_CONFIG0 0xe4 #define I2CM_CONFIG0 0xe8 #define I2CM_CONTROL0 0xec #define I2CM_STATUS0 0xf0 #define I2CM_INTERFACE_CONTROL0 0xf4 #define I2CM_ADDR 0xff000 #define I2CM_SLVADDR 0xfe0 #define I2CM_WR_MASK 0x1e #define I2CM_EXT_READ BIT(4) #define I2CM_SHORT_READ BIT(3) #define I2CM_FM_READ BIT(2) #define I2CM_FM_WRITE BIT(1) #define I2CM_FM_EN BIT(0) #define I2CM_INTERFACE_CONTROL1 0xf8 #define I2CM_SEG_PTR 0x7f80 #define I2CM_SEG_ADDR 0x7f #define I2CM_INTERFACE_WRDATA_0_3 0xfc #define I2CM_INTERFACE_WRDATA_4_7 0x100 #define I2CM_INTERFACE_WRDATA_8_11 0x104 #define I2CM_INTERFACE_WRDATA_12_15 0x108 #define I2CM_INTERFACE_RDDATA_0_3 0x10c #define I2CM_INTERFACE_RDDATA_4_7 0x110 #define I2CM_INTERFACE_RDDATA_8_11 0x114 #define I2CM_INTERFACE_RDDATA_12_15 0x118 /* SCDC Registers */ #define SCDC_CONFIG0 0x140 #define SCDC_I2C_FM_EN BIT(12) #define SCDC_UPD_FLAGS_AUTO_CLR BIT(6) #define SCDC_UPD_FLAGS_POLL_EN BIT(4) #define SCDC_CONTROL0 0x148 #define SCDC_STATUS0 0x150 #define STATUS_UPDATE BIT(0) #define FRL_START BIT(4) #define FLT_UPDATE BIT(5) /* FLT Registers */ #define FLT_CONFIG0 0x160 #define FLT_CONFIG1 0x164 #define FLT_CONFIG2 0x168 #define FLT_CONTROL0 0x170 /* Main Unit 2 Registers */ #define MAINUNIT_STATUS0 0x180 /* Video Interface Registers */ #define VIDEO_INTERFACE_CONFIG0 0x800 #define VIDEO_INTERFACE_CONFIG1 0x804 #define VIDEO_INTERFACE_CONFIG2 0x808 #define VIDEO_INTERFACE_CONTROL0 0x80c #define VIDEO_INTERFACE_STATUS0 0x814 /* Video Packing Registers */ #define VIDEO_PACKING_CONFIG0 0x81c /* Audio Interface Registers */ #define AUDIO_INTERFACE_CONFIG0 0x820 #define AUD_IF_SEL_MSK 0x3 #define AUD_IF_SPDIF 0x2 #define AUD_IF_I2S 0x1 #define AUD_IF_PAI 0x0 #define AUD_FIFO_INIT_ON_OVF_MSK BIT(2) #define AUD_FIFO_INIT_ON_OVF_EN BIT(2) #define I2S_LINES_EN_MSK GENMASK(7, 4) #define I2S_LINES_EN(x) BIT((x) + 4) #define I2S_BPCUV_RCV_MSK BIT(12) #define I2S_BPCUV_RCV_EN BIT(12) #define I2S_BPCUV_RCV_DIS 0 #define 
SPDIF_LINES_EN GENMASK(19, 16) #define AUD_FORMAT_MSK GENMASK(26, 24) #define AUD_3DOBA (0x7 << 24) #define AUD_3DASP (0x6 << 24) #define AUD_MSOBA (0x5 << 24) #define AUD_MSASP (0x4 << 24) #define AUD_HBR (0x3 << 24) #define AUD_DST (0x2 << 24) #define AUD_OBA (0x1 << 24) #define AUD_ASP (0x0 << 24) #define AUDIO_INTERFACE_CONFIG1 0x824 #define AUDIO_INTERFACE_CONTROL0 0x82c #define AUDIO_FIFO_CLR_P BIT(0) #define AUDIO_INTERFACE_STATUS0 0x834 /* Frame Composer Registers */ #define FRAME_COMPOSER_CONFIG0 0x840 #define FRAME_COMPOSER_CONFIG1 0x844 #define FRAME_COMPOSER_CONFIG2 0x848 #define FRAME_COMPOSER_CONFIG3 0x84c #define FRAME_COMPOSER_CONFIG4 0x850 #define FRAME_COMPOSER_CONFIG5 0x854 #define FRAME_COMPOSER_CONFIG6 0x858 #define FRAME_COMPOSER_CONFIG7 0x85c #define FRAME_COMPOSER_CONFIG8 0x860 #define FRAME_COMPOSER_CONFIG9 0x864 #define FRAME_COMPOSER_CONTROL0 0x86c /* Video Monitor Registers */ #define VIDEO_MONITOR_CONFIG0 0x880 #define VIDEO_MONITOR_STATUS0 0x884 #define VIDEO_MONITOR_STATUS1 0x888 #define VIDEO_MONITOR_STATUS2 0x88c #define VIDEO_MONITOR_STATUS3 0x890 #define VIDEO_MONITOR_STATUS4 0x894 #define VIDEO_MONITOR_STATUS5 0x898 #define VIDEO_MONITOR_STATUS6 0x89c /* HDCP2 Logic Registers */ #define HDCP2LOGIC_CONFIG0 0x8e0 #define HDCP2_BYPASS BIT(0) #define HDCP2LOGIC_ESM_GPIO_IN 0x8e4 #define HDCP2LOGIC_ESM_GPIO_OUT 0x8e8 /* HDCP14 Registers */ #define HDCP14_CONFIG0 0x900 #define HDCP14_CONFIG1 0x904 #define HDCP14_CONFIG2 0x908 #define HDCP14_CONFIG3 0x90c #define HDCP14_KEY_SEED 0x914 #define HDCP14_KEY_H 0x918 #define HDCP14_KEY_L 0x91c #define HDCP14_KEY_STATUS 0x920 #define HDCP14_AKSV_H 0x924 #define HDCP14_AKSV_L 0x928 #define HDCP14_AN_H 0x92c #define HDCP14_AN_L 0x930 #define HDCP14_STATUS0 0x934 #define HDCP14_STATUS1 0x938 /* Scrambler Registers */ #define SCRAMB_CONFIG0 0x960 /* Video Configuration Registers */ #define LINK_CONFIG0 0x968 #define OPMODE_FRL_4LANES BIT(8) #define OPMODE_DVI BIT(4) #define OPMODE_FRL BIT(0) /* TMDS FIFO Registers */ #define TMDS_FIFO_CONFIG0 0x970 #define TMDS_FIFO_CONTROL0 0x974 /* FRL RSFEC Registers */ #define FRL_RSFEC_CONFIG0 0xa20 #define FRL_RSFEC_STATUS0 0xa30 /* FRL Packetizer Registers */ #define FRL_PKTZ_CONFIG0 0xa40 #define FRL_PKTZ_CONTROL0 0xa44 #define FRL_PKTZ_CONTROL1 0xa50 #define FRL_PKTZ_STATUS1 0xa54 /* Packet Scheduler Registers */ #define PKTSCHED_CONFIG0 0xa80 #define PKTSCHED_PRQUEUE0_CONFIG0 0xa84 #define PKTSCHED_PRQUEUE1_CONFIG0 0xa88 #define PKTSCHED_PRQUEUE2_CONFIG0 0xa8c #define PKTSCHED_PRQUEUE2_CONFIG1 0xa90 #define PKTSCHED_PRQUEUE2_CONFIG2 0xa94 #define PKTSCHED_PKT_CONFIG0 0xa98 #define PKTSCHED_PKT_CONFIG1 0xa9c #define PKTSCHED_DRMI_FIELDRATE BIT(13) #define PKTSCHED_AVI_FIELDRATE BIT(12) #define PKTSCHED_PKT_CONFIG2 0xaa0 #define PKTSCHED_PKT_CONFIG3 0xaa4 #define PKTSCHED_PKT_EN 0xaa8 #define PKTSCHED_DRMI_TX_EN BIT(17) #define PKTSCHED_AUDI_TX_EN BIT(15) #define PKTSCHED_AVI_TX_EN BIT(13) #define PKTSCHED_EMP_CVTEM_TX_EN BIT(10) #define PKTSCHED_AMD_TX_EN BIT(8) #define PKTSCHED_GCP_TX_EN BIT(3) #define PKTSCHED_AUDS_TX_EN BIT(2) #define PKTSCHED_ACR_TX_EN BIT(1) #define PKTSCHED_NULL_TX_EN BIT(0) #define PKTSCHED_PKT_CONTROL0 0xaac #define PKTSCHED_PKT_SEND 0xab0 #define PKTSCHED_PKT_STATUS0 0xab4 #define PKTSCHED_PKT_STATUS1 0xab8 #define PKT_NULL_CONTENTS0 0xb00 #define PKT_NULL_CONTENTS1 0xb04 #define PKT_NULL_CONTENTS2 0xb08 #define PKT_NULL_CONTENTS3 0xb0c #define PKT_NULL_CONTENTS4 0xb10 #define PKT_NULL_CONTENTS5 0xb14 #define PKT_NULL_CONTENTS6 0xb18 #define 
PKT_NULL_CONTENTS7 0xb1c #define PKT_ACP_CONTENTS0 0xb20 #define PKT_ACP_CONTENTS1 0xb24 #define PKT_ACP_CONTENTS2 0xb28 #define PKT_ACP_CONTENTS3 0xb2c #define PKT_ACP_CONTENTS4 0xb30 #define PKT_ACP_CONTENTS5 0xb34 #define PKT_ACP_CONTENTS6 0xb38 #define PKT_ACP_CONTENTS7 0xb3c #define PKT_ISRC1_CONTENTS0 0xb40 #define PKT_ISRC1_CONTENTS1 0xb44 #define PKT_ISRC1_CONTENTS2 0xb48 #define PKT_ISRC1_CONTENTS3 0xb4c #define PKT_ISRC1_CONTENTS4 0xb50 #define PKT_ISRC1_CONTENTS5 0xb54 #define PKT_ISRC1_CONTENTS6 0xb58 #define PKT_ISRC1_CONTENTS7 0xb5c #define PKT_ISRC2_CONTENTS0 0xb60 #define PKT_ISRC2_CONTENTS1 0xb64 #define PKT_ISRC2_CONTENTS2 0xb68 #define PKT_ISRC2_CONTENTS3 0xb6c #define PKT_ISRC2_CONTENTS4 0xb70 #define PKT_ISRC2_CONTENTS5 0xb74 #define PKT_ISRC2_CONTENTS6 0xb78 #define PKT_ISRC2_CONTENTS7 0xb7c #define PKT_GMD_CONTENTS0 0xb80 #define PKT_GMD_CONTENTS1 0xb84 #define PKT_GMD_CONTENTS2 0xb88 #define PKT_GMD_CONTENTS3 0xb8c #define PKT_GMD_CONTENTS4 0xb90 #define PKT_GMD_CONTENTS5 0xb94 #define PKT_GMD_CONTENTS6 0xb98 #define PKT_GMD_CONTENTS7 0xb9c #define PKT_AMD_CONTENTS0 0xba0 #define PKT_AMD_CONTENTS1 0xba4 #define PKT_AMD_CONTENTS2 0xba8 #define PKT_AMD_CONTENTS3 0xbac #define PKT_AMD_CONTENTS4 0xbb0 #define PKT_AMD_CONTENTS5 0xbb4 #define PKT_AMD_CONTENTS6 0xbb8 #define PKT_AMD_CONTENTS7 0xbbc #define PKT_VSI_CONTENTS0 0xbc0 #define PKT_VSI_CONTENTS1 0xbc4 #define PKT_VSI_CONTENTS2 0xbc8 #define PKT_VSI_CONTENTS3 0xbcc #define PKT_VSI_CONTENTS4 0xbd0 #define PKT_VSI_CONTENTS5 0xbd4 #define PKT_VSI_CONTENTS6 0xbd8 #define PKT_VSI_CONTENTS7 0xbdc #define PKT_AVI_CONTENTS0 0xbe0 #define HDMI_FC_AVICONF0_ACTIVE_FMT_INFO_PRESENT BIT(4) #define HDMI_FC_AVICONF0_BAR_DATA_VERT_BAR 0x04 #define HDMI_FC_AVICONF0_BAR_DATA_HORIZ_BAR 0x08 #define HDMI_FC_AVICONF2_IT_CONTENT_VALID 0x80 #define PKT_AVI_CONTENTS1 0xbe4 #define PKT_AVI_CONTENTS2 0xbe8 #define PKT_AVI_CONTENTS3 0xbec #define PKT_AVI_CONTENTS4 0xbf0 #define PKT_AVI_CONTENTS5 0xbf4 #define PKT_AVI_CONTENTS6 0xbf8 #define PKT_AVI_CONTENTS7 0xbfc #define PKT_SPDI_CONTENTS0 0xc00 #define PKT_SPDI_CONTENTS1 0xc04 #define PKT_SPDI_CONTENTS2 0xc08 #define PKT_SPDI_CONTENTS3 0xc0c #define PKT_SPDI_CONTENTS4 0xc10 #define PKT_SPDI_CONTENTS5 0xc14 #define PKT_SPDI_CONTENTS6 0xc18 #define PKT_SPDI_CONTENTS7 0xc1c #define PKT_AUDI_CONTENTS0 0xc20 #define PKT_AUDI_CONTENTS1 0xc24 #define PKT_AUDI_CONTENTS2 0xc28 #define PKT_AUDI_CONTENTS3 0xc2c #define PKT_AUDI_CONTENTS4 0xc30 #define PKT_AUDI_CONTENTS5 0xc34 #define PKT_AUDI_CONTENTS6 0xc38 #define PKT_AUDI_CONTENTS7 0xc3c #define PKT_NVI_CONTENTS0 0xc40 #define PKT_NVI_CONTENTS1 0xc44 #define PKT_NVI_CONTENTS2 0xc48 #define PKT_NVI_CONTENTS3 0xc4c #define PKT_NVI_CONTENTS4 0xc50 #define PKT_NVI_CONTENTS5 0xc54 #define PKT_NVI_CONTENTS6 0xc58 #define PKT_NVI_CONTENTS7 0xc5c #define PKT_DRMI_CONTENTS0 0xc60 #define PKT_DRMI_CONTENTS1 0xc64 #define PKT_DRMI_CONTENTS2 0xc68 #define PKT_DRMI_CONTENTS3 0xc6c #define PKT_DRMI_CONTENTS4 0xc70 #define PKT_DRMI_CONTENTS5 0xc74 #define PKT_DRMI_CONTENTS6 0xc78 #define PKT_DRMI_CONTENTS7 0xc7c #define PKT_GHDMI1_CONTENTS0 0xc80 #define PKT_GHDMI1_CONTENTS1 0xc84 #define PKT_GHDMI1_CONTENTS2 0xc88 #define PKT_GHDMI1_CONTENTS3 0xc8c #define PKT_GHDMI1_CONTENTS4 0xc90 #define PKT_GHDMI1_CONTENTS5 0xc94 #define PKT_GHDMI1_CONTENTS6 0xc98 #define PKT_GHDMI1_CONTENTS7 0xc9c #define PKT_GHDMI2_CONTENTS0 0xca0 #define PKT_GHDMI2_CONTENTS1 0xca4 #define PKT_GHDMI2_CONTENTS2 0xca8 #define PKT_GHDMI2_CONTENTS3 0xcac #define PKT_GHDMI2_CONTENTS4 0xcb0 
#define PKT_GHDMI2_CONTENTS5 0xcb4 #define PKT_GHDMI2_CONTENTS6 0xcb8 #define PKT_GHDMI2_CONTENTS7 0xcbc /* EMP Packetizer Registers */ #define PKT_EMP_CONFIG0 0xce0 #define PKT_EMP_CONTROL0 0xcec #define PKT_EMP_CONTROL1 0xcf0 #define PKT_EMP_CONTROL2 0xcf4 #define PKT_EMP_VTEM_CONTENTS0 0xd00 #define PKT_EMP_VTEM_CONTENTS1 0xd04 #define PKT_EMP_VTEM_CONTENTS2 0xd08 #define PKT_EMP_VTEM_CONTENTS3 0xd0c #define PKT_EMP_VTEM_CONTENTS4 0xd10 #define PKT_EMP_VTEM_CONTENTS5 0xd14 #define PKT_EMP_VTEM_CONTENTS6 0xd18 #define PKT_EMP_VTEM_CONTENTS7 0xd1c #define PKT0_EMP_CVTEM_CONTENTS0 0xd20 #define PKT0_EMP_CVTEM_CONTENTS1 0xd24 #define PKT0_EMP_CVTEM_CONTENTS2 0xd28 #define PKT0_EMP_CVTEM_CONTENTS3 0xd2c #define PKT0_EMP_CVTEM_CONTENTS4 0xd30 #define PKT0_EMP_CVTEM_CONTENTS5 0xd34 #define PKT0_EMP_CVTEM_CONTENTS6 0xd38 #define PKT0_EMP_CVTEM_CONTENTS7 0xd3c #define PKT1_EMP_CVTEM_CONTENTS0 0xd40 #define PKT1_EMP_CVTEM_CONTENTS1 0xd44 #define PKT1_EMP_CVTEM_CONTENTS2 0xd48 #define PKT1_EMP_CVTEM_CONTENTS3 0xd4c #define PKT1_EMP_CVTEM_CONTENTS4 0xd50 #define PKT1_EMP_CVTEM_CONTENTS5 0xd54 #define PKT1_EMP_CVTEM_CONTENTS6 0xd58 #define PKT1_EMP_CVTEM_CONTENTS7 0xd5c #define PKT2_EMP_CVTEM_CONTENTS0 0xd60 #define PKT2_EMP_CVTEM_CONTENTS1 0xd64 #define PKT2_EMP_CVTEM_CONTENTS2 0xd68 #define PKT2_EMP_CVTEM_CONTENTS3 0xd6c #define PKT2_EMP_CVTEM_CONTENTS4 0xd70 #define PKT2_EMP_CVTEM_CONTENTS5 0xd74 #define PKT2_EMP_CVTEM_CONTENTS6 0xd78 #define PKT2_EMP_CVTEM_CONTENTS7 0xd7c #define PKT3_EMP_CVTEM_CONTENTS0 0xd80 #define PKT3_EMP_CVTEM_CONTENTS1 0xd84 #define PKT3_EMP_CVTEM_CONTENTS2 0xd88 #define PKT3_EMP_CVTEM_CONTENTS3 0xd8c #define PKT3_EMP_CVTEM_CONTENTS4 0xd90 #define PKT3_EMP_CVTEM_CONTENTS5 0xd94 #define PKT3_EMP_CVTEM_CONTENTS6 0xd98 #define PKT3_EMP_CVTEM_CONTENTS7 0xd9c #define PKT4_EMP_CVTEM_CONTENTS0 0xda0 #define PKT4_EMP_CVTEM_CONTENTS1 0xda4 #define PKT4_EMP_CVTEM_CONTENTS2 0xda8 #define PKT4_EMP_CVTEM_CONTENTS3 0xdac #define PKT4_EMP_CVTEM_CONTENTS4 0xdb0 #define PKT4_EMP_CVTEM_CONTENTS5 0xdb4 #define PKT4_EMP_CVTEM_CONTENTS6 0xdb8 #define PKT4_EMP_CVTEM_CONTENTS7 0xdbc #define PKT5_EMP_CVTEM_CONTENTS0 0xdc0 #define PKT5_EMP_CVTEM_CONTENTS1 0xdc4 #define PKT5_EMP_CVTEM_CONTENTS2 0xdc8 #define PKT5_EMP_CVTEM_CONTENTS3 0xdcc #define PKT5_EMP_CVTEM_CONTENTS4 0xdd0 #define PKT5_EMP_CVTEM_CONTENTS5 0xdd4 #define PKT5_EMP_CVTEM_CONTENTS6 0xdd8 #define PKT5_EMP_CVTEM_CONTENTS7 0xddc /* Audio Packetizer Registers */ #define AUDPKT_CONTROL0 0xe20 #define AUDPKT_PBIT_FORCE_EN_MASK BIT(12) #define AUDPKT_PBIT_FORCE_EN BIT(12) #define AUDPKT_CHSTATUS_OVR_EN_MASK BIT(0) #define AUDPKT_CHSTATUS_OVR_EN BIT(0) #define AUDPKT_CONTROL1 0xe24 #define AUDPKT_ACR_CONTROL0 0xe40 #define AUDPKT_ACR_N_VALUE 0xfffff #define AUDPKT_ACR_CONTROL1 0xe44 #define AUDPKT_ACR_CTS_OVR_VAL_MSK GENMASK(23, 4) #define AUDPKT_ACR_CTS_OVR_VAL(x) ((x) << 4) #define AUDPKT_ACR_CTS_OVR_EN_MSK BIT(1) #define AUDPKT_ACR_CTS_OVR_EN BIT(1) #define AUDPKT_ACR_STATUS0 0xe4c #define AUDPKT_CHSTATUS_OVR0 0xe60 #define AUDPKT_CHSTATUS_OVR1 0xe64 /* IEC60958 Byte 3: Sampleing frenuency Bits 24 to 27 */ #define AUDPKT_CHSTATUS_SR_MASK GENMASK(3, 0) #define AUDPKT_CHSTATUS_SR_22050 0x4 #define AUDPKT_CHSTATUS_SR_24000 0x6 #define AUDPKT_CHSTATUS_SR_32000 0x3 #define AUDPKT_CHSTATUS_SR_44100 0x0 #define AUDPKT_CHSTATUS_SR_48000 0x2 #define AUDPKT_CHSTATUS_SR_88200 0x8 #define AUDPKT_CHSTATUS_SR_96000 0xa #define AUDPKT_CHSTATUS_SR_176400 0xc #define AUDPKT_CHSTATUS_SR_192000 0xe #define AUDPKT_CHSTATUS_SR_768000 0x9 #define 
AUDPKT_CHSTATUS_SR_NOT_INDICATED 0x1 /* IEC60958 Byte 4: Original Sampleing frenuency Bits 36 to 39 */ #define AUDPKT_CHSTATUS_0SR_MASK GENMASK(15, 12) #define AUDPKT_CHSTATUS_OSR_8000 0x6 #define AUDPKT_CHSTATUS_OSR_11025 0xa #define AUDPKT_CHSTATUS_OSR_12000 0x2 #define AUDPKT_CHSTATUS_OSR_16000 0x8 #define AUDPKT_CHSTATUS_OSR_22050 0xb #define AUDPKT_CHSTATUS_OSR_24000 0x9 #define AUDPKT_CHSTATUS_OSR_32000 0xc #define AUDPKT_CHSTATUS_OSR_44100 0xf #define AUDPKT_CHSTATUS_OSR_48000 0xd #define AUDPKT_CHSTATUS_OSR_88200 0x7 #define AUDPKT_CHSTATUS_OSR_96000 0x5 #define AUDPKT_CHSTATUS_OSR_176400 0x3 #define AUDPKT_CHSTATUS_OSR_192000 0x1 #define AUDPKT_CHSTATUS_OSR_NOT_INDICATED 0x0 #define AUDPKT_CHSTATUS_OVR2 0xe68 #define AUDPKT_CHSTATUS_OVR3 0xe6c #define AUDPKT_CHSTATUS_OVR4 0xe70 #define AUDPKT_CHSTATUS_OVR5 0xe74 #define AUDPKT_CHSTATUS_OVR6 0xe78 #define AUDPKT_CHSTATUS_OVR7 0xe7c #define AUDPKT_CHSTATUS_OVR8 0xe80 #define AUDPKT_CHSTATUS_OVR9 0xe84 #define AUDPKT_CHSTATUS_OVR10 0xe88 #define AUDPKT_CHSTATUS_OVR11 0xe8c #define AUDPKT_CHSTATUS_OVR12 0xe90 #define AUDPKT_CHSTATUS_OVR13 0xe94 #define AUDPKT_CHSTATUS_OVR14 0xe98 #define AUDPKT_USRDATA_OVR_MSG_GENERIC0 0xea0 #define AUDPKT_USRDATA_OVR_MSG_GENERIC1 0xea4 #define AUDPKT_USRDATA_OVR_MSG_GENERIC2 0xea8 #define AUDPKT_USRDATA_OVR_MSG_GENERIC3 0xeac #define AUDPKT_USRDATA_OVR_MSG_GENERIC4 0xeb0 #define AUDPKT_USRDATA_OVR_MSG_GENERIC5 0xeb4 #define AUDPKT_USRDATA_OVR_MSG_GENERIC6 0xeb8 #define AUDPKT_USRDATA_OVR_MSG_GENERIC7 0xebc #define AUDPKT_USRDATA_OVR_MSG_GENERIC8 0xec0 #define AUDPKT_USRDATA_OVR_MSG_GENERIC9 0xec4 #define AUDPKT_USRDATA_OVR_MSG_GENERIC10 0xec8 #define AUDPKT_USRDATA_OVR_MSG_GENERIC11 0xecc #define AUDPKT_USRDATA_OVR_MSG_GENERIC12 0xed0 #define AUDPKT_USRDATA_OVR_MSG_GENERIC13 0xed4 #define AUDPKT_USRDATA_OVR_MSG_GENERIC14 0xed8 #define AUDPKT_USRDATA_OVR_MSG_GENERIC15 0xedc #define AUDPKT_USRDATA_OVR_MSG_GENERIC16 0xee0 #define AUDPKT_USRDATA_OVR_MSG_GENERIC17 0xee4 #define AUDPKT_USRDATA_OVR_MSG_GENERIC18 0xee8 #define AUDPKT_USRDATA_OVR_MSG_GENERIC19 0xeec #define AUDPKT_USRDATA_OVR_MSG_GENERIC20 0xef0 #define AUDPKT_USRDATA_OVR_MSG_GENERIC21 0xef4 #define AUDPKT_USRDATA_OVR_MSG_GENERIC22 0xef8 #define AUDPKT_USRDATA_OVR_MSG_GENERIC23 0xefc #define AUDPKT_USRDATA_OVR_MSG_GENERIC24 0xf00 #define AUDPKT_USRDATA_OVR_MSG_GENERIC25 0xf04 #define AUDPKT_USRDATA_OVR_MSG_GENERIC26 0xf08 #define AUDPKT_USRDATA_OVR_MSG_GENERIC27 0xf0c #define AUDPKT_USRDATA_OVR_MSG_GENERIC28 0xf10 #define AUDPKT_USRDATA_OVR_MSG_GENERIC29 0xf14 #define AUDPKT_USRDATA_OVR_MSG_GENERIC30 0xf18 #define AUDPKT_USRDATA_OVR_MSG_GENERIC31 0xf1c #define AUDPKT_USRDATA_OVR_MSG_GENERIC32 0xf20 #define AUDPKT_VBIT_OVR0 0xf24 /* CEC Registers */ #define CEC_TX_CONTROL 0x1000 #define CEC_STATUS 0x1004 #define CEC_CONFIG 0x1008 #define CEC_ADDR 0x100c #define CEC_TX_COUNT 0x1020 #define CEC_TX_DATA3_0 0x1024 #define CEC_TX_DATA7_4 0x1028 #define CEC_TX_DATA11_8 0x102c #define CEC_TX_DATA15_12 0x1030 #define CEC_RX_COUNT_STATUS 0x1040 #define CEC_RX_DATA3_0 0x1044 #define CEC_RX_DATA7_4 0x1048 #define CEC_RX_DATA11_8 0x104c #define CEC_RX_DATA15_12 0x1050 #define CEC_LOCK_CONTROL 0x1054 #define CEC_RXQUAL_BITTIME_CONFIG 0x1060 #define CEC_RX_BITTIME_CONFIG 0x1064 #define CEC_TX_BITTIME_CONFIG 0x1068 /* eARC RX CMDC Registers */ #define EARCRX_CMDC_CONFIG0 0x1800 #define EARCRX_XACTREAD_STOP_CFG BIT(26) #define EARCRX_XACTREAD_RETRY_CFG BIT(25) #define EARCRX_CMDC_DSCVR_EARCVALID0_TO_DISC1 BIT(24) #define EARCRX_CMDC_XACT_RESTART_EN BIT(18) 
#define EARCRX_CMDC_CONFIG1 0x1804 #define EARCRX_CMDC_CONTROL 0x1808 #define EARCRX_CMDC_HEARTBEAT_LOSS_EN BIT(4) #define EARCRX_CMDC_DISCOVERY_EN BIT(3) #define EARCRX_CONNECTOR_HPD BIT(1) #define EARCRX_CMDC_WHITELIST0_CONFIG 0x180c #define EARCRX_CMDC_WHITELIST1_CONFIG 0x1810 #define EARCRX_CMDC_WHITELIST2_CONFIG 0x1814 #define EARCRX_CMDC_WHITELIST3_CONFIG 0x1818 #define EARCRX_CMDC_STATUS 0x181c #define EARCRX_CMDC_XACT_INFO 0x1820 #define EARCRX_CMDC_XACT_ACTION 0x1824 #define EARCRX_CMDC_HEARTBEAT_RXSTAT_SE 0x1828 #define EARCRX_CMDC_HEARTBEAT_STATUS 0x182c #define EARCRX_CMDC_XACT_WR0 0x1840 #define EARCRX_CMDC_XACT_WR1 0x1844 #define EARCRX_CMDC_XACT_WR2 0x1848 #define EARCRX_CMDC_XACT_WR3 0x184c #define EARCRX_CMDC_XACT_WR4 0x1850 #define EARCRX_CMDC_XACT_WR5 0x1854 #define EARCRX_CMDC_XACT_WR6 0x1858 #define EARCRX_CMDC_XACT_WR7 0x185c #define EARCRX_CMDC_XACT_WR8 0x1860 #define EARCRX_CMDC_XACT_WR9 0x1864 #define EARCRX_CMDC_XACT_WR10 0x1868 #define EARCRX_CMDC_XACT_WR11 0x186c #define EARCRX_CMDC_XACT_WR12 0x1870 #define EARCRX_CMDC_XACT_WR13 0x1874 #define EARCRX_CMDC_XACT_WR14 0x1878 #define EARCRX_CMDC_XACT_WR15 0x187c #define EARCRX_CMDC_XACT_WR16 0x1880 #define EARCRX_CMDC_XACT_WR17 0x1884 #define EARCRX_CMDC_XACT_WR18 0x1888 #define EARCRX_CMDC_XACT_WR19 0x188c #define EARCRX_CMDC_XACT_WR20 0x1890 #define EARCRX_CMDC_XACT_WR21 0x1894 #define EARCRX_CMDC_XACT_WR22 0x1898 #define EARCRX_CMDC_XACT_WR23 0x189c #define EARCRX_CMDC_XACT_WR24 0x18a0 #define EARCRX_CMDC_XACT_WR25 0x18a4 #define EARCRX_CMDC_XACT_WR26 0x18a8 #define EARCRX_CMDC_XACT_WR27 0x18ac #define EARCRX_CMDC_XACT_WR28 0x18b0 #define EARCRX_CMDC_XACT_WR29 0x18b4 #define EARCRX_CMDC_XACT_WR30 0x18b8 #define EARCRX_CMDC_XACT_WR31 0x18bc #define EARCRX_CMDC_XACT_WR32 0x18c0 #define EARCRX_CMDC_XACT_WR33 0x18c4 #define EARCRX_CMDC_XACT_WR34 0x18c8 #define EARCRX_CMDC_XACT_WR35 0x18cc #define EARCRX_CMDC_XACT_WR36 0x18d0 #define EARCRX_CMDC_XACT_WR37 0x18d4 #define EARCRX_CMDC_XACT_WR38 0x18d8 #define EARCRX_CMDC_XACT_WR39 0x18dc #define EARCRX_CMDC_XACT_WR40 0x18e0 #define EARCRX_CMDC_XACT_WR41 0x18e4 #define EARCRX_CMDC_XACT_WR42 0x18e8 #define EARCRX_CMDC_XACT_WR43 0x18ec #define EARCRX_CMDC_XACT_WR44 0x18f0 #define EARCRX_CMDC_XACT_WR45 0x18f4 #define EARCRX_CMDC_XACT_WR46 0x18f8 #define EARCRX_CMDC_XACT_WR47 0x18fc #define EARCRX_CMDC_XACT_WR48 0x1900 #define EARCRX_CMDC_XACT_WR49 0x1904 #define EARCRX_CMDC_XACT_WR50 0x1908 #define EARCRX_CMDC_XACT_WR51 0x190c #define EARCRX_CMDC_XACT_WR52 0x1910 #define EARCRX_CMDC_XACT_WR53 0x1914 #define EARCRX_CMDC_XACT_WR54 0x1918 #define EARCRX_CMDC_XACT_WR55 0x191c #define EARCRX_CMDC_XACT_WR56 0x1920 #define EARCRX_CMDC_XACT_WR57 0x1924 #define EARCRX_CMDC_XACT_WR58 0x1928 #define EARCRX_CMDC_XACT_WR59 0x192c #define EARCRX_CMDC_XACT_WR60 0x1930 #define EARCRX_CMDC_XACT_WR61 0x1934 #define EARCRX_CMDC_XACT_WR62 0x1938 #define EARCRX_CMDC_XACT_WR63 0x193c #define EARCRX_CMDC_XACT_WR64 0x1940 #define EARCRX_CMDC_XACT_RD0 0x1960 #define EARCRX_CMDC_XACT_RD1 0x1964 #define EARCRX_CMDC_XACT_RD2 0x1968 #define EARCRX_CMDC_XACT_RD3 0x196c #define EARCRX_CMDC_XACT_RD4 0x1970 #define EARCRX_CMDC_XACT_RD5 0x1974 #define EARCRX_CMDC_XACT_RD6 0x1978 #define EARCRX_CMDC_XACT_RD7 0x197c #define EARCRX_CMDC_XACT_RD8 0x1980 #define EARCRX_CMDC_XACT_RD9 0x1984 #define EARCRX_CMDC_XACT_RD10 0x1988 #define EARCRX_CMDC_XACT_RD11 0x198c #define EARCRX_CMDC_XACT_RD12 0x1990 #define EARCRX_CMDC_XACT_RD13 0x1994 #define EARCRX_CMDC_XACT_RD14 0x1998 #define EARCRX_CMDC_XACT_RD15 0x199c 
#define EARCRX_CMDC_XACT_RD16 0x19a0 #define EARCRX_CMDC_XACT_RD17 0x19a4 #define EARCRX_CMDC_XACT_RD18 0x19a8 #define EARCRX_CMDC_XACT_RD19 0x19ac #define EARCRX_CMDC_XACT_RD20 0x19b0 #define EARCRX_CMDC_XACT_RD21 0x19b4 #define EARCRX_CMDC_XACT_RD22 0x19b8 #define EARCRX_CMDC_XACT_RD23 0x19bc #define EARCRX_CMDC_XACT_RD24 0x19c0 #define EARCRX_CMDC_XACT_RD25 0x19c4 #define EARCRX_CMDC_XACT_RD26 0x19c8 #define EARCRX_CMDC_XACT_RD27 0x19cc #define EARCRX_CMDC_XACT_RD28 0x19d0 #define EARCRX_CMDC_XACT_RD29 0x19d4 #define EARCRX_CMDC_XACT_RD30 0x19d8 #define EARCRX_CMDC_XACT_RD31 0x19dc #define EARCRX_CMDC_XACT_RD32 0x19e0 #define EARCRX_CMDC_XACT_RD33 0x19e4 #define EARCRX_CMDC_XACT_RD34 0x19e8 #define EARCRX_CMDC_XACT_RD35 0x19ec #define EARCRX_CMDC_XACT_RD36 0x19f0 #define EARCRX_CMDC_XACT_RD37 0x19f4 #define EARCRX_CMDC_XACT_RD38 0x19f8 #define EARCRX_CMDC_XACT_RD39 0x19fc #define EARCRX_CMDC_XACT_RD40 0x1a00 #define EARCRX_CMDC_XACT_RD41 0x1a04 #define EARCRX_CMDC_XACT_RD42 0x1a08 #define EARCRX_CMDC_XACT_RD43 0x1a0c #define EARCRX_CMDC_XACT_RD44 0x1a10 #define EARCRX_CMDC_XACT_RD45 0x1a14 #define EARCRX_CMDC_XACT_RD46 0x1a18 #define EARCRX_CMDC_XACT_RD47 0x1a1c #define EARCRX_CMDC_XACT_RD48 0x1a20 #define EARCRX_CMDC_XACT_RD49 0x1a24 #define EARCRX_CMDC_XACT_RD50 0x1a28 #define EARCRX_CMDC_XACT_RD51 0x1a2c #define EARCRX_CMDC_XACT_RD52 0x1a30 #define EARCRX_CMDC_XACT_RD53 0x1a34 #define EARCRX_CMDC_XACT_RD54 0x1a38 #define EARCRX_CMDC_XACT_RD55 0x1a3c #define EARCRX_CMDC_XACT_RD56 0x1a40 #define EARCRX_CMDC_XACT_RD57 0x1a44 #define EARCRX_CMDC_XACT_RD58 0x1a48 #define EARCRX_CMDC_XACT_RD59 0x1a4c #define EARCRX_CMDC_XACT_RD60 0x1a50 #define EARCRX_CMDC_XACT_RD61 0x1a54 #define EARCRX_CMDC_XACT_RD62 0x1a58 #define EARCRX_CMDC_XACT_RD63 0x1a5c #define EARCRX_CMDC_XACT_RD64 0x1a60 #define EARCRX_CMDC_SYNC_CONFIG 0x1b00 /* eARC RX DMAC Registers */ #define EARCRX_DMAC_PHY_CONTROL 0x1c00 #define EARCRX_DMAC_CONFIG 0x1c08 #define EARCRX_DMAC_CONTROL0 0x1c0c #define EARCRX_DMAC_AUDIO_EN BIT(1) #define EARCRX_DMAC_EN BIT(0) #define EARCRX_DMAC_CONTROL1 0x1c10 #define EARCRX_DMAC_STATUS 0x1c14 #define EARCRX_DMAC_CHSTATUS0 0x1c18 #define EARCRX_DMAC_CHSTATUS1 0x1c1c #define EARCRX_DMAC_CHSTATUS2 0x1c20 #define EARCRX_DMAC_CHSTATUS3 0x1c24 #define EARCRX_DMAC_CHSTATUS4 0x1c28 #define EARCRX_DMAC_CHSTATUS5 0x1c2c #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC0 0x1c30 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC1 0x1c34 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC2 0x1c38 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC3 0x1c3c #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC4 0x1c40 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC5 0x1c44 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC6 0x1c48 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC7 0x1c4c #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC8 0x1c50 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC9 0x1c54 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC10 0x1c58 #define EARCRX_DMAC_USRDATA_MSG_HDMI_AC11 0x1c5c #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT0 0x1c60 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT1 0x1c64 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT2 0x1c68 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT3 0x1c6c #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT4 0x1c70 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT5 0x1c74 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT6 0x1c78 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT7 0x1c7c #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT8 0x1c80 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT9 0x1c84 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT10 0x1c88 #define 
EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC1_PKT11 0x1c8c #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT0 0x1c90 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT1 0x1c94 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT2 0x1c98 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT3 0x1c9c #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT4 0x1ca0 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT5 0x1ca4 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT6 0x1ca8 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT7 0x1cac #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT8 0x1cb0 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT9 0x1cb4 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT10 0x1cb8 #define EARCRX_DMAC_USRDATA_MSG_HDMI_ISRC2_PKT11 0x1cbc #define EARCRX_DMAC_USRDATA_MSG_GENERIC0 0x1cc0 #define EARCRX_DMAC_USRDATA_MSG_GENERIC1 0x1cc4 #define EARCRX_DMAC_USRDATA_MSG_GENERIC2 0x1cc8 #define EARCRX_DMAC_USRDATA_MSG_GENERIC3 0x1ccc #define EARCRX_DMAC_USRDATA_MSG_GENERIC4 0x1cd0 #define EARCRX_DMAC_USRDATA_MSG_GENERIC5 0x1cd4 #define EARCRX_DMAC_USRDATA_MSG_GENERIC6 0x1cd8 #define EARCRX_DMAC_USRDATA_MSG_GENERIC7 0x1cdc #define EARCRX_DMAC_USRDATA_MSG_GENERIC8 0x1ce0 #define EARCRX_DMAC_USRDATA_MSG_GENERIC9 0x1ce4 #define EARCRX_DMAC_USRDATA_MSG_GENERIC10 0x1ce8 #define EARCRX_DMAC_USRDATA_MSG_GENERIC11 0x1cec #define EARCRX_DMAC_USRDATA_MSG_GENERIC12 0x1cf0 #define EARCRX_DMAC_USRDATA_MSG_GENERIC13 0x1cf4 #define EARCRX_DMAC_USRDATA_MSG_GENERIC14 0x1cf8 #define EARCRX_DMAC_USRDATA_MSG_GENERIC15 0x1cfc #define EARCRX_DMAC_USRDATA_MSG_GENERIC16 0x1d00 #define EARCRX_DMAC_USRDATA_MSG_GENERIC17 0x1d04 #define EARCRX_DMAC_USRDATA_MSG_GENERIC18 0x1d08 #define EARCRX_DMAC_USRDATA_MSG_GENERIC19 0x1d0c #define EARCRX_DMAC_USRDATA_MSG_GENERIC20 0x1d10 #define EARCRX_DMAC_USRDATA_MSG_GENERIC21 0x1d14 #define EARCRX_DMAC_USRDATA_MSG_GENERIC22 0x1d18 #define EARCRX_DMAC_USRDATA_MSG_GENERIC23 0x1d1c #define EARCRX_DMAC_USRDATA_MSG_GENERIC24 0x1d20 #define EARCRX_DMAC_USRDATA_MSG_GENERIC25 0x1d24 #define EARCRX_DMAC_USRDATA_MSG_GENERIC26 0x1d28 #define EARCRX_DMAC_USRDATA_MSG_GENERIC27 0x1d2c #define EARCRX_DMAC_USRDATA_MSG_GENERIC28 0x1d30 #define EARCRX_DMAC_USRDATA_MSG_GENERIC29 0x1d34 #define EARCRX_DMAC_USRDATA_MSG_GENERIC30 0x1d38 #define EARCRX_DMAC_USRDATA_MSG_GENERIC31 0x1d3c #define EARCRX_DMAC_USRDATA_MSG_GENERIC32 0x1d40 #define EARCRX_DMAC_CHSTATUS_STREAMER0 0x1d44 #define EARCRX_DMAC_CHSTATUS_STREAMER1 0x1d48 #define EARCRX_DMAC_CHSTATUS_STREAMER2 0x1d4c #define EARCRX_DMAC_CHSTATUS_STREAMER3 0x1d50 #define EARCRX_DMAC_CHSTATUS_STREAMER4 0x1d54 #define EARCRX_DMAC_CHSTATUS_STREAMER5 0x1d58 #define EARCRX_DMAC_CHSTATUS_STREAMER6 0x1d5c #define EARCRX_DMAC_CHSTATUS_STREAMER7 0x1d60 #define EARCRX_DMAC_CHSTATUS_STREAMER8 0x1d64 #define EARCRX_DMAC_CHSTATUS_STREAMER9 0x1d68 #define EARCRX_DMAC_CHSTATUS_STREAMER10 0x1d6c #define EARCRX_DMAC_CHSTATUS_STREAMER11 0x1d70 #define EARCRX_DMAC_CHSTATUS_STREAMER12 0x1d74 #define EARCRX_DMAC_CHSTATUS_STREAMER13 0x1d78 #define EARCRX_DMAC_CHSTATUS_STREAMER14 0x1d7c #define EARCRX_DMAC_USRDATA_STREAMER0 0x1d80 /* Main Unit Interrupt Registers */ #define MAIN_INTVEC_INDEX 0x3000 #define MAINUNIT_0_INT_STATUS 0x3010 #define MAINUNIT_0_INT_MASK_N 0x3014 #define MAINUNIT_0_INT_CLEAR 0x3018 #define MAINUNIT_0_INT_FORCE 0x301c #define MAINUNIT_1_INT_STATUS 0x3020 #define FLT_EXIT_TO_LTSL_IRQ BIT(22) #define FLT_EXIT_TO_LTS4_IRQ BIT(21) #define FLT_EXIT_TO_LTSP_IRQ BIT(20) #define SCDC_NACK_RCVD_IRQ BIT(12) #define SCDC_RR_REPLY_STOP_IRQ BIT(11) #define SCDC_UPD_FLAGS_CLR_IRQ BIT(10) #define 
SCDC_UPD_FLAGS_CHG_IRQ BIT(9) #define SCDC_UPD_FLAGS_RD_IRQ BIT(8) #define I2CM_NACK_RCVD_IRQ BIT(2) #define I2CM_READ_REQUEST_IRQ BIT(1) #define I2CM_OP_DONE_IRQ BIT(0) #define MAINUNIT_1_INT_MASK_N 0x3024 #define I2CM_NACK_RCVD_MASK_N BIT(2) #define I2CM_READ_REQUEST_MASK_N BIT(1) #define I2CM_OP_DONE_MASK_N BIT(0) #define MAINUNIT_1_INT_CLEAR 0x3028 #define I2CM_NACK_RCVD_CLEAR BIT(2) #define I2CM_READ_REQUEST_CLEAR BIT(1) #define I2CM_OP_DONE_CLEAR BIT(0) #define MAINUNIT_1_INT_FORCE 0x302c /* AVPUNIT Interrupt Registers */ #define AVP_INTVEC_INDEX 0x3800 #define AVP_0_INT_STATUS 0x3810 #define AVP_0_INT_MASK_N 0x3814 #define AVP_0_INT_CLEAR 0x3818 #define AVP_0_INT_FORCE 0x381c #define AVP_1_INT_STATUS 0x3820 #define AVP_1_INT_MASK_N 0x3824 #define HDCP14_AUTH_CHG_MASK_N BIT(6) #define AVP_1_INT_CLEAR 0x3828 #define AVP_1_INT_FORCE 0x382c #define AVP_2_INT_STATUS 0x3830 #define AVP_2_INT_MASK_N 0x3834 #define AVP_2_INT_CLEAR 0x3838 #define AVP_2_INT_FORCE 0x383c #define AVP_3_INT_STATUS 0x3840 #define AVP_3_INT_MASK_N 0x3844 #define AVP_3_INT_CLEAR 0x3848 #define AVP_3_INT_FORCE 0x384c #define AVP_4_INT_STATUS 0x3850 #define AVP_4_INT_MASK_N 0x3854 #define AVP_4_INT_CLEAR 0x3858 #define AVP_4_INT_FORCE 0x385c #define AVP_5_INT_STATUS 0x3860 #define AVP_5_INT_MASK_N 0x3864 #define AVP_5_INT_CLEAR 0x3868 #define AVP_5_INT_FORCE 0x386c #define AVP_6_INT_STATUS 0x3870 #define AVP_6_INT_MASK_N 0x3874 #define AVP_6_INT_CLEAR 0x3878 #define AVP_6_INT_FORCE 0x387c /* CEC Interrupt Registers */ #define CEC_INT_STATUS 0x4000 #define CEC_INT_MASK_N 0x4004 #define CEC_INT_CLEAR 0x4008 #define CEC_INT_FORCE 0x400c /* eARC RX Interrupt Registers */ #define EARCRX_INTVEC_INDEX 0x4800 #define EARCRX_0_INT_STATUS 0x4810 #define EARCRX_CMDC_DISCOVERY_TIMEOUT_IRQ BIT(9) #define EARCRX_CMDC_DISCOVERY_DONE_IRQ BIT(8) #define EARCRX_0_INT_MASK_N 0x4814 #define EARCRX_0_INT_CLEAR 0x4818 #define EARCRX_0_INT_FORCE 0x481c #define EARCRX_1_INT_STATUS 0x4820 #define EARCRX_1_INT_MASK_N 0x4824 #define EARCRX_1_INT_CLEAR 0x4828 #define EARCRX_1_INT_FORCE 0x482c #endif /* __DW_HDMI_QP_H__ */
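/*
 * Hedged usage sketch, not taken from any driver: how the mask/value pairs in
 * this header are meant to be combined, here selecting the I2S audio interface
 * with one I2S lane enabled and FIFO re-init on overflow. Accessing the block
 * through a plain regmap is an assumption for illustration; the dw-hdmi-qp
 * driver has its own register accessors.
 */
#include <linux/regmap.h>

static int example_qp_audio_if_to_i2s(struct regmap *regs)
{
	unsigned int mask = AUD_IF_SEL_MSK | I2S_LINES_EN_MSK |
			    AUD_FIFO_INIT_ON_OVF_MSK;
	unsigned int val = AUD_IF_I2S | I2S_LINES_EN(0) |
			   AUD_FIFO_INIT_ON_OVF_EN;

	/* Program only the audio-interface fields, leaving the rest untouched. */
	return regmap_update_bits(regs, AUDIO_INTERFACE_CONFIG0, mask, val);
}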
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. */ #include <asm/div64.h> #include <linux/interconnect-provider.h> #include <linux/list_sort.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <soc/qcom/rpmh.h> #include <soc/qcom/tcs.h> #include "bcm-voter.h" #include "icc-rpmh.h" static LIST_HEAD(bcm_voters); static DEFINE_MUTEX(bcm_voter_lock); /** * struct bcm_voter - Bus Clock Manager voter * @dev: reference to the device that communicates with the BCM * @np: reference to the device node to match bcm voters * @lock: mutex to protect commit and wake/sleep lists in the voter * @commit_list: list containing bcms to be committed to hardware * @ws_list: list containing bcms that have different wake/sleep votes * @voter_node: list of bcm voters * @tcs_wait: mask for which buckets require TCS completion */ struct bcm_voter { struct device *dev; struct device_node *np; struct mutex lock; struct list_head commit_list; struct list_head ws_list; struct list_head voter_node; u32 tcs_wait; }; static int cmp_vcd(void *priv, const struct list_head *a, const struct list_head *b) { const struct qcom_icc_bcm *bcm_a = list_entry(a, struct qcom_icc_bcm, list); const struct qcom_icc_bcm *bcm_b = list_entry(b, struct qcom_icc_bcm, list); return bcm_a->aux_data.vcd - bcm_b->aux_data.vcd; } static u64 bcm_div(u64 num, u32 base) { /* Ensure that small votes aren't lost. */ if (num && num < base) return 1; do_div(num, base); return num; } /* BCMs with enable_mask use one-hot-encoding for on/off signaling */ static void bcm_aggregate_mask(struct qcom_icc_bcm *bcm) { struct qcom_icc_node *node; int bucket, i; for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { bcm->vote_x[bucket] = 0; bcm->vote_y[bucket] = 0; for (i = 0; i < bcm->num_nodes; i++) { node = bcm->nodes[i]; /* If any vote in this bucket exists, keep the BCM enabled */ if (node->sum_avg[bucket] || node->max_peak[bucket]) { bcm->vote_x[bucket] = 0; bcm->vote_y[bucket] = bcm->enable_mask; break; } } } if (bcm->keepalive) { bcm->vote_x[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask; bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask; bcm->vote_y[QCOM_ICC_BUCKET_AMC] = bcm->enable_mask; bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = bcm->enable_mask; } } static void bcm_aggregate(struct qcom_icc_bcm *bcm) { struct qcom_icc_node *node; size_t i, bucket; u64 agg_avg[QCOM_ICC_NUM_BUCKETS] = {0}; u64 agg_peak[QCOM_ICC_NUM_BUCKETS] = {0}; u64 temp; for (bucket = 0; bucket < QCOM_ICC_NUM_BUCKETS; bucket++) { for (i = 0; i < bcm->num_nodes; i++) { node = bcm->nodes[i]; temp = bcm_div(node->sum_avg[bucket] * bcm->aux_data.width, node->buswidth * node->channels); agg_avg[bucket] = max(agg_avg[bucket], temp); temp = bcm_div(node->max_peak[bucket] * bcm->aux_data.width, node->buswidth); agg_peak[bucket] = max(agg_peak[bucket], temp); } temp = agg_avg[bucket] * bcm->vote_scale; bcm->vote_x[bucket] = bcm_div(temp, bcm->aux_data.unit); temp = agg_peak[bucket] * bcm->vote_scale; bcm->vote_y[bucket] = bcm_div(temp, bcm->aux_data.unit); } if (bcm->keepalive && bcm->vote_x[QCOM_ICC_BUCKET_AMC] == 0 && bcm->vote_y[QCOM_ICC_BUCKET_AMC] == 0) { bcm->vote_x[QCOM_ICC_BUCKET_AMC] = 1; bcm->vote_x[QCOM_ICC_BUCKET_WAKE] = 1; bcm->vote_y[QCOM_ICC_BUCKET_AMC] = 1; bcm->vote_y[QCOM_ICC_BUCKET_WAKE] = 1; } } static inline void tcs_cmd_gen(struct tcs_cmd *cmd, u64 vote_x, u64 vote_y, u32 addr, bool commit, bool wait) { bool valid = true; if (!cmd) return; memset(cmd, 0, sizeof(*cmd)); if 
(vote_x == 0 && vote_y == 0) valid = false; if (vote_x > BCM_TCS_CMD_VOTE_MASK) vote_x = BCM_TCS_CMD_VOTE_MASK; if (vote_y > BCM_TCS_CMD_VOTE_MASK) vote_y = BCM_TCS_CMD_VOTE_MASK; cmd->addr = addr; cmd->data = BCM_TCS_CMD(commit, valid, vote_x, vote_y); /* * Set the wait for completion flag on command that need to be completed * before the next command. */ cmd->wait = wait; } static void tcs_list_gen(struct bcm_voter *voter, int bucket, struct tcs_cmd tcs_list[MAX_VCD], int n[MAX_VCD + 1]) { struct list_head *bcm_list = &voter->commit_list; struct qcom_icc_bcm *bcm; bool commit, wait; size_t idx = 0, batch = 0, cur_vcd_size = 0; memset(n, 0, sizeof(int) * (MAX_VCD + 1)); list_for_each_entry(bcm, bcm_list, list) { commit = false; cur_vcd_size++; if ((list_is_last(&bcm->list, bcm_list)) || bcm->aux_data.vcd != list_next_entry(bcm, list)->aux_data.vcd) { commit = true; cur_vcd_size = 0; } wait = commit && (voter->tcs_wait & BIT(bucket)); tcs_cmd_gen(&tcs_list[idx], bcm->vote_x[bucket], bcm->vote_y[bucket], bcm->addr, commit, wait); idx++; n[batch]++; /* * Batch the BCMs in such a way that we do not split them in * multiple payloads when they are under the same VCD. This is * to ensure that every BCM is committed since we only set the * commit bit on the last BCM request of every VCD. */ if (n[batch] >= MAX_RPMH_PAYLOAD) { if (!commit) { n[batch] -= cur_vcd_size; n[batch + 1] = cur_vcd_size; } batch++; } } } /** * of_bcm_voter_get - gets a bcm voter handle from DT node * @dev: device pointer for the consumer device * @name: name for the bcm voter device * * This function will match a device_node pointer for the phandle * specified in the device DT and return a bcm_voter handle on success. * * Returns bcm_voter pointer or ERR_PTR() on error. EPROBE_DEFER is returned * when matching bcm voter is yet to be found. */ struct bcm_voter *of_bcm_voter_get(struct device *dev, const char *name) { struct bcm_voter *voter = ERR_PTR(-EPROBE_DEFER); struct bcm_voter *temp; struct device_node *np, *node; int idx = 0; if (!dev || !dev->of_node) return ERR_PTR(-ENODEV); np = dev->of_node; if (name) { idx = of_property_match_string(np, "qcom,bcm-voter-names", name); if (idx < 0) return ERR_PTR(idx); } node = of_parse_phandle(np, "qcom,bcm-voters", idx); mutex_lock(&bcm_voter_lock); list_for_each_entry(temp, &bcm_voters, voter_node) { if (temp->np == node) { voter = temp; break; } } mutex_unlock(&bcm_voter_lock); of_node_put(node); return voter; } EXPORT_SYMBOL_GPL(of_bcm_voter_get); /** * qcom_icc_bcm_voter_add - queues up the bcm nodes that require updates * @voter: voter that the bcms are being added to * @bcm: bcm to add to the commit and wake sleep list */ void qcom_icc_bcm_voter_add(struct bcm_voter *voter, struct qcom_icc_bcm *bcm) { if (!voter) return; mutex_lock(&voter->lock); if (list_empty(&bcm->list)) list_add_tail(&bcm->list, &voter->commit_list); if (list_empty(&bcm->ws_list)) list_add_tail(&bcm->ws_list, &voter->ws_list); mutex_unlock(&voter->lock); } EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_add); /** * qcom_icc_bcm_voter_commit - generates and commits tcs cmds based on bcms * @voter: voter that needs flushing * * This function generates a set of AMC commands and flushes to the BCM device * associated with the voter. It conditionally generate WAKE and SLEEP commands * based on deltas between WAKE/SLEEP requirements. The ws_list persists * through multiple commit requests and bcm nodes are removed only when the * requirements for WAKE matches SLEEP. 
* * Returns 0 on success, or an appropriate error code otherwise. */ int qcom_icc_bcm_voter_commit(struct bcm_voter *voter) { struct qcom_icc_bcm *bcm; struct qcom_icc_bcm *bcm_tmp; int commit_idx[MAX_VCD + 1]; struct tcs_cmd cmds[MAX_BCMS]; int ret = 0; if (!voter) return 0; mutex_lock(&voter->lock); list_for_each_entry(bcm, &voter->commit_list, list) { if (bcm->enable_mask) bcm_aggregate_mask(bcm); else bcm_aggregate(bcm); } /* * Pre sort the BCMs based on VCD for ease of generating a command list * that groups the BCMs with the same VCD together. VCDs are numbered * with lowest being the most expensive time wise, ensuring that * those commands are being sent the earliest in the queue. This needs * to be sorted every commit since we can't guarantee the order in which * the BCMs are added to the list. */ list_sort(NULL, &voter->commit_list, cmp_vcd); /* * Construct the command list based on a pre ordered list of BCMs * based on VCD. */ tcs_list_gen(voter, QCOM_ICC_BUCKET_AMC, cmds, commit_idx); if (!commit_idx[0]) goto out; rpmh_invalidate(voter->dev); ret = rpmh_write_batch(voter->dev, RPMH_ACTIVE_ONLY_STATE, cmds, commit_idx); if (ret) { pr_err("Error sending AMC RPMH requests (%d)\n", ret); goto out; } list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list) list_del_init(&bcm->list); list_for_each_entry_safe(bcm, bcm_tmp, &voter->ws_list, ws_list) { /* * Only generate WAKE and SLEEP commands if a resource's * requirements change as the execution environment transitions * between different power states. */ if (bcm->vote_x[QCOM_ICC_BUCKET_WAKE] != bcm->vote_x[QCOM_ICC_BUCKET_SLEEP] || bcm->vote_y[QCOM_ICC_BUCKET_WAKE] != bcm->vote_y[QCOM_ICC_BUCKET_SLEEP]) list_add_tail(&bcm->list, &voter->commit_list); else list_del_init(&bcm->ws_list); } if (list_empty(&voter->commit_list)) goto out; list_sort(NULL, &voter->commit_list, cmp_vcd); tcs_list_gen(voter, QCOM_ICC_BUCKET_WAKE, cmds, commit_idx); ret = rpmh_write_batch(voter->dev, RPMH_WAKE_ONLY_STATE, cmds, commit_idx); if (ret) { pr_err("Error sending WAKE RPMH requests (%d)\n", ret); goto out; } tcs_list_gen(voter, QCOM_ICC_BUCKET_SLEEP, cmds, commit_idx); ret = rpmh_write_batch(voter->dev, RPMH_SLEEP_STATE, cmds, commit_idx); if (ret) { pr_err("Error sending SLEEP RPMH requests (%d)\n", ret); goto out; } out: list_for_each_entry_safe(bcm, bcm_tmp, &voter->commit_list, list) list_del_init(&bcm->list); mutex_unlock(&voter->lock); return ret; } EXPORT_SYMBOL_GPL(qcom_icc_bcm_voter_commit); static int qcom_icc_bcm_voter_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct bcm_voter *voter; voter = devm_kzalloc(&pdev->dev, sizeof(*voter), GFP_KERNEL); if (!voter) return -ENOMEM; voter->dev = &pdev->dev; voter->np = np; if (of_property_read_u32(np, "qcom,tcs-wait", &voter->tcs_wait)) voter->tcs_wait = QCOM_ICC_TAG_ACTIVE_ONLY; mutex_init(&voter->lock); INIT_LIST_HEAD(&voter->commit_list); INIT_LIST_HEAD(&voter->ws_list); mutex_lock(&bcm_voter_lock); list_add_tail(&voter->voter_node, &bcm_voters); mutex_unlock(&bcm_voter_lock); return 0; } static const struct of_device_id bcm_voter_of_match[] = { { .compatible = "qcom,bcm-voter" }, { } }; MODULE_DEVICE_TABLE(of, bcm_voter_of_match); static struct platform_driver qcom_icc_bcm_voter_driver = { .probe = qcom_icc_bcm_voter_probe, .driver = { .name = "bcm_voter", .of_match_table = bcm_voter_of_match, }, }; module_platform_driver(qcom_icc_bcm_voter_driver); MODULE_AUTHOR("David Dai <[email protected]>"); MODULE_DESCRIPTION("Qualcomm BCM Voter interconnect 
driver"); MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0
/*
 * SCMI Generic power domain support.
 *
 * Copyright (C) 2018-2021 ARM Ltd.
 */

#include <linux/err.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/pm_domain.h>
#include <linux/scmi_protocol.h>

static const struct scmi_power_proto_ops *power_ops;

struct scmi_pm_domain {
	struct generic_pm_domain genpd;
	const struct scmi_protocol_handle *ph;
	const char *name;
	u32 domain;
};

#define to_scmi_pd(gpd) container_of(gpd, struct scmi_pm_domain, genpd)

static int scmi_pd_power(struct generic_pm_domain *domain, bool power_on)
{
	int ret;
	u32 state, ret_state;
	struct scmi_pm_domain *pd = to_scmi_pd(domain);

	if (power_on)
		state = SCMI_POWER_STATE_GENERIC_ON;
	else
		state = SCMI_POWER_STATE_GENERIC_OFF;

	ret = power_ops->state_set(pd->ph, pd->domain, state);
	if (!ret)
		ret = power_ops->state_get(pd->ph, pd->domain, &ret_state);
	if (!ret && state != ret_state)
		return -EIO;

	return ret;
}

static int scmi_pd_power_on(struct generic_pm_domain *domain)
{
	return scmi_pd_power(domain, true);
}

static int scmi_pd_power_off(struct generic_pm_domain *domain)
{
	return scmi_pd_power(domain, false);
}

static int scmi_pm_domain_probe(struct scmi_device *sdev)
{
	int num_domains, i;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;
	struct scmi_pm_domain *scmi_pd;
	struct genpd_onecell_data *scmi_pd_data;
	struct generic_pm_domain **domains;
	const struct scmi_handle *handle = sdev->handle;
	struct scmi_protocol_handle *ph;

	if (!handle)
		return -ENODEV;

	power_ops = handle->devm_protocol_get(sdev, SCMI_PROTOCOL_POWER, &ph);
	if (IS_ERR(power_ops))
		return PTR_ERR(power_ops);

	num_domains = power_ops->num_domains_get(ph);
	if (num_domains < 0) {
		dev_err(dev, "number of domains not found\n");
		return num_domains;
	}

	scmi_pd = devm_kcalloc(dev, num_domains, sizeof(*scmi_pd), GFP_KERNEL);
	if (!scmi_pd)
		return -ENOMEM;

	scmi_pd_data = devm_kzalloc(dev, sizeof(*scmi_pd_data), GFP_KERNEL);
	if (!scmi_pd_data)
		return -ENOMEM;

	domains = devm_kcalloc(dev, num_domains, sizeof(*domains), GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	for (i = 0; i < num_domains; i++, scmi_pd++) {
		u32 state;

		if (power_ops->state_get(ph, i, &state)) {
			dev_warn(dev, "failed to get state for domain %d\n", i);
			continue;
		}

		scmi_pd->domain = i;
		scmi_pd->ph = ph;
		scmi_pd->name = power_ops->name_get(ph, i);
		scmi_pd->genpd.name = scmi_pd->name;
		scmi_pd->genpd.power_off = scmi_pd_power_off;
		scmi_pd->genpd.power_on = scmi_pd_power_on;
		scmi_pd->genpd.flags = GENPD_FLAG_ACTIVE_WAKEUP;

		pm_genpd_init(&scmi_pd->genpd, NULL,
			      state == SCMI_POWER_STATE_GENERIC_OFF);

		domains[i] = &scmi_pd->genpd;
	}

	scmi_pd_data->domains = domains;
	scmi_pd_data->num_domains = num_domains;

	dev_set_drvdata(dev, scmi_pd_data);

	return of_genpd_add_provider_onecell(np, scmi_pd_data);
}

static void scmi_pm_domain_remove(struct scmi_device *sdev)
{
	int i;
	struct genpd_onecell_data *scmi_pd_data;
	struct device *dev = &sdev->dev;
	struct device_node *np = dev->of_node;

	of_genpd_del_provider(np);

	scmi_pd_data = dev_get_drvdata(dev);
	for (i = 0; i < scmi_pd_data->num_domains; i++) {
		if (!scmi_pd_data->domains[i])
			continue;
		pm_genpd_remove(scmi_pd_data->domains[i]);
	}
}

static const struct scmi_device_id scmi_id_table[] = {
	{ SCMI_PROTOCOL_POWER, "genpd" },
	{ },
};
MODULE_DEVICE_TABLE(scmi, scmi_id_table);

static struct scmi_driver scmi_power_domain_driver = {
	.name = "scmi-power-domain",
	.probe = scmi_pm_domain_probe,
	.remove = scmi_pm_domain_remove,
	.id_table = scmi_id_table,
};
module_scmi_driver(scmi_power_domain_driver);

MODULE_AUTHOR("Sudeep Holla <[email protected]>");
MODULE_DESCRIPTION("ARM SCMI power domain driver");
MODULE_LICENSE("GPL v2");
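/*
 * Illustrative sketch only, not part of the SCMI power domain driver
 * above: a hypothetical platform-device consumer of one of the power
 * domains that scmi_pm_domain_probe() registers through
 * of_genpd_add_provider_onecell(). For a device with a single
 * "power-domains" phandle, the platform bus attaches the genpd
 * automatically at probe time, so the consumer only has to drive it
 * through runtime PM. The "foo"/"vendor,foo-consumer" names below are
 * made up for this example.
 */
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_consumer_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int ret;

	/* Let runtime PM reach the attached genpd, and thus the SCMI callbacks. */
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return ret;

	/* Resuming powers the domain on via scmi_pd_power_on()... */
	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ...and dropping the last usage count lets genpd power it back off. */
	pm_runtime_put(dev);

	return 0;
}

static const struct of_device_id foo_consumer_of_match[] = {
	{ .compatible = "vendor,foo-consumer" },	/* hypothetical */
	{ }
};
MODULE_DEVICE_TABLE(of, foo_consumer_of_match);

static struct platform_driver foo_consumer_driver = {
	.driver = {
		.name		= "foo-consumer",
		.of_match_table	= foo_consumer_of_match,
	},
	.probe = foo_consumer_probe,
};
module_platform_driver(foo_consumer_driver);

MODULE_DESCRIPTION("Example consumer sketch for an SCMI power domain");
MODULE_LICENSE("GPL");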
// SPDX-License-Identifier: GPL-2.0 #include <linux/err.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "messages.h" #include "ctree.h" #include "extent_map.h" #include "compression.h" #include "btrfs_inode.h" #include "disk-io.h" static struct kmem_cache *extent_map_cache; int __init extent_map_init(void) { extent_map_cache = kmem_cache_create("btrfs_extent_map", sizeof(struct extent_map), 0, 0, NULL); if (!extent_map_cache) return -ENOMEM; return 0; } void __cold extent_map_exit(void) { kmem_cache_destroy(extent_map_cache); } /* * Initialize the extent tree @tree. Should be called for each new inode or * other user of the extent_map interface. */ void extent_map_tree_init(struct extent_map_tree *tree) { tree->root = RB_ROOT; INIT_LIST_HEAD(&tree->modified_extents); rwlock_init(&tree->lock); } /* * Allocate a new extent_map structure. The new structure is returned with a * reference count of one and needs to be freed using free_extent_map() */ struct extent_map *alloc_extent_map(void) { struct extent_map *em; em = kmem_cache_zalloc(extent_map_cache, GFP_NOFS); if (!em) return NULL; RB_CLEAR_NODE(&em->rb_node); refcount_set(&em->refs, 1); INIT_LIST_HEAD(&em->list); return em; } /* * Drop the reference out on @em by one and free the structure if the reference * count hits zero. */ void free_extent_map(struct extent_map *em) { if (!em) return; if (refcount_dec_and_test(&em->refs)) { WARN_ON(extent_map_in_tree(em)); WARN_ON(!list_empty(&em->list)); kmem_cache_free(extent_map_cache, em); } } /* Do the math around the end of an extent, handling wrapping. */ static u64 range_end(u64 start, u64 len) { if (start + len < start) return (u64)-1; return start + len; } static void remove_em(struct btrfs_inode *inode, struct extent_map *em) { struct btrfs_fs_info *fs_info = inode->root->fs_info; rb_erase(&em->rb_node, &inode->extent_tree.root); RB_CLEAR_NODE(&em->rb_node); if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(inode->root))) percpu_counter_dec(&fs_info->evictable_extent_maps); } static int tree_insert(struct rb_root *root, struct extent_map *em) { struct rb_node **p = &root->rb_node; struct rb_node *parent = NULL; struct extent_map *entry = NULL; struct rb_node *orig_parent = NULL; u64 end = range_end(em->start, em->len); while (*p) { parent = *p; entry = rb_entry(parent, struct extent_map, rb_node); if (em->start < entry->start) p = &(*p)->rb_left; else if (em->start >= extent_map_end(entry)) p = &(*p)->rb_right; else return -EEXIST; } orig_parent = parent; while (parent && em->start >= extent_map_end(entry)) { parent = rb_next(parent); entry = rb_entry(parent, struct extent_map, rb_node); } if (parent) if (end > entry->start && em->start < extent_map_end(entry)) return -EEXIST; parent = orig_parent; entry = rb_entry(parent, struct extent_map, rb_node); while (parent && em->start < entry->start) { parent = rb_prev(parent); entry = rb_entry(parent, struct extent_map, rb_node); } if (parent) if (end > entry->start && em->start < extent_map_end(entry)) return -EEXIST; rb_link_node(&em->rb_node, orig_parent, p); rb_insert_color(&em->rb_node, root); return 0; } /* * Search through the tree for an extent_map with a given offset. 
If it can't * be found, try to find some neighboring extents */ static struct rb_node *__tree_search(struct rb_root *root, u64 offset, struct rb_node **prev_or_next_ret) { struct rb_node *n = root->rb_node; struct rb_node *prev = NULL; struct rb_node *orig_prev = NULL; struct extent_map *entry; struct extent_map *prev_entry = NULL; ASSERT(prev_or_next_ret); while (n) { entry = rb_entry(n, struct extent_map, rb_node); prev = n; prev_entry = entry; if (offset < entry->start) n = n->rb_left; else if (offset >= extent_map_end(entry)) n = n->rb_right; else return n; } orig_prev = prev; while (prev && offset >= extent_map_end(prev_entry)) { prev = rb_next(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } /* * Previous extent map found, return as in this case the caller does not * care about the next one. */ if (prev) { *prev_or_next_ret = prev; return NULL; } prev = orig_prev; prev_entry = rb_entry(prev, struct extent_map, rb_node); while (prev && offset < prev_entry->start) { prev = rb_prev(prev); prev_entry = rb_entry(prev, struct extent_map, rb_node); } *prev_or_next_ret = prev; return NULL; } static inline u64 extent_map_block_len(const struct extent_map *em) { if (extent_map_is_compressed(em)) return em->disk_num_bytes; return em->len; } static inline u64 extent_map_block_end(const struct extent_map *em) { const u64 block_start = extent_map_block_start(em); const u64 block_end = block_start + extent_map_block_len(em); if (block_end < block_start) return (u64)-1; return block_end; } static bool can_merge_extent_map(const struct extent_map *em) { if (em->flags & EXTENT_FLAG_PINNED) return false; /* Don't merge compressed extents, we need to know their actual size. */ if (extent_map_is_compressed(em)) return false; if (em->flags & EXTENT_FLAG_LOGGING) return false; /* * We don't want to merge stuff that hasn't been written to the log yet * since it may not reflect exactly what is on disk, and that would be * bad. */ if (!list_empty(&em->list)) return false; return true; } /* Check to see if two extent_map structs are adjacent and safe to merge. */ static bool mergeable_maps(const struct extent_map *prev, const struct extent_map *next) { if (extent_map_end(prev) != next->start) return false; /* * The merged flag is not an on-disk flag, it just indicates we had the * extent maps of 2 (or more) adjacent extents merged, so factor it out. */ if ((prev->flags & ~EXTENT_FLAG_MERGED) != (next->flags & ~EXTENT_FLAG_MERGED)) return false; if (next->disk_bytenr < EXTENT_MAP_LAST_BYTE - 1) return extent_map_block_start(next) == extent_map_block_end(prev); /* HOLES and INLINE extents. */ return next->disk_bytenr == prev->disk_bytenr; } /* * Handle the on-disk data extents merge for @prev and @next. * * @prev: left extent to merge * @next: right extent to merge * @merged: the extent we will not discard after the merge; updated with new values * * After this, one of the two extents is the new merged extent and the other is * removed from the tree and likely freed. Note that @merged is one of @prev/@next * so there is const/non-const aliasing occurring here. * * Only touches disk_bytenr/disk_num_bytes/offset/ram_bytes. * For now only uncompressed regular extent can be merged. */ static void merge_ondisk_extents(const struct extent_map *prev, const struct extent_map *next, struct extent_map *merged) { u64 new_disk_bytenr; u64 new_disk_num_bytes; u64 new_offset; /* @prev and @next should not be compressed. 
*/ ASSERT(!extent_map_is_compressed(prev)); ASSERT(!extent_map_is_compressed(next)); /* * There are two different cases where @prev and @next can be merged. * * 1) They are referring to the same data extent: * * |<----- data extent A ----->| * |<- prev ->|<- next ->| * * 2) They are referring to different data extents but still adjacent: * * |<-- data extent A -->|<-- data extent B -->| * |<- prev ->|<- next ->| * * The calculation here always merges the data extents first, then updates * @offset using the new data extents. * * For case 1), the merged data extent would be the same. * For case 2), we just merge the two data extents into one. */ new_disk_bytenr = min(prev->disk_bytenr, next->disk_bytenr); new_disk_num_bytes = max(prev->disk_bytenr + prev->disk_num_bytes, next->disk_bytenr + next->disk_num_bytes) - new_disk_bytenr; new_offset = prev->disk_bytenr + prev->offset - new_disk_bytenr; merged->disk_bytenr = new_disk_bytenr; merged->disk_num_bytes = new_disk_num_bytes; merged->ram_bytes = new_disk_num_bytes; merged->offset = new_offset; } static void dump_extent_map(struct btrfs_fs_info *fs_info, const char *prefix, struct extent_map *em) { if (!IS_ENABLED(CONFIG_BTRFS_DEBUG)) return; btrfs_crit(fs_info, "%s, start=%llu len=%llu disk_bytenr=%llu disk_num_bytes=%llu ram_bytes=%llu offset=%llu flags=0x%x", prefix, em->start, em->len, em->disk_bytenr, em->disk_num_bytes, em->ram_bytes, em->offset, em->flags); ASSERT(0); } /* Internal sanity checks for btrfs debug builds. */ static void validate_extent_map(struct btrfs_fs_info *fs_info, struct extent_map *em) { if (!IS_ENABLED(CONFIG_BTRFS_DEBUG)) return; if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { if (em->disk_num_bytes == 0) dump_extent_map(fs_info, "zero disk_num_bytes", em); if (em->offset + em->len > em->ram_bytes) dump_extent_map(fs_info, "ram_bytes too small", em); if (em->offset + em->len > em->disk_num_bytes && !extent_map_is_compressed(em)) dump_extent_map(fs_info, "disk_num_bytes too small", em); if (!extent_map_is_compressed(em) && em->ram_bytes != em->disk_num_bytes) dump_extent_map(fs_info, "ram_bytes mismatch with disk_num_bytes for non-compressed em", em); } else if (em->offset) { dump_extent_map(fs_info, "non-zero offset for hole/inline", em); } } static void try_merge_map(struct btrfs_inode *inode, struct extent_map *em) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map *merge = NULL; struct rb_node *rb; /* * We can't modify an extent map that is in the tree and that is being * used by another task, as it can cause that other task to see it in * inconsistent state during the merging. We always have 1 reference for * the tree and 1 for this task (which is unpinning the extent map or * clearing the logging flag), so anything > 2 means it's being used by * other tasks too. 
*/ if (refcount_read(&em->refs) > 2) return; if (!can_merge_extent_map(em)) return; if (em->start != 0) { rb = rb_prev(&em->rb_node); if (rb) merge = rb_entry(rb, struct extent_map, rb_node); if (rb && can_merge_extent_map(merge) && mergeable_maps(merge, em)) { em->start = merge->start; em->len += merge->len; em->generation = max(em->generation, merge->generation); if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) merge_ondisk_extents(merge, em, em); em->flags |= EXTENT_FLAG_MERGED; validate_extent_map(fs_info, em); remove_em(inode, merge); free_extent_map(merge); } } rb = rb_next(&em->rb_node); if (rb) merge = rb_entry(rb, struct extent_map, rb_node); if (rb && can_merge_extent_map(merge) && mergeable_maps(em, merge)) { em->len += merge->len; if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) merge_ondisk_extents(em, merge, em); validate_extent_map(fs_info, em); em->generation = max(em->generation, merge->generation); em->flags |= EXTENT_FLAG_MERGED; remove_em(inode, merge); free_extent_map(merge); } } /* * Unpin an extent from the cache. * * @inode: the inode from which we are unpinning an extent range * @start: logical offset in the file * @len: length of the extent * @gen: generation that this extent has been modified in * * Called after an extent has been written to disk properly. Set the generation * to the generation that actually added the file item to the inode so we know * we need to sync this extent when we call fsync(). * * Returns: 0 on success * -ENOENT when the extent is not found in the tree * -EUCLEAN if the found extent does not match the expected start */ int unpin_extent_cache(struct btrfs_inode *inode, u64 start, u64 len, u64 gen) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *tree = &inode->extent_tree; int ret = 0; struct extent_map *em; write_lock(&tree->lock); em = lookup_extent_mapping(tree, start, len); if (WARN_ON(!em)) { btrfs_warn(fs_info, "no extent map found for inode %llu (root %lld) when unpinning extent range [%llu, %llu), generation %llu", btrfs_ino(inode), btrfs_root_id(inode->root), start, start + len, gen); ret = -ENOENT; goto out; } if (WARN_ON(em->start != start)) { btrfs_warn(fs_info, "found extent map for inode %llu (root %lld) with unexpected start offset %llu when unpinning extent range [%llu, %llu), generation %llu", btrfs_ino(inode), btrfs_root_id(inode->root), em->start, start, start + len, gen); ret = -EUCLEAN; goto out; } em->generation = gen; em->flags &= ~EXTENT_FLAG_PINNED; try_merge_map(inode, em); out: write_unlock(&tree->lock); free_extent_map(em); return ret; } void clear_em_logging(struct btrfs_inode *inode, struct extent_map *em) { lockdep_assert_held_write(&inode->extent_tree.lock); em->flags &= ~EXTENT_FLAG_LOGGING; if (extent_map_in_tree(em)) try_merge_map(inode, em); } static inline void setup_extent_mapping(struct btrfs_inode *inode, struct extent_map *em, int modified) { refcount_inc(&em->refs); ASSERT(list_empty(&em->list)); if (modified) list_add(&em->list, &inode->extent_tree.modified_extents); else try_merge_map(inode, em); } /* * Add a new extent map to an inode's extent map tree. * * @inode: the target inode * @em: map to insert * @modified: indicate whether the given @em should be added to the * modified list, which indicates the extent needs to be logged * * Insert @em into the @inode's extent map tree or perform a simple * forward/backward merge with existing mappings. 
The extent_map struct passed * in will be inserted into the tree directly, with an additional reference * taken, or a reference dropped if the merge attempt was successful. */ static int add_extent_mapping(struct btrfs_inode *inode, struct extent_map *em, int modified) { struct extent_map_tree *tree = &inode->extent_tree; struct btrfs_root *root = inode->root; struct btrfs_fs_info *fs_info = root->fs_info; int ret; lockdep_assert_held_write(&tree->lock); validate_extent_map(fs_info, em); ret = tree_insert(&tree->root, em); if (ret) return ret; setup_extent_mapping(inode, em, modified); if (!btrfs_is_testing(fs_info) && is_fstree(btrfs_root_id(root))) percpu_counter_inc(&fs_info->evictable_extent_maps); return 0; } static struct extent_map * __lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len, int strict) { struct extent_map *em; struct rb_node *rb_node; struct rb_node *prev_or_next = NULL; u64 end = range_end(start, len); rb_node = __tree_search(&tree->root, start, &prev_or_next); if (!rb_node) { if (prev_or_next) rb_node = prev_or_next; else return NULL; } em = rb_entry(rb_node, struct extent_map, rb_node); if (strict && !(end > em->start && start < extent_map_end(em))) return NULL; refcount_inc(&em->refs); return em; } /* * Lookup extent_map that intersects @start + @len range. * * @tree: tree to lookup in * @start: byte offset to start the search * @len: length of the lookup range * * Find and return the first extent_map struct in @tree that intersects the * [start, len] range. There may be additional objects in the tree that * intersect, so check the object returned carefully to make sure that no * additional lookups are needed. */ struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len) { return __lookup_extent_mapping(tree, start, len, 1); } /* * Find a nearby extent map intersecting @start + @len (not an exact search). * * @tree: tree to lookup in * @start: byte offset to start the search * @len: length of the lookup range * * Find and return the first extent_map struct in @tree that intersects the * [start, len] range. * * If one can't be found, any nearby extent may be returned */ struct extent_map *search_extent_mapping(struct extent_map_tree *tree, u64 start, u64 len) { return __lookup_extent_mapping(tree, start, len, 0); } /* * Remove an extent_map from its inode's extent tree. * * @inode: the inode the extent map belongs to * @em: extent map being removed * * Remove @em from the extent tree of @inode. No reference counts are dropped, * and no checks are done to see if the range is in use. 
*/ void remove_extent_mapping(struct btrfs_inode *inode, struct extent_map *em) { struct extent_map_tree *tree = &inode->extent_tree; lockdep_assert_held_write(&tree->lock); WARN_ON(em->flags & EXTENT_FLAG_PINNED); if (!(em->flags & EXTENT_FLAG_LOGGING)) list_del_init(&em->list); remove_em(inode, em); } static void replace_extent_mapping(struct btrfs_inode *inode, struct extent_map *cur, struct extent_map *new, int modified) { struct btrfs_fs_info *fs_info = inode->root->fs_info; struct extent_map_tree *tree = &inode->extent_tree; lockdep_assert_held_write(&tree->lock); validate_extent_map(fs_info, new); WARN_ON(cur->flags & EXTENT_FLAG_PINNED); ASSERT(extent_map_in_tree(cur)); if (!(cur->flags & EXTENT_FLAG_LOGGING)) list_del_init(&cur->list); rb_replace_node(&cur->rb_node, &new->rb_node, &tree->root); RB_CLEAR_NODE(&cur->rb_node); setup_extent_mapping(inode, new, modified); } static struct extent_map *next_extent_map(const struct extent_map *em) { struct rb_node *next; next = rb_next(&em->rb_node); if (!next) return NULL; return container_of(next, struct extent_map, rb_node); } static struct extent_map *prev_extent_map(struct extent_map *em) { struct rb_node *prev; prev = rb_prev(&em->rb_node); if (!prev) return NULL; return container_of(prev, struct extent_map, rb_node); } /* * Helper for btrfs_get_extent. Given an existing extent in the tree, * the existing extent is the nearest extent to map_start, * and an extent that you want to insert, deal with overlap and insert * the best fitted new extent into the tree. */ static noinline int merge_extent_mapping(struct btrfs_inode *inode, struct extent_map *existing, struct extent_map *em, u64 map_start) { struct extent_map *prev; struct extent_map *next; u64 start; u64 end; u64 start_diff; if (map_start < em->start || map_start >= extent_map_end(em)) return -EINVAL; if (existing->start > map_start) { next = existing; prev = prev_extent_map(next); } else { prev = existing; next = next_extent_map(prev); } start = prev ? extent_map_end(prev) : em->start; start = max_t(u64, start, em->start); end = next ? next->start : extent_map_end(em); end = min_t(u64, end, extent_map_end(em)); start_diff = start - em->start; em->start = start; em->len = end - start; if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) em->offset += start_diff; return add_extent_mapping(inode, em, 0); } /* * Add extent mapping into an inode's extent map tree. * * @inode: target inode * @em_in: extent we are inserting * @start: start of the logical range btrfs_get_extent() is requesting * @len: length of the logical range btrfs_get_extent() is requesting * * Note that @em_in's range may be different from [start, start+len), * but they must be overlapped. * * Insert @em_in into the inode's extent map tree. In case there is an * overlapping range, handle the -EEXIST by either: * a) Returning the existing extent in @em_in if @start is within the * existing em. * b) Merge the existing extent with @em_in passed in. * * Return 0 on success, otherwise -EEXIST. * */ int btrfs_add_extent_mapping(struct btrfs_inode *inode, struct extent_map **em_in, u64 start, u64 len) { int ret; struct extent_map *em = *em_in; struct btrfs_fs_info *fs_info = inode->root->fs_info; /* * Tree-checker should have rejected any inline extent with non-zero * file offset. Here just do a sanity check. */ if (em->disk_bytenr == EXTENT_MAP_INLINE) ASSERT(em->start == 0); ret = add_extent_mapping(inode, em, 0); /* it is possible that someone inserted the extent into the tree * while we had the lock dropped. 
It is also possible that * an overlapping map exists in the tree */ if (ret == -EEXIST) { struct extent_map *existing; existing = search_extent_mapping(&inode->extent_tree, start, len); trace_btrfs_handle_em_exist(fs_info, existing, em, start, len); /* * existing will always be non-NULL, since there must be * extent causing the -EEXIST. */ if (start >= existing->start && start < extent_map_end(existing)) { free_extent_map(em); *em_in = existing; ret = 0; } else { u64 orig_start = em->start; u64 orig_len = em->len; /* * The existing extent map is the one nearest to * the [start, start + len) range which overlaps */ ret = merge_extent_mapping(inode, existing, em, start); if (WARN_ON(ret)) { free_extent_map(em); *em_in = NULL; btrfs_warn(fs_info, "extent map merge error existing [%llu, %llu) with em [%llu, %llu) start %llu", existing->start, extent_map_end(existing), orig_start, orig_start + orig_len, start); } free_extent_map(existing); } } ASSERT(ret == 0 || ret == -EEXIST); return ret; } /* * Drop all extent maps from a tree in the fastest possible way, rescheduling * if needed. This avoids searching the tree, from the root down to the first * extent map, before each deletion. */ static void drop_all_extent_maps_fast(struct btrfs_inode *inode) { struct extent_map_tree *tree = &inode->extent_tree; struct rb_node *node; write_lock(&tree->lock); node = rb_first(&tree->root); while (node) { struct extent_map *em; struct rb_node *next = rb_next(node); em = rb_entry(node, struct extent_map, rb_node); em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); remove_extent_mapping(inode, em); free_extent_map(em); if (cond_resched_rwlock_write(&tree->lock)) node = rb_first(&tree->root); else node = next; } write_unlock(&tree->lock); } /* * Drop all extent maps in a given range. * * @inode: The target inode. * @start: Start offset of the range. * @end: End offset of the range (inclusive value). * @skip_pinned: Indicate if pinned extent maps should be ignored or not. * * This drops all the extent maps that intersect the given range [@start, @end]. * Extent maps that partially overlap the range and extend behind or beyond it, * are split. * The caller should have locked an appropriate file range in the inode's io * tree before calling this function. */ void btrfs_drop_extent_map_range(struct btrfs_inode *inode, u64 start, u64 end, bool skip_pinned) { struct extent_map *split; struct extent_map *split2; struct extent_map *em; struct extent_map_tree *em_tree = &inode->extent_tree; u64 len = end - start + 1; WARN_ON(end < start); if (end == (u64)-1) { if (start == 0 && !skip_pinned) { drop_all_extent_maps_fast(inode); return; } len = (u64)-1; } else { /* Make end offset exclusive for use in the loop below. */ end++; } /* * It's ok if we fail to allocate the extent maps, see the comment near * the bottom of the loop below. We only need two spare extent maps in * the worst case, where the first extent map that intersects our range * starts before the range and the last extent map that intersects our * range ends after our range (and they might be the same extent map), * because we need to split those two extent maps at the boundaries. */ split = alloc_extent_map(); split2 = alloc_extent_map(); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); while (em) { /* extent_map_end() returns exclusive value (last byte + 1). 
*/ const u64 em_end = extent_map_end(em); struct extent_map *next_em = NULL; u64 gen; unsigned long flags; bool modified; if (em_end < end) { next_em = next_extent_map(em); if (next_em) { if (next_em->start < end) refcount_inc(&next_em->refs); else next_em = NULL; } } if (skip_pinned && (em->flags & EXTENT_FLAG_PINNED)) { start = em_end; goto next; } flags = em->flags; /* * In case we split the extent map, we want to preserve the * EXTENT_FLAG_LOGGING flag on our extent map, but we don't want * it on the new extent maps. */ em->flags &= ~(EXTENT_FLAG_PINNED | EXTENT_FLAG_LOGGING); modified = !list_empty(&em->list); /* * The extent map does not cross our target range, so no need to * split it, we can remove it directly. */ if (em->start >= start && em_end <= end) goto remove_em; gen = em->generation; if (em->start < start) { if (!split) { split = split2; split2 = NULL; if (!split) goto remove_em; } split->start = em->start; split->len = start - em->start; if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { split->disk_bytenr = em->disk_bytenr; split->disk_num_bytes = em->disk_num_bytes; split->offset = em->offset; split->ram_bytes = em->ram_bytes; } else { split->disk_bytenr = em->disk_bytenr; split->disk_num_bytes = 0; split->offset = 0; split->ram_bytes = split->len; } split->generation = gen; split->flags = flags; replace_extent_mapping(inode, em, split, modified); free_extent_map(split); split = split2; split2 = NULL; } if (em_end > end) { if (!split) { split = split2; split2 = NULL; if (!split) goto remove_em; } split->start = end; split->len = em_end - end; split->disk_bytenr = em->disk_bytenr; split->flags = flags; split->generation = gen; if (em->disk_bytenr < EXTENT_MAP_LAST_BYTE) { split->disk_num_bytes = em->disk_num_bytes; split->offset = em->offset + end - em->start; split->ram_bytes = em->ram_bytes; } else { split->disk_num_bytes = 0; split->offset = 0; split->ram_bytes = split->len; } if (extent_map_in_tree(em)) { replace_extent_mapping(inode, em, split, modified); } else { int ret; ret = add_extent_mapping(inode, split, modified); /* Logic error, shouldn't happen. */ ASSERT(ret == 0); if (WARN_ON(ret != 0) && modified) btrfs_set_inode_full_sync(inode); } free_extent_map(split); split = NULL; } remove_em: if (extent_map_in_tree(em)) { /* * If the extent map is still in the tree it means that * either of the following is true: * * 1) It fits entirely in our range (doesn't end beyond * it or starts before it); * * 2) It starts before our range and/or ends after our * range, and we were not able to allocate the extent * maps for split operations, @split and @split2. * * If we are at case 2) then we just remove the entire * extent map - this is fine since if anyone needs it to * access the subranges outside our range, will just * load it again from the subvolume tree's file extent * item. However if the extent map was in the list of * modified extents, then we must mark the inode for a * full fsync, otherwise a fast fsync will miss this * extent if it's new and needs to be logged. */ if ((em->start < start || em_end > end) && modified) { ASSERT(!split); btrfs_set_inode_full_sync(inode); } remove_extent_mapping(inode, em); } /* * Once for the tree reference (we replaced or removed the * extent map from the tree). */ free_extent_map(em); next: /* Once for us (for our lookup reference). */ free_extent_map(em); em = next_em; } write_unlock(&em_tree->lock); free_extent_map(split); free_extent_map(split2); } /* * Replace a range in the inode's extent map tree with a new extent map. 
* * @inode: The target inode. * @new_em: The new extent map to add to the inode's extent map tree. * @modified: Indicate if the new extent map should be added to the list of * modified extents (for fast fsync tracking). * * Drops all the extent maps in the inode's extent map tree that intersect the * range of the new extent map and adds the new extent map to the tree. * The caller should have locked an appropriate file range in the inode's io * tree before calling this function. */ int btrfs_replace_extent_map_range(struct btrfs_inode *inode, struct extent_map *new_em, bool modified) { const u64 end = new_em->start + new_em->len - 1; struct extent_map_tree *tree = &inode->extent_tree; int ret; ASSERT(!extent_map_in_tree(new_em)); /* * The caller has locked an appropriate file range in the inode's io * tree, but getting -EEXIST when adding the new extent map can still * happen in case there are extents that partially cover the range, and * this is due to two tasks operating on different parts of the extent. * See commit 18e83ac75bfe67 ("Btrfs: fix unexpected EEXIST from * btrfs_get_extent") for an example and details. */ do { btrfs_drop_extent_map_range(inode, new_em->start, end, false); write_lock(&tree->lock); ret = add_extent_mapping(inode, new_em, modified); write_unlock(&tree->lock); } while (ret == -EEXIST); return ret; } /* * Split off the first pre bytes from the extent_map at [start, start + len], * and set the block_start for it to new_logical. * * This function is used when an ordered_extent needs to be split. */ int split_extent_map(struct btrfs_inode *inode, u64 start, u64 len, u64 pre, u64 new_logical) { struct extent_map_tree *em_tree = &inode->extent_tree; struct extent_map *em; struct extent_map *split_pre = NULL; struct extent_map *split_mid = NULL; int ret = 0; unsigned long flags; ASSERT(pre != 0); ASSERT(pre < len); split_pre = alloc_extent_map(); if (!split_pre) return -ENOMEM; split_mid = alloc_extent_map(); if (!split_mid) { ret = -ENOMEM; goto out_free_pre; } lock_extent(&inode->io_tree, start, start + len - 1, NULL); write_lock(&em_tree->lock); em = lookup_extent_mapping(em_tree, start, len); if (!em) { ret = -EIO; goto out_unlock; } ASSERT(em->len == len); ASSERT(!extent_map_is_compressed(em)); ASSERT(em->disk_bytenr < EXTENT_MAP_LAST_BYTE); ASSERT(em->flags & EXTENT_FLAG_PINNED); ASSERT(!(em->flags & EXTENT_FLAG_LOGGING)); ASSERT(!list_empty(&em->list)); flags = em->flags; em->flags &= ~EXTENT_FLAG_PINNED; /* First, replace the em with a new extent_map starting from * em->start */ split_pre->start = em->start; split_pre->len = pre; split_pre->disk_bytenr = new_logical; split_pre->disk_num_bytes = split_pre->len; split_pre->offset = 0; split_pre->ram_bytes = split_pre->len; split_pre->flags = flags; split_pre->generation = em->generation; replace_extent_mapping(inode, em, split_pre, 1); /* * Now we only have an extent_map at: * [em->start, em->start + pre] */ /* Insert the middle extent_map. 
*/ split_mid->start = em->start + pre; split_mid->len = em->len - pre; split_mid->disk_bytenr = extent_map_block_start(em) + pre; split_mid->disk_num_bytes = split_mid->len; split_mid->offset = 0; split_mid->ram_bytes = split_mid->len; split_mid->flags = flags; split_mid->generation = em->generation; add_extent_mapping(inode, split_mid, 1); /* Once for us */ free_extent_map(em); /* Once for the tree */ free_extent_map(em); out_unlock: write_unlock(&em_tree->lock); unlock_extent(&inode->io_tree, start, start + len - 1, NULL); free_extent_map(split_mid); out_free_pre: free_extent_map(split_pre); return ret; } struct btrfs_em_shrink_ctx { long nr_to_scan; long scanned; }; static long btrfs_scan_inode(struct btrfs_inode *inode, struct btrfs_em_shrink_ctx *ctx) { struct btrfs_fs_info *fs_info = inode->root->fs_info; const u64 cur_fs_gen = btrfs_get_fs_generation(fs_info); struct extent_map_tree *tree = &inode->extent_tree; long nr_dropped = 0; struct rb_node *node; /* * Take the mmap lock so that we serialize with the inode logging phase * of fsync because we may need to set the full sync flag on the inode, * in case we have to remove extent maps in the tree's list of modified * extents. If we set the full sync flag in the inode while an fsync is * in progress, we may risk missing new extents because before the flag * is set, fsync decides to only wait for writeback to complete and then * during inode logging it sees the flag set and uses the subvolume tree * to find new extents, which may not be there yet because ordered * extents haven't completed yet. * * We also do a try lock because otherwise we could deadlock. This is * because the shrinker for this filesystem may be invoked while we are * in a path that is holding the mmap lock in write mode. For example in * a reflink operation while COWing an extent buffer, when allocating * pages for a new extent buffer and under memory pressure, the shrinker * may be invoked, and therefore we would deadlock by attempting to read * lock the mmap lock while we are holding already a write lock on it. */ if (!down_read_trylock(&inode->i_mmap_lock)) return 0; /* * We want to be fast so if the lock is busy we don't want to spend time * waiting for it - either some task is about to do IO for the inode or * we may have another task shrinking extent maps, here in this code, so * skip this inode. */ if (!write_trylock(&tree->lock)) { up_read(&inode->i_mmap_lock); return 0; } node = rb_first(&tree->root); while (node) { struct rb_node *next = rb_next(node); struct extent_map *em; em = rb_entry(node, struct extent_map, rb_node); ctx->scanned++; if (em->flags & EXTENT_FLAG_PINNED) goto next; /* * If the inode is in the list of modified extents (new) and its * generation is the same (or is greater than) the current fs * generation, it means it was not yet persisted so we have to * set the full sync flag so that the next fsync will not miss * it. */ if (!list_empty(&em->list) && em->generation >= cur_fs_gen) btrfs_set_inode_full_sync(inode); remove_extent_mapping(inode, em); trace_btrfs_extent_map_shrinker_remove_em(inode, em); /* Drop the reference for the tree. */ free_extent_map(em); nr_dropped++; next: if (ctx->scanned >= ctx->nr_to_scan) break; /* * Stop if we need to reschedule or there's contention on the * lock. This is to avoid slowing other tasks trying to take the * lock. 
*/ if (need_resched() || rwlock_needbreak(&tree->lock) || btrfs_fs_closing(fs_info)) break; node = next; } write_unlock(&tree->lock); up_read(&inode->i_mmap_lock); return nr_dropped; } static long btrfs_scan_root(struct btrfs_root *root, struct btrfs_em_shrink_ctx *ctx) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_inode *inode; long nr_dropped = 0; u64 min_ino = fs_info->em_shrinker_last_ino + 1; inode = btrfs_find_first_inode(root, min_ino); while (inode) { nr_dropped += btrfs_scan_inode(inode, ctx); min_ino = btrfs_ino(inode) + 1; fs_info->em_shrinker_last_ino = btrfs_ino(inode); btrfs_add_delayed_iput(inode); if (ctx->scanned >= ctx->nr_to_scan || btrfs_fs_closing(inode->root->fs_info)) break; cond_resched(); inode = btrfs_find_first_inode(root, min_ino); } if (inode) { /* * There are still inodes in this root or we happened to process * the last one and reached the scan limit. In either case set * the current root to this one, so we'll resume from the next * inode if there is one or we will find out this was the last * one and move to the next root. */ fs_info->em_shrinker_last_root = btrfs_root_id(root); } else { /* * No more inodes in this root, set extent_map_shrinker_last_ino to 0 so * that when processing the next root we start from its first inode. */ fs_info->em_shrinker_last_ino = 0; fs_info->em_shrinker_last_root = btrfs_root_id(root) + 1; } return nr_dropped; } static void btrfs_extent_map_shrinker_worker(struct work_struct *work) { struct btrfs_fs_info *fs_info; struct btrfs_em_shrink_ctx ctx; u64 start_root_id; u64 next_root_id; bool cycled = false; long nr_dropped = 0; fs_info = container_of(work, struct btrfs_fs_info, em_shrinker_work); ctx.scanned = 0; ctx.nr_to_scan = atomic64_read(&fs_info->em_shrinker_nr_to_scan); start_root_id = fs_info->em_shrinker_last_root; next_root_id = fs_info->em_shrinker_last_root; if (trace_btrfs_extent_map_shrinker_scan_enter_enabled()) { s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); trace_btrfs_extent_map_shrinker_scan_enter(fs_info, nr); } while (ctx.scanned < ctx.nr_to_scan && !btrfs_fs_closing(fs_info)) { struct btrfs_root *root; unsigned long count; cond_resched(); spin_lock(&fs_info->fs_roots_radix_lock); count = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)&root, (unsigned long)next_root_id, 1); if (count == 0) { spin_unlock(&fs_info->fs_roots_radix_lock); if (start_root_id > 0 && !cycled) { next_root_id = 0; fs_info->em_shrinker_last_root = 0; fs_info->em_shrinker_last_ino = 0; cycled = true; continue; } break; } next_root_id = btrfs_root_id(root) + 1; root = btrfs_grab_root(root); spin_unlock(&fs_info->fs_roots_radix_lock); if (!root) continue; if (is_fstree(btrfs_root_id(root))) nr_dropped += btrfs_scan_root(root, &ctx); btrfs_put_root(root); } if (trace_btrfs_extent_map_shrinker_scan_exit_enabled()) { s64 nr = percpu_counter_sum_positive(&fs_info->evictable_extent_maps); trace_btrfs_extent_map_shrinker_scan_exit(fs_info, nr_dropped, nr); } atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0); } void btrfs_free_extent_maps(struct btrfs_fs_info *fs_info, long nr_to_scan) { /* * Do nothing if the shrinker is already running. In case of high memory * pressure we can have a lot of tasks calling us and all passing the * same nr_to_scan value, but in reality we may need only to free * nr_to_scan extent maps (or less). In case we need to free more than * that, we will be called again by the fs shrinker, so no worries about * not doing enough work to reclaim memory from extent maps. 
* We can also be repeatedly called with the same nr_to_scan value * simply because the shrinker runs asynchronously and multiple calls * to this function are made before the shrinker does enough progress. * * That's why we set the atomic counter to nr_to_scan only if its * current value is zero, instead of incrementing the counter by * nr_to_scan. */ if (atomic64_cmpxchg(&fs_info->em_shrinker_nr_to_scan, 0, nr_to_scan) != 0) return; queue_work(system_unbound_wq, &fs_info->em_shrinker_work); } void btrfs_init_extent_map_shrinker_work(struct btrfs_fs_info *fs_info) { atomic64_set(&fs_info->em_shrinker_nr_to_scan, 0); INIT_WORK(&fs_info->em_shrinker_work, btrfs_extent_map_shrinker_worker); }
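/*
 * Standalone illustration, not part of extent_map.c above: the
 * exclusive-end overlap test used by __lookup_extent_mapping() and
 * tree_insert(), together with the saturating range_end() arithmetic.
 * This is plain userspace C with uint64_t standing in for u64; the
 * sketch_range_end() and ranges_overlap() names are local to this
 * example only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Clamp to U64_MAX instead of wrapping when start + len overflows (cf. range_end()). */
static uint64_t sketch_range_end(uint64_t start, uint64_t len)
{
	if (start + len < start)
		return (uint64_t)-1;
	return start + len;
}

/*
 * Ranges [a_start, a_end) and [b_start, b_end) with exclusive ends
 * intersect iff each one starts strictly before the other one ends,
 * which is the "end > em->start && start < extent_map_end(em)" check
 * performed by __lookup_extent_mapping().
 */
static bool ranges_overlap(uint64_t a_start, uint64_t a_len,
			   uint64_t b_start, uint64_t b_len)
{
	uint64_t a_end = sketch_range_end(a_start, a_len);
	uint64_t b_end = sketch_range_end(b_start, b_len);

	return a_end > b_start && b_end > a_start;
}

int main(void)
{
	/* Adjacent extents do not overlap: [0, 4096) vs [4096, 8192). */
	printf("%d\n", ranges_overlap(0, 4096, 4096, 4096));		/* 0 */

	/* A range covering part of another does: [0, 8192) vs [4096, 8192). */
	printf("%d\n", ranges_overlap(0, 8192, 4096, 4096));		/* 1 */

	/* A length that would wrap past U64_MAX is clamped, so the test stays sane. */
	printf("%d\n", ranges_overlap((uint64_t)-2, 100, 0, 10));	/* 0 */

	return 0;
}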
// SPDX-License-Identifier: GPL-2.0-only /* * * Copyright 2010-2011 Paul Mackerras, IBM Corp. <[email protected]> */ #include <linux/types.h> #include <linux/string.h> #include <linux/kvm.h> #include <linux/kvm_host.h> #include <linux/hugetlb.h> #include <linux/module.h> #include <linux/log2.h> #include <linux/sizes.h> #include <asm/trace.h> #include <asm/kvm_ppc.h> #include <asm/kvm_book3s.h> #include <asm/book3s/64/mmu-hash.h> #include <asm/hvcall.h> #include <asm/synch.h> #include <asm/ppc-opcode.h> #include <asm/pte-walk.h> /* Translate address of a vmalloc'd thing to a linear map address */ static void *real_vmalloc_addr(void *addr) { return __va(ppc_find_vmap_phys((unsigned long)addr)); } /* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */ static int global_invalidates(struct kvm *kvm) { int global; int cpu; /* * If there is only one vcore, and it's currently running, * as indicated by local_paca->kvm_hstate.kvm_vcpu being set, * we can use tlbiel as long as we mark all other physical * cores as potentially having stale TLB entries for this lpid. * Otherwise, don't use tlbiel. */ if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu) global = 0; else global = 1; /* LPID has been switched to host if in virt mode so can't do local */ if (!global && (mfmsr() & (MSR_IR|MSR_DR))) global = 1; if (!global) { /* any other core might now have stale TLB entries... */ smp_wmb(); cpumask_setall(&kvm->arch.need_tlb_flush); cpu = local_paca->kvm_hstate.kvm_vcore->pcpu; cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush); } return global; } /* * Add this HPTE into the chain for the real page. * Must be called with the chain locked; it unlocks the chain. */ void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev, unsigned long *rmap, long pte_index, int realmode) { struct revmap_entry *head, *tail; unsigned long i; if (*rmap & KVMPPC_RMAP_PRESENT) { i = *rmap & KVMPPC_RMAP_INDEX; head = &kvm->arch.hpt.rev[i]; if (realmode) head = real_vmalloc_addr(head); tail = &kvm->arch.hpt.rev[head->back]; if (realmode) tail = real_vmalloc_addr(tail); rev->forw = i; rev->back = head->back; tail->forw = pte_index; head->back = pte_index; } else { rev->forw = rev->back = pte_index; *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | pte_index | KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_HPT; } unlock_rmap(rmap); } EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain); /* Update the dirty bitmap of a memslot */ void kvmppc_update_dirty_map(const struct kvm_memory_slot *memslot, unsigned long gfn, unsigned long psize) { unsigned long npages; if (!psize || !memslot->dirty_bitmap) return; npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE; gfn -= memslot->base_gfn; set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages); } EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map); static void kvmppc_set_dirty_from_hpte(struct kvm *kvm, unsigned long hpte_v, unsigned long hpte_gr) { struct kvm_memory_slot *memslot; unsigned long gfn; unsigned long psize; psize = kvmppc_actual_pgsz(hpte_v, hpte_gr); gfn = hpte_rpn(hpte_gr, psize); memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); if (memslot && memslot->dirty_bitmap) kvmppc_update_dirty_map(memslot, gfn, psize); } /* Returns a pointer to the revmap entry for the page mapped by a HPTE */ static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v, unsigned long hpte_gr, struct kvm_memory_slot **memslotp, unsigned long *gfnp) { struct kvm_memory_slot *memslot; unsigned long *rmap; unsigned long gfn; gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, 
hpte_gr)); memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); if (memslotp) *memslotp = memslot; if (gfnp) *gfnp = gfn; if (!memslot) return NULL; rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]); return rmap; } /* Remove this HPTE from the chain for a real page */ static void remove_revmap_chain(struct kvm *kvm, long pte_index, struct revmap_entry *rev, unsigned long hpte_v, unsigned long hpte_r) { struct revmap_entry *next, *prev; unsigned long ptel, head; unsigned long *rmap; unsigned long rcbits; struct kvm_memory_slot *memslot; unsigned long gfn; rcbits = hpte_r & (HPTE_R_R | HPTE_R_C); ptel = rev->guest_rpte |= rcbits; rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn); if (!rmap) return; lock_rmap(rmap); head = *rmap & KVMPPC_RMAP_INDEX; next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]); prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]); next->back = rev->back; prev->forw = rev->forw; if (head == pte_index) { head = rev->forw; if (head == pte_index) *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); else *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head; } *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT; if (rcbits & HPTE_R_C) kvmppc_update_dirty_map(memslot, gfn, kvmppc_actual_pgsz(hpte_v, hpte_r)); unlock_rmap(rmap); } long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel, pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret) { unsigned long i, pa, gpa, gfn, psize; unsigned long slot_fn, hva; __be64 *hpte; struct revmap_entry *rev; unsigned long g_ptel; struct kvm_memory_slot *memslot; unsigned hpage_shift; bool is_ci; unsigned long *rmap; pte_t *ptep; unsigned int writing; unsigned long mmu_seq; unsigned long rcbits; if (kvm_is_radix(kvm)) return H_FUNCTION; /* * The HPTE gets used by compute_tlbie_rb() to set TLBIE bits, so * these functions should work together -- must ensure a guest can not * cause problems with the TLBIE that KVM executes. */ if ((pteh >> HPTE_V_SSIZE_SHIFT) & 0x2) { /* B=0b1x is a reserved value, disallow it. */ return H_PARAMETER; } psize = kvmppc_actual_pgsz(pteh, ptel); if (!psize) return H_PARAMETER; writing = hpte_is_writable(ptel); pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID); ptel &= ~HPTE_GR_RESERVED; g_ptel = ptel; /* used later to detect if we might have been invalidated */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); /* Find the memslot (if any) for this address */ gpa = (ptel & HPTE_R_RPN) & ~(psize - 1); gfn = gpa >> PAGE_SHIFT; memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); pa = 0; is_ci = false; rmap = NULL; if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) { /* Emulated MMIO - mark this with key=31 */ pteh |= HPTE_V_ABSENT; ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO; goto do_insert; } /* Check if the requested page fits entirely in the memslot. 
*/ if (!slot_is_aligned(memslot, psize)) return H_PARAMETER; slot_fn = gfn - memslot->base_gfn; rmap = &memslot->arch.rmap[slot_fn]; /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &hpage_shift); if (ptep) { pte_t pte; unsigned int host_pte_size; if (hpage_shift) host_pte_size = 1ul << hpage_shift; else host_pte_size = PAGE_SIZE; /* * We should always find the guest page size * to <= host page size, if host is using hugepage */ if (host_pte_size < psize) { arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return H_PARAMETER; } pte = kvmppc_read_update_linux_pte(ptep, writing); if (pte_present(pte) && !pte_protnone(pte)) { if (writing && !pte_write(pte)) /* make the actual HPTE be read-only */ ptel = hpte_make_readonly(ptel); is_ci = pte_ci(pte); pa = pte_pfn(pte) << PAGE_SHIFT; pa |= hva & (host_pte_size - 1); pa |= gpa & ~PAGE_MASK; } } arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1); ptel |= pa; if (pa) pteh |= HPTE_V_VALID; else { pteh |= HPTE_V_ABSENT; ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO); } /*If we had host pte mapping then Check WIMG */ if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) { if (is_ci) return H_PARAMETER; /* * Allow guest to map emulated device memory as * uncacheable, but actually make it cacheable. */ ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G); ptel |= HPTE_R_M; } /* Find and lock the HPTEG slot to use */ do_insert: if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; if (likely((flags & H_EXACT) == 0)) { pte_index &= ~7UL; hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); for (i = 0; i < 8; ++i) { if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 && try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | HPTE_V_ABSENT)) break; hpte += 2; } if (i == 8) { /* * Since try_lock_hpte doesn't retry (not even stdcx. * failures), it could be that there is a free slot * but we transiently failed to lock it. Try again, * actually locking each slot and checking it. 
*/ hpte -= 16; for (i = 0; i < 8; ++i) { u64 pte; while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); pte = be64_to_cpu(hpte[0]); if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT))) break; __unlock_hpte(hpte, pte); hpte += 2; } if (i == 8) return H_PTEG_FULL; } pte_index += i; } else { hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID | HPTE_V_ABSENT)) { /* Lock the slot and check again */ u64 pte; while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); pte = be64_to_cpu(hpte[0]); if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) { __unlock_hpte(hpte, pte); return H_PTEG_FULL; } } } /* Save away the guest's idea of the second HPTE dword */ rev = &kvm->arch.hpt.rev[pte_index]; if (realmode) rev = real_vmalloc_addr(rev); if (rev) { rev->guest_rpte = g_ptel; note_hpte_modification(kvm, rev); } /* Link HPTE into reverse-map chain */ if (pteh & HPTE_V_VALID) { if (realmode) rmap = real_vmalloc_addr(rmap); lock_rmap(rmap); /* Check for pending invalidations under the rmap chain lock */ if (mmu_invalidate_retry(kvm, mmu_seq)) { /* inval in progress, write a non-present HPTE */ pteh |= HPTE_V_ABSENT; pteh &= ~HPTE_V_VALID; ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO); unlock_rmap(rmap); } else { kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index, realmode); /* Only set R/C in real HPTE if already set in *rmap */ rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C); } } /* Convert to new format on P9 */ if (cpu_has_feature(CPU_FTR_ARCH_300)) { ptel = hpte_old_to_new_r(pteh, ptel); pteh = hpte_old_to_new_v(pteh); } hpte[1] = cpu_to_be64(ptel); /* Write the first HPTE dword, unlocking the HPTE and making it valid */ eieio(); __unlock_hpte(hpte, pteh); asm volatile("ptesync" : : : "memory"); *pte_idx_ret = pte_index; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_do_h_enter); long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, long pte_index, unsigned long pteh, unsigned long ptel) { return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel, vcpu->arch.pgdir, true, &vcpu->arch.regs.gpr[4]); } EXPORT_SYMBOL_GPL(kvmppc_h_enter); #ifdef __BIG_ENDIAN__ #define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) #else #define LOCK_TOKEN (*(u32 *)(&get_paca()->paca_index)) #endif static inline int is_mmio_hpte(unsigned long v, unsigned long r) { return ((v & HPTE_V_ABSENT) && (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) == (HPTE_R_KEY_HI | HPTE_R_KEY_LO)); } static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid) { if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) { /* Radix flush for a hash guest */ unsigned long rb,rs,prs,r,ric; rb = PPC_BIT(52); /* IS = 2 */ rs = 0; /* lpid = 0 */ prs = 0; /* partition scoped */ r = 1; /* radix format */ ric = 0; /* RIC_FLSUH_TLB */ /* * Need the extra ptesync to make sure we don't * re-order the tlbie */ asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1) : : "r"(rb), "i"(r), "i"(prs), "i"(ric), "r"(rs) : "memory"); } if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) { asm volatile("ptesync": : :"memory"); asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rb_value), "r" (lpid)); } } static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues, long npages, int global, bool need_sync) { long i; /* * We use the POWER9 5-operand versions of tlbie and tlbiel here. * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores * the RS field, this is backwards-compatible with P7 and P8. 
*/ if (global) { if (need_sync) asm volatile("ptesync" : : : "memory"); for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (kvm->arch.lpid)); } fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid); asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } else { if (need_sync) asm volatile("ptesync" : : : "memory"); for (i = 0; i < npages; ++i) { asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : : "r" (rbvalues[i]), "r" (0)); } asm volatile("ptesync" : : : "memory"); } } long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags, unsigned long pte_index, unsigned long avpn, unsigned long *hpret) { __be64 *hpte; unsigned long v, r, rb; struct revmap_entry *rev; u64 pte, orig_pte, pte_r; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); pte = orig_pte = be64_to_cpu(hpte[0]); pte_r = be64_to_cpu(hpte[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { pte = hpte_new_to_old_v(pte, pte_r); pte_r = hpte_new_to_old_r(pte_r); } if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (pte & avpn) != 0)) { __unlock_hpte(hpte, orig_pte); return H_NOT_FOUND; } rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); v = pte & ~HPTE_V_HVLOCK; if (v & HPTE_V_VALID) { hpte[0] &= ~cpu_to_be64(HPTE_V_VALID); rb = compute_tlbie_rb(v, pte_r, pte_index); do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true); /* * The reference (R) and change (C) bits in a HPT * entry can be set by hardware at any time up until * the HPTE is invalidated and the TLB invalidation * sequence has completed. This means that when * removing a HPTE, we need to re-read the HPTE after * the invalidation sequence has completed in order to * obtain reliable values of R and C. 
*/ remove_revmap_chain(kvm, pte_index, rev, v, be64_to_cpu(hpte[1])); } r = rev->guest_rpte & ~HPTE_GR_RESERVED; note_hpte_modification(kvm, rev); unlock_hpte(hpte, 0); if (is_mmio_hpte(v, pte_r)) atomic64_inc(&kvm->arch.mmio_update); if (v & HPTE_V_ABSENT) v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID; hpret[0] = v; hpret[1] = r; return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_do_h_remove); long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index, unsigned long avpn) { return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn, &vcpu->arch.regs.gpr[4]); } EXPORT_SYMBOL_GPL(kvmppc_h_remove); long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu) { struct kvm *kvm = vcpu->kvm; unsigned long *args = &vcpu->arch.regs.gpr[4]; __be64 *hp, *hptes[4]; unsigned long tlbrb[4]; long int i, j, k, n, found, indexes[4]; unsigned long flags, req, pte_index, rcbits; int global; long int ret = H_SUCCESS; struct revmap_entry *rev, *revs[4]; u64 hp0, hp1; if (kvm_is_radix(kvm)) return H_FUNCTION; global = global_invalidates(kvm); for (i = 0; i < 4 && ret == H_SUCCESS; ) { n = 0; for (; i < 4; ++i) { j = i * 2; pte_index = args[j]; flags = pte_index >> 56; pte_index &= ((1ul << 56) - 1); req = flags >> 6; flags &= 3; if (req == 3) { /* no more requests */ i = 4; break; } if (req != 1 || flags == 3 || pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) { /* parameter error */ args[j] = ((0xa0 | flags) << 56) + pte_index; ret = H_PARAMETER; break; } hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4)); /* to avoid deadlock, don't spin except for first */ if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) { if (n) break; while (!try_lock_hpte(hp, HPTE_V_HVLOCK)) cpu_relax(); } found = 0; hp0 = be64_to_cpu(hp[0]); hp1 = be64_to_cpu(hp[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hp0 = hpte_new_to_old_v(hp0, hp1); hp1 = hpte_new_to_old_r(hp1); } if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) { switch (flags & 3) { case 0: /* absolute */ found = 1; break; case 1: /* andcond */ if (!(hp0 & args[j + 1])) found = 1; break; case 2: /* AVPN */ if ((hp0 & ~0x7fUL) == args[j + 1]) found = 1; break; } } if (!found) { hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK); args[j] = ((0x90 | flags) << 56) + pte_index; continue; } args[j] = ((0x80 | flags) << 56) + pte_index; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); note_hpte_modification(kvm, rev); if (!(hp0 & HPTE_V_VALID)) { /* insert R and C bits from PTE */ rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); hp[0] = 0; if (is_mmio_hpte(hp0, hp1)) atomic64_inc(&kvm->arch.mmio_update); continue; } /* leave it locked */ hp[0] &= ~cpu_to_be64(HPTE_V_VALID); tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index); indexes[n] = j; hptes[n] = hp; revs[n] = rev; ++n; } if (!n) break; /* Now that we've collected a batch, do the tlbies */ do_tlbies(kvm, tlbrb, n, global, true); /* Read PTE low words after tlbie to get final R/C values */ for (k = 0; k < n; ++k) { j = indexes[k]; pte_index = args[j] & ((1ul << 56) - 1); hp = hptes[k]; rev = revs[k]; remove_revmap_chain(kvm, pte_index, rev, be64_to_cpu(hp[0]), be64_to_cpu(hp[1])); rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C); args[j] |= rcbits << (56 - 5); __unlock_hpte(hp, 0); } } return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_bulk_remove); long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index, unsigned long avpn) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; struct revmap_entry *rev; unsigned long v, r, rb, mask, bits; u64 pte_v, pte_r; if (kvm_is_radix(kvm)) return H_FUNCTION; if 
(pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); v = pte_v = be64_to_cpu(hpte[0]); if (cpu_has_feature(CPU_FTR_ARCH_300)) v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1])); if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 || ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) { __unlock_hpte(hpte, pte_v); return H_NOT_FOUND; } pte_r = be64_to_cpu(hpte[1]); bits = (flags << 55) & HPTE_R_PP0; bits |= (flags << 48) & HPTE_R_KEY_HI; bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); /* Update guest view of 2nd HPTE dword */ mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | HPTE_R_KEY_LO; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); if (rev) { r = (rev->guest_rpte & ~mask) | bits; rev->guest_rpte = r; note_hpte_modification(kvm, rev); } /* Update HPTE */ if (v & HPTE_V_VALID) { /* * If the page is valid, don't let it transition from * readonly to writable. If it should be writable, we'll * take a trap and let the page fault code sort it out. */ r = (pte_r & ~mask) | bits; if (hpte_is_writable(r) && !hpte_is_writable(pte_r)) r = hpte_make_readonly(r); /* If the PTE is changing, invalidate it first */ if (r != pte_r) { rb = compute_tlbie_rb(v, r, pte_index); hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) | HPTE_V_ABSENT); do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true); /* Don't lose R/C bit updates done by hardware */ r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C); hpte[1] = cpu_to_be64(r); } } unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK); asm volatile("ptesync" : : : "memory"); if (is_mmio_hpte(v, pte_r)) atomic64_inc(&kvm->arch.mmio_update); return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_protect); long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; unsigned long v, r; int i, n = 1; struct revmap_entry *rev = NULL; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; if (flags & H_READ_4) { pte_index &= ~3; n = 4; } rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); for (i = 0; i < n; ++i, ++pte_index) { hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; r = be64_to_cpu(hpte[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { v = hpte_new_to_old_v(v, r); r = hpte_new_to_old_r(r); } if (v & HPTE_V_ABSENT) { v &= ~HPTE_V_ABSENT; v |= HPTE_V_VALID; } if (v & HPTE_V_VALID) { r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C)); r &= ~HPTE_GR_RESERVED; } kvmppc_set_gpr(vcpu, 4 + i * 2, v); kvmppc_set_gpr(vcpu, 5 + i * 2, r); } return H_SUCCESS; } EXPORT_SYMBOL_GPL(kvmppc_h_read); long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; unsigned long v, r, gr; struct revmap_entry *rev; unsigned long *rmap; long ret = H_NOT_FOUND; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); v = be64_to_cpu(hpte[0]); r = be64_to_cpu(hpte[1]); if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) goto out; gr = rev->guest_rpte; if (rev->guest_rpte & HPTE_R_R) { rev->guest_rpte &= ~HPTE_R_R; note_hpte_modification(kvm, rev); } if (v & HPTE_V_VALID) { gr |= r & (HPTE_R_R | HPTE_R_C); if 
(r & HPTE_R_R) { kvmppc_clear_ref_hpte(kvm, hpte, pte_index); rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL); if (rmap) { lock_rmap(rmap); *rmap |= KVMPPC_RMAP_REFERENCED; unlock_rmap(rmap); } } } kvmppc_set_gpr(vcpu, 4, gr); ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_clear_ref); long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long pte_index) { struct kvm *kvm = vcpu->kvm; __be64 *hpte; unsigned long v, r, gr; struct revmap_entry *rev; long ret = H_NOT_FOUND; if (kvm_is_radix(kvm)) return H_FUNCTION; if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) return H_PARAMETER; rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]); hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4)); while (!try_lock_hpte(hpte, HPTE_V_HVLOCK)) cpu_relax(); v = be64_to_cpu(hpte[0]); r = be64_to_cpu(hpte[1]); if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) goto out; gr = rev->guest_rpte; if (gr & HPTE_R_C) { rev->guest_rpte &= ~HPTE_R_C; note_hpte_modification(kvm, rev); } if (v & HPTE_V_VALID) { /* need to make it temporarily absent so C is stable */ hpte[0] |= cpu_to_be64(HPTE_V_ABSENT); kvmppc_invalidate_hpte(kvm, hpte, pte_index); r = be64_to_cpu(hpte[1]); gr |= r & (HPTE_R_R | HPTE_R_C); if (r & HPTE_R_C) { hpte[1] = cpu_to_be64(r & ~HPTE_R_C); eieio(); kvmppc_set_dirty_from_hpte(kvm, v, gr); } } kvmppc_set_gpr(vcpu, 4, gr); ret = H_SUCCESS; out: unlock_hpte(hpte, v & ~HPTE_V_HVLOCK); return ret; } EXPORT_SYMBOL_GPL(kvmppc_h_clear_mod); static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long mmu_seq, unsigned long gpa, int writing, unsigned long *hpa, struct kvm_memory_slot **memslot_p) { struct kvm *kvm = vcpu->kvm; struct kvm_memory_slot *memslot; unsigned long gfn, hva, pa, psize = PAGE_SHIFT; unsigned int shift; pte_t *ptep, pte; /* Find the memslot for this address */ gfn = gpa >> PAGE_SHIFT; memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn); if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) return H_PARAMETER; /* Translate to host virtual address */ hva = __gfn_to_hva_memslot(memslot, gfn); /* Try to find the host pte for that virtual address */ ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); if (!ptep) return H_TOO_HARD; pte = kvmppc_read_update_linux_pte(ptep, writing); if (!pte_present(pte)) return H_TOO_HARD; /* Convert to a physical address */ if (shift) psize = 1UL << shift; pa = pte_pfn(pte) << PAGE_SHIFT; pa |= hva & (psize - 1); pa |= gpa & ~PAGE_MASK; if (hpa) *hpa = pa; if (memslot_p) *memslot_p = memslot; return H_SUCCESS; } static long kvmppc_do_h_page_init_zero(struct kvm_vcpu *vcpu, unsigned long dest) { struct kvm_memory_slot *memslot; struct kvm *kvm = vcpu->kvm; unsigned long pa, mmu_seq; long ret = H_SUCCESS; int i; /* Used later to detect if we might have been invalidated */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &pa, &memslot); if (ret != H_SUCCESS) goto out_unlock; /* Zero the page */ for (i = 0; i < SZ_4K; i += L1_CACHE_BYTES, pa += L1_CACHE_BYTES) dcbz((void *)pa); kvmppc_update_dirty_map(memslot, dest >> PAGE_SHIFT, PAGE_SIZE); out_unlock: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } static long kvmppc_do_h_page_init_copy(struct kvm_vcpu *vcpu, unsigned long dest, unsigned long src) { unsigned long dest_pa, src_pa, mmu_seq; struct kvm_memory_slot *dest_memslot; struct kvm *kvm = vcpu->kvm; long ret = H_SUCCESS; /* Used later to detect if we might have been 
invalidated */ mmu_seq = kvm->mmu_invalidate_seq; smp_rmb(); arch_spin_lock(&kvm->mmu_lock.rlock.raw_lock); ret = kvmppc_get_hpa(vcpu, mmu_seq, dest, 1, &dest_pa, &dest_memslot); if (ret != H_SUCCESS) goto out_unlock; ret = kvmppc_get_hpa(vcpu, mmu_seq, src, 0, &src_pa, NULL); if (ret != H_SUCCESS) goto out_unlock; /* Copy the page */ memcpy((void *)dest_pa, (void *)src_pa, SZ_4K); kvmppc_update_dirty_map(dest_memslot, dest >> PAGE_SHIFT, PAGE_SIZE); out_unlock: arch_spin_unlock(&kvm->mmu_lock.rlock.raw_lock); return ret; } long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags, unsigned long dest, unsigned long src) { struct kvm *kvm = vcpu->kvm; u64 pg_mask = SZ_4K - 1; /* 4K page size */ long ret = H_SUCCESS; /* Don't handle radix mode here, go up to the virtual mode handler */ if (kvm_is_radix(kvm)) return H_TOO_HARD; /* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */ if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE | H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED)) return H_PARAMETER; /* dest (and src if copy_page flag set) must be page aligned */ if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask))) return H_PARAMETER; /* zero and/or copy the page as determined by the flags */ if (flags & H_COPY_PAGE) ret = kvmppc_do_h_page_init_copy(vcpu, dest, src); else if (flags & H_ZERO_PAGE) ret = kvmppc_do_h_page_init_zero(vcpu, dest); /* We can ignore the other flags */ return ret; } void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { unsigned long rb; u64 hp0, hp1; hptep[0] &= ~cpu_to_be64(HPTE_V_VALID); hp0 = be64_to_cpu(hptep[0]); hp1 = be64_to_cpu(hptep[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hp0 = hpte_new_to_old_v(hp0, hp1); hp1 = hpte_new_to_old_r(hp1); } rb = compute_tlbie_rb(hp0, hp1, pte_index); do_tlbies(kvm, &rb, 1, 1, true); } EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte); void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep, unsigned long pte_index) { unsigned long rb; unsigned char rbyte; u64 hp0, hp1; hp0 = be64_to_cpu(hptep[0]); hp1 = be64_to_cpu(hptep[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { hp0 = hpte_new_to_old_v(hp0, hp1); hp1 = hpte_new_to_old_r(hp1); } rb = compute_tlbie_rb(hp0, hp1, pte_index); rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8; /* modify only the second-last byte, which contains the ref bit */ *((char *)hptep + 14) = rbyte; do_tlbies(kvm, &rb, 1, 1, false); } EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte); static int slb_base_page_shift[4] = { 24, /* 16M */ 16, /* 64k */ 34, /* 16G */ 20, /* 1M, unsupported */ }; static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu, unsigned long eaddr, unsigned long slb_v, long mmio_update) { struct mmio_hpte_cache_entry *entry = NULL; unsigned int pshift; unsigned int i; for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) { entry = &vcpu->arch.mmio_cache.entry[i]; if (entry->mmio_update == mmio_update) { pshift = entry->slb_base_pshift; if ((entry->eaddr >> pshift) == (eaddr >> pshift) && entry->slb_v == slb_v) return entry; } } return NULL; } static struct mmio_hpte_cache_entry * next_mmio_cache_entry(struct kvm_vcpu *vcpu) { unsigned int index = vcpu->arch.mmio_cache.index; vcpu->arch.mmio_cache.index++; if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE) vcpu->arch.mmio_cache.index = 0; return &vcpu->arch.mmio_cache.entry[index]; } /* When called from virtmode, this func should be protected by * preempt_disable(), otherwise, the holding of HPTE_V_HVLOCK * can trigger deadlock issue. 
*/ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v, unsigned long valid) { unsigned int i; unsigned int pshift; unsigned long somask; unsigned long vsid, hash; unsigned long avpn; __be64 *hpte; unsigned long mask, val; unsigned long v, r, orig_v; /* Get page shift, work out hash and AVPN etc. */ mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY; val = 0; pshift = 12; if (slb_v & SLB_VSID_L) { mask |= HPTE_V_LARGE; val |= HPTE_V_LARGE; pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4]; } if (slb_v & SLB_VSID_B_1T) { somask = (1UL << 40) - 1; vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T; vsid ^= vsid << 25; } else { somask = (1UL << 28) - 1; vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT; } hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt); avpn = slb_v & ~(somask >> 16); /* also includes B */ avpn |= (eaddr & somask) >> 16; if (pshift >= 24) avpn &= ~((1UL << (pshift - 16)) - 1); else avpn &= ~0x7fUL; val |= avpn; for (;;) { hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7)); for (i = 0; i < 16; i += 2) { /* Read the PTE racily */ v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; if (cpu_has_feature(CPU_FTR_ARCH_300)) v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1])); /* Check valid/absent, hash, segment size and AVPN */ if (!(v & valid) || (v & mask) != val) continue; /* Lock the PTE and read it under the lock */ while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK)) cpu_relax(); v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK; r = be64_to_cpu(hpte[i+1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { v = hpte_new_to_old_v(v, r); r = hpte_new_to_old_r(r); } /* * Check the HPTE again, including base page size */ if ((v & valid) && (v & mask) == val && kvmppc_hpte_base_page_shift(v, r) == pshift) /* Return with the HPTE still locked */ return (hash << 3) + (i >> 1); __unlock_hpte(&hpte[i], orig_v); } if (val & HPTE_V_SECONDARY) break; val |= HPTE_V_SECONDARY; hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt); } return -1; } EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte); /* * Called in real mode to check whether an HPTE not found fault * is due to accessing a paged-out page or an emulated MMIO page, * or if a protection fault is due to accessing a page that the * guest wanted read/write access to but which we made read-only. * Returns a possibly modified status (DSISR) value if not * (i.e. pass the interrupt to the guest), * -1 to pass the fault up to host kernel mode code, -2 to do that * and also load the instruction word (for MMIO emulation), * or 0 if we should make the guest retry the access. 
*/ long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr, unsigned long slb_v, unsigned int status, bool data) { struct kvm *kvm = vcpu->kvm; long int index; unsigned long v, r, gr, orig_v; __be64 *hpte; unsigned long valid; struct revmap_entry *rev; unsigned long pp, key; struct mmio_hpte_cache_entry *cache_entry = NULL; long mmio_update = 0; /* For protection fault, expect to find a valid HPTE */ valid = HPTE_V_VALID; if (status & DSISR_NOHPTE) { valid |= HPTE_V_ABSENT; mmio_update = atomic64_read(&kvm->arch.mmio_update); cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update); } if (cache_entry) { index = cache_entry->pte_index; v = cache_entry->hpte_v; r = cache_entry->hpte_r; gr = cache_entry->rpte; } else { index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid); if (index < 0) { if (status & DSISR_NOHPTE) return status; /* there really was no HPTE */ return 0; /* for prot fault, HPTE disappeared */ } hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK; r = be64_to_cpu(hpte[1]); if (cpu_has_feature(CPU_FTR_ARCH_300)) { v = hpte_new_to_old_v(v, r); r = hpte_new_to_old_r(r); } rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]); gr = rev->guest_rpte; unlock_hpte(hpte, orig_v); } /* For not found, if the HPTE is valid by now, retry the instruction */ if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID)) return 0; /* Check access permissions to the page */ pp = gr & (HPTE_R_PP0 | HPTE_R_PP); key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */ if (!data) { if (gr & (HPTE_R_N | HPTE_R_G)) return status | SRR1_ISI_N_G_OR_CIP; if (!hpte_read_permission(pp, slb_v & key)) return status | SRR1_ISI_PROT; } else if (status & DSISR_ISSTORE) { /* check write permission */ if (!hpte_write_permission(pp, slb_v & key)) return status | DSISR_PROTFAULT; } else { if (!hpte_read_permission(pp, slb_v & key)) return status | DSISR_PROTFAULT; } /* Check storage key, if applicable */ if (data && (vcpu->arch.shregs.msr & MSR_DR)) { unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr); if (status & DSISR_ISSTORE) perm >>= 1; if (perm & 1) return status | DSISR_KEYFAULT; } /* Save HPTE info for virtual-mode handler */ vcpu->arch.pgfault_addr = addr; vcpu->arch.pgfault_index = index; vcpu->arch.pgfault_hpte[0] = v; vcpu->arch.pgfault_hpte[1] = r; vcpu->arch.pgfault_cache = cache_entry; /* Check the storage key to see if it is possibly emulated MMIO */ if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) == (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) { if (!cache_entry) { unsigned int pshift = 12; unsigned int pshift_index; if (slb_v & SLB_VSID_L) { pshift_index = ((slb_v & SLB_VSID_LP) >> 4); pshift = slb_base_page_shift[pshift_index]; } cache_entry = next_mmio_cache_entry(vcpu); cache_entry->eaddr = addr; cache_entry->slb_base_pshift = pshift; cache_entry->pte_index = index; cache_entry->hpte_v = v; cache_entry->hpte_r = r; cache_entry->rpte = gr; cache_entry->slb_v = slb_v; cache_entry->mmio_update = mmio_update; } if (data && (vcpu->arch.shregs.msr & MSR_IR)) return -2; /* MMIO emulation - load instr word */ } return -1; /* send fault up to host kernel mode */ } EXPORT_SYMBOL_GPL(kvmppc_hpte_hv_fault);
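/*
 * Illustrative sketch only, not part of the file above: how a caller might
 * dispatch on the return value of kvmppc_hpte_hv_fault(), following the
 * contract spelled out in the comment before it (a positive, possibly
 * modified DSISR to reflect to the guest; 0 to retry; -1 to go to the host;
 * -2 to go to the host and also fetch the instruction word).  The helper
 * functions here are hypothetical stand-ins used purely for illustration.
 */
#include <stdio.h>

static void handle_in_host(int load_inst)
{
	printf("exit to host%s\n", load_inst ? " (and fetch instruction word)" : "");
}

static void retry_guest_access(void)
{
	printf("retry the guest access\n");
}

static void reflect_to_guest(unsigned int dsisr)
{
	printf("deliver the interrupt to the guest, DSISR=0x%x\n", dsisr);
}

static void dispatch_hpte_fault_result(long ret)
{
	if (ret == -2)
		handle_in_host(1);	/* MMIO emulation needs the instr word */
	else if (ret == -1)
		handle_in_host(0);	/* host kernel mode code sorts it out */
	else if (ret == 0)
		retry_guest_access();	/* HPTE became valid again meanwhile */
	else
		reflect_to_guest((unsigned int)ret); /* modified status for the guest */
}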
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Old U-boot compatibility for AmigaOne
 *
 * Author: Gerhard Pircher ([email protected])
 *
 * Based on cuboot-83xx.c
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "ops.h"
#include "stdio.h"
#include "cuboot.h"

#include "ppcboot.h"

static bd_t bd;

static void platform_fixups(void)
{
	dt_fixup_memory(bd.bi_memstart, bd.bi_memsize);
	dt_fixup_cpu_clocks(bd.bi_intfreq, bd.bi_busfreq / 4, bd.bi_busfreq);
}

void platform_init(unsigned long r3, unsigned long r4, unsigned long r5,
		   unsigned long r6, unsigned long r7)
{
	CUBOOT_INIT();
	fdt_init(_dtb_start);
	serial_console_init();
	platform_ops.fixups = platform_fixups;
}
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_BRIDGE_EBT_ARPREPLY_H
#define __LINUX_BRIDGE_EBT_ARPREPLY_H

#include <linux/if_ether.h>

struct ebt_arpreply_info {
	unsigned char mac[ETH_ALEN];
	int target;
};

#define EBT_ARPREPLY_TARGET "arpreply"

#endif
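/*
 * Illustrative sketch, not part of the header above: filling in
 * struct ebt_arpreply_info from user space.  The MAC value is an arbitrary
 * example, and the literal -2 is the standard ebtables EBT_DROP verdict
 * (from linux/netfilter_bridge/ebtables.h), written out here only so the
 * snippet stays self-contained; which verdict to use is up to the rule.
 */
#include <string.h>
#include <linux/netfilter_bridge/ebt_arpreply.h>

static void fill_arpreply(struct ebt_arpreply_info *info)
{
	/* MAC address to place in the generated ARP reply (example value). */
	static const unsigned char mac[ETH_ALEN] = {
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01
	};

	memcpy(info->mac, mac, ETH_ALEN);
	info->target = -2;	/* EBT_DROP: drop the original request after replying */
}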
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Driver for the High Speed UART DMA
 *
 * Copyright (C) 2015 Intel Corporation
 *
 * Partially based on the bits found in drivers/tty/serial/mfd.c.
 */

#ifndef __DMA_HSU_H__
#define __DMA_HSU_H__

#include <linux/bits.h>
#include <linux/container_of.h>
#include <linux/io.h>
#include <linux/types.h>

#include <linux/dma/hsu.h>

#include "../virt-dma.h"

#define HSU_CH_SR		0x00			/* channel status */
#define HSU_CH_CR		0x04			/* channel control */
#define HSU_CH_DCR		0x08			/* descriptor control */
#define HSU_CH_BSR		0x10			/* FIFO buffer size */
#define HSU_CH_MTSR		0x14			/* minimum transfer size */
#define HSU_CH_DxSAR(x)		(0x20 + 8 * (x))	/* desc start addr */
#define HSU_CH_DxTSR(x)		(0x24 + 8 * (x))	/* desc transfer size */
#define HSU_CH_D0SAR		0x20			/* desc 0 start addr */
#define HSU_CH_D0TSR		0x24			/* desc 0 transfer size */
#define HSU_CH_D1SAR		0x28
#define HSU_CH_D1TSR		0x2c
#define HSU_CH_D2SAR		0x30
#define HSU_CH_D2TSR		0x34
#define HSU_CH_D3SAR		0x38
#define HSU_CH_D3TSR		0x3c

#define HSU_DMA_CHAN_NR_DESC	4
#define HSU_DMA_CHAN_LENGTH	0x40

/* Bits in HSU_CH_SR */
#define HSU_CH_SR_DESCTO(x)	BIT(8 + (x))
#define HSU_CH_SR_DESCTO_ANY	GENMASK(11, 8)
#define HSU_CH_SR_CHE		BIT(15)
#define HSU_CH_SR_DESCE(x)	BIT(16 + (x))
#define HSU_CH_SR_DESCE_ANY	GENMASK(19, 16)
#define HSU_CH_SR_CDESC_ANY	GENMASK(31, 30)

/* Bits in HSU_CH_CR */
#define HSU_CH_CR_CHA		BIT(0)
#define HSU_CH_CR_CHD		BIT(1)

/* Bits in HSU_CH_DCR */
#define HSU_CH_DCR_DESCA(x)	BIT(0 + (x))
#define HSU_CH_DCR_CHSOD(x)	BIT(8 + (x))
#define HSU_CH_DCR_CHSOTO	BIT(14)
#define HSU_CH_DCR_CHSOE	BIT(15)
#define HSU_CH_DCR_CHDI(x)	BIT(16 + (x))
#define HSU_CH_DCR_CHEI		BIT(23)
#define HSU_CH_DCR_CHTOI(x)	BIT(24 + (x))

/* Bits in HSU_CH_DxTSR */
#define HSU_CH_DxTSR_MASK	GENMASK(15, 0)
#define HSU_CH_DxTSR_TSR(x)	((x) & HSU_CH_DxTSR_MASK)

struct hsu_dma_sg {
	dma_addr_t addr;
	unsigned int len;
};

struct hsu_dma_desc {
	struct virt_dma_desc vdesc;
	enum dma_transfer_direction direction;
	struct hsu_dma_sg *sg;
	unsigned int nents;
	size_t length;
	unsigned int active;
	enum dma_status status;
};

static inline struct hsu_dma_desc *to_hsu_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct hsu_dma_desc, vdesc);
}

struct hsu_dma_chan {
	struct virt_dma_chan vchan;

	void __iomem *reg;

	/* hardware configuration */
	enum dma_transfer_direction direction;
	struct dma_slave_config config;

	struct hsu_dma_desc *desc;
};

static inline struct hsu_dma_chan *to_hsu_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct hsu_dma_chan, vchan.chan);
}

static inline u32 hsu_chan_readl(struct hsu_dma_chan *hsuc, int offset)
{
	return readl(hsuc->reg + offset);
}

static inline void hsu_chan_writel(struct hsu_dma_chan *hsuc, int offset,
				   u32 value)
{
	writel(value, hsuc->reg + offset);
}

struct hsu_dma {
	struct dma_device dma;

	/* channels */
	struct hsu_dma_chan *chan;
	unsigned short nr_channels;
};

static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev)
{
	return container_of(ddev, struct hsu_dma, dma);
}

#endif /* __DMA_HSU_H__ */
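/*
 * Illustrative fragment, not taken from the driver: it only shows how the
 * per-descriptor register macros and the hsu_chan_writel() helper above
 * compose when programming one descriptor.  The real submission path in
 * hsu.c is more involved (interrupt enables, timeouts, direction handling),
 * so treat this as a sketch that relies on the definitions in the header
 * above rather than as the driver's actual logic.
 */
static void hsu_prog_one_desc_sketch(struct hsu_dma_chan *hsuc,
				     const struct hsu_dma_sg *sg)
{
	/* Program descriptor 0 with the buffer address and (16-bit) length ... */
	hsu_chan_writel(hsuc, HSU_CH_DxSAR(0), sg->addr);
	hsu_chan_writel(hsuc, HSU_CH_DxTSR(0), HSU_CH_DxTSR_TSR(sg->len));

	/* ... mark descriptor 0 as valid in the descriptor control register ... */
	hsu_chan_writel(hsuc, HSU_CH_DCR, HSU_CH_DCR_DESCA(0));

	/* ... and activate the channel. */
	hsu_chan_writel(hsuc, HSU_CH_CR, HSU_CH_CR_CHA);
}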
// SPDX-License-Identifier: GPL-2.0 /* * Copyright IBM Corp. 2004, 2010 * Interface implementation for communication with the z/VM control program * * Author(s): Christian Borntraeger <[email protected]> * * z/VMs CP offers the possibility to issue commands via the diagnose code 8 * this driver implements a character device that issues these commands and * returns the answer of CP. * * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS */ #include <linux/fs.h> #include <linux/init.h> #include <linux/compat.h> #include <linux/kernel.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/uaccess.h> #include <linux/export.h> #include <linux/mutex.h> #include <linux/cma.h> #include <linux/mm.h> #include <asm/cpcmd.h> #include <asm/debug.h> #include <asm/vmcp.h> struct vmcp_session { char *response; unsigned int bufsize; unsigned int cma_alloc : 1; int resp_size; int resp_code; struct mutex mutex; }; static debug_info_t *vmcp_debug; static unsigned long vmcp_cma_size __initdata = CONFIG_VMCP_CMA_SIZE * 1024 * 1024; static struct cma *vmcp_cma; static int __init early_parse_vmcp_cma(char *p) { if (!p) return 1; vmcp_cma_size = ALIGN(memparse(p, NULL), PAGE_SIZE); return 0; } early_param("vmcp_cma", early_parse_vmcp_cma); void __init vmcp_cma_reserve(void) { if (!MACHINE_IS_VM) return; cma_declare_contiguous(0, vmcp_cma_size, 0, 0, 0, false, "vmcp", &vmcp_cma); } static void vmcp_response_alloc(struct vmcp_session *session) { struct page *page = NULL; int nr_pages, order; order = get_order(session->bufsize); nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT; /* * For anything below order 3 allocations rely on the buddy * allocator. If such low-order allocations can't be handled * anymore the system won't work anyway. 
*/ if (order > 2) page = cma_alloc(vmcp_cma, nr_pages, 0, false); if (page) { session->response = (char *)page_to_virt(page); session->cma_alloc = 1; return; } session->response = (char *)__get_free_pages(GFP_KERNEL | __GFP_RETRY_MAYFAIL, order); } static void vmcp_response_free(struct vmcp_session *session) { int nr_pages, order; struct page *page; if (!session->response) return; order = get_order(session->bufsize); nr_pages = ALIGN(session->bufsize, PAGE_SIZE) >> PAGE_SHIFT; if (session->cma_alloc) { page = virt_to_page(session->response); cma_release(vmcp_cma, page, nr_pages); session->cma_alloc = 0; } else { free_pages((unsigned long)session->response, order); } session->response = NULL; } static int vmcp_open(struct inode *inode, struct file *file) { struct vmcp_session *session; if (!capable(CAP_SYS_ADMIN)) return -EPERM; session = kmalloc(sizeof(*session), GFP_KERNEL); if (!session) return -ENOMEM; session->bufsize = PAGE_SIZE; session->response = NULL; session->resp_size = 0; mutex_init(&session->mutex); file->private_data = session; return nonseekable_open(inode, file); } static int vmcp_release(struct inode *inode, struct file *file) { struct vmcp_session *session; session = file->private_data; file->private_data = NULL; vmcp_response_free(session); kfree(session); return 0; } static ssize_t vmcp_read(struct file *file, char __user *buff, size_t count, loff_t *ppos) { ssize_t ret; size_t size; struct vmcp_session *session; session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; if (!session->response) { mutex_unlock(&session->mutex); return 0; } size = min_t(size_t, session->resp_size, session->bufsize); ret = simple_read_from_buffer(buff, count, ppos, session->response, size); mutex_unlock(&session->mutex); return ret; } static ssize_t vmcp_write(struct file *file, const char __user *buff, size_t count, loff_t *ppos) { char *cmd; struct vmcp_session *session; if (count > 240) return -EINVAL; cmd = memdup_user_nul(buff, count); if (IS_ERR(cmd)) return PTR_ERR(cmd); session = file->private_data; if (mutex_lock_interruptible(&session->mutex)) { kfree(cmd); return -ERESTARTSYS; } if (!session->response) vmcp_response_alloc(session); if (!session->response) { mutex_unlock(&session->mutex); kfree(cmd); return -ENOMEM; } debug_text_event(vmcp_debug, 1, cmd); session->resp_size = cpcmd(cmd, session->response, session->bufsize, &session->resp_code); mutex_unlock(&session->mutex); kfree(cmd); *ppos = 0; /* reset the file pointer after a command */ return count; } /* * These ioctls are available, as the semantics of the diagnose 8 call * does not fit very well into a Linux call. Diagnose X'08' is described in * CP Programming Services SC24-6084-00 * * VMCP_GETCODE: gives the CP return code back to user space * VMCP_SETBUF: sets the response buffer for the next write call. diagnose 8 * expects adjacent pages in real storage and to make matters worse, we * dont know the size of the response. 
Therefore we default to PAGESIZE and * let userspace to change the response size, if userspace expects a bigger * response */ static long vmcp_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct vmcp_session *session; int ret = -ENOTTY; int __user *argp; session = file->private_data; if (is_compat_task()) argp = compat_ptr(arg); else argp = (int __user *)arg; if (mutex_lock_interruptible(&session->mutex)) return -ERESTARTSYS; switch (cmd) { case VMCP_GETCODE: ret = put_user(session->resp_code, argp); break; case VMCP_SETBUF: vmcp_response_free(session); ret = get_user(session->bufsize, argp); if (ret) session->bufsize = PAGE_SIZE; if (!session->bufsize || get_order(session->bufsize) > 8) { session->bufsize = PAGE_SIZE; ret = -EINVAL; } break; case VMCP_GETSIZE: ret = put_user(session->resp_size, argp); break; default: break; } mutex_unlock(&session->mutex); return ret; } static const struct file_operations vmcp_fops = { .owner = THIS_MODULE, .open = vmcp_open, .release = vmcp_release, .read = vmcp_read, .write = vmcp_write, .unlocked_ioctl = vmcp_ioctl, .compat_ioctl = vmcp_ioctl, }; static struct miscdevice vmcp_dev = { .name = "vmcp", .minor = MISC_DYNAMIC_MINOR, .fops = &vmcp_fops, }; static int __init vmcp_init(void) { int ret; if (!MACHINE_IS_VM) return 0; vmcp_debug = debug_register("vmcp", 1, 1, 240); if (!vmcp_debug) return -ENOMEM; ret = debug_register_view(vmcp_debug, &debug_hex_ascii_view); if (ret) { debug_unregister(vmcp_debug); return ret; } ret = misc_register(&vmcp_dev); if (ret) debug_unregister(vmcp_debug); return ret; } device_initcall(vmcp_init);
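/*
 * Illustrative user-space sketch, not part of the driver above: issuing a CP
 * command through the misc device (registered as "vmcp", so normally
 * /dev/vmcp) and reading back the response, using the VMCP_GETSIZE and
 * VMCP_GETCODE ioctls described in the comment above.  The VMCP_* numbers
 * come from the uapi header asm/vmcp.h; "QUERY USERID" is just an example
 * command, and error handling is kept minimal on purpose.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <asm/vmcp.h>

int main(void)
{
	const char *cmd = "QUERY USERID";
	char *buf;
	int fd, size = 0, rc = 0;

	fd = open("/dev/vmcp", O_RDWR);
	if (fd < 0)
		return 1;
	/* A write issues the CP command (limited to 240 bytes). */
	if (write(fd, cmd, strlen(cmd)) < 0)
		return 1;
	/* Ask how large the response is and what CP's return code was. */
	ioctl(fd, VMCP_GETSIZE, &size);
	ioctl(fd, VMCP_GETCODE, &rc);
	buf = calloc(1, size + 1);
	if (buf && read(fd, buf, size) >= 0)
		printf("CP rc=%d\n%s\n", rc, buf);
	free(buf);
	close(fd);
	return 0;
}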
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012-2014,2017 The Linux Foundation. All rights reserved. * Copyright (c) 2018-2020, Linaro Limited */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/reset.h> #include <linux/slab.h> #define PHY_CTRL0 0x6C #define PHY_CTRL1 0x70 #define PHY_CTRL2 0x74 #define PHY_CTRL4 0x7C /* PHY_CTRL bits */ #define REF_PHY_EN BIT(0) #define LANE0_PWR_ON BIT(2) #define SWI_PCS_CLK_SEL BIT(4) #define TST_PWR_DOWN BIT(4) #define PHY_RESET BIT(7) #define NUM_BULK_CLKS 3 #define NUM_BULK_REGS 2 struct ssphy_priv { void __iomem *base; struct device *dev; struct reset_control *reset_com; struct reset_control *reset_phy; struct regulator_bulk_data regs[NUM_BULK_REGS]; struct clk_bulk_data clks[NUM_BULK_CLKS]; enum phy_mode mode; }; static inline void qcom_ssphy_updatel(void __iomem *addr, u32 mask, u32 val) { writel((readl(addr) & ~mask) | val, addr); } static int qcom_ssphy_do_reset(struct ssphy_priv *priv) { int ret; if (!priv->reset_com) { qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, PHY_RESET); usleep_range(10, 20); qcom_ssphy_updatel(priv->base + PHY_CTRL1, PHY_RESET, 0); } else { ret = reset_control_assert(priv->reset_com); if (ret) { dev_err(priv->dev, "Failed to assert reset com\n"); return ret; } ret = reset_control_assert(priv->reset_phy); if (ret) { dev_err(priv->dev, "Failed to assert reset phy\n"); return ret; } usleep_range(10, 20); ret = reset_control_deassert(priv->reset_com); if (ret) { dev_err(priv->dev, "Failed to deassert reset com\n"); return ret; } ret = reset_control_deassert(priv->reset_phy); if (ret) { dev_err(priv->dev, "Failed to deassert reset phy\n"); return ret; } } return 0; } static int qcom_ssphy_power_on(struct phy *phy) { struct ssphy_priv *priv = phy_get_drvdata(phy); int ret; ret = regulator_bulk_enable(NUM_BULK_REGS, priv->regs); if (ret) return ret; ret = clk_bulk_prepare_enable(NUM_BULK_CLKS, priv->clks); if (ret) goto err_disable_regulator; ret = qcom_ssphy_do_reset(priv); if (ret) goto err_disable_clock; writeb(SWI_PCS_CLK_SEL, priv->base + PHY_CTRL0); qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, LANE0_PWR_ON); qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, REF_PHY_EN); qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, 0); return 0; err_disable_clock: clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks); err_disable_regulator: regulator_bulk_disable(NUM_BULK_REGS, priv->regs); return ret; } static int qcom_ssphy_power_off(struct phy *phy) { struct ssphy_priv *priv = phy_get_drvdata(phy); qcom_ssphy_updatel(priv->base + PHY_CTRL4, LANE0_PWR_ON, 0); qcom_ssphy_updatel(priv->base + PHY_CTRL2, REF_PHY_EN, 0); qcom_ssphy_updatel(priv->base + PHY_CTRL4, TST_PWR_DOWN, TST_PWR_DOWN); clk_bulk_disable_unprepare(NUM_BULK_CLKS, priv->clks); regulator_bulk_disable(NUM_BULK_REGS, priv->regs); return 0; } static int qcom_ssphy_init_clock(struct ssphy_priv *priv) { priv->clks[0].id = "ref"; priv->clks[1].id = "ahb"; priv->clks[2].id = "pipe"; return devm_clk_bulk_get(priv->dev, NUM_BULK_CLKS, priv->clks); } static int qcom_ssphy_init_regulator(struct ssphy_priv *priv) { int ret; priv->regs[0].supply = "vdd"; priv->regs[1].supply = "vdda1p8"; ret = devm_regulator_bulk_get(priv->dev, NUM_BULK_REGS, priv->regs); if (ret) { if (ret != -EPROBE_DEFER) 
dev_err(priv->dev, "Failed to get regulators\n"); return ret; } return ret; } static int qcom_ssphy_init_reset(struct ssphy_priv *priv) { priv->reset_com = devm_reset_control_get_optional_exclusive(priv->dev, "com"); if (IS_ERR(priv->reset_com)) { dev_err(priv->dev, "Failed to get reset control com\n"); return PTR_ERR(priv->reset_com); } if (priv->reset_com) { /* if reset_com is present, reset_phy is no longer optional */ priv->reset_phy = devm_reset_control_get_exclusive(priv->dev, "phy"); if (IS_ERR(priv->reset_phy)) { dev_err(priv->dev, "Failed to get reset control phy\n"); return PTR_ERR(priv->reset_phy); } } return 0; } static const struct phy_ops qcom_ssphy_ops = { .power_off = qcom_ssphy_power_off, .power_on = qcom_ssphy_power_on, .owner = THIS_MODULE, }; static int qcom_ssphy_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct phy_provider *provider; struct ssphy_priv *priv; struct phy *phy; int ret; priv = devm_kzalloc(dev, sizeof(struct ssphy_priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; priv->mode = PHY_MODE_INVALID; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) return PTR_ERR(priv->base); ret = qcom_ssphy_init_clock(priv); if (ret) return ret; ret = qcom_ssphy_init_reset(priv); if (ret) return ret; ret = qcom_ssphy_init_regulator(priv); if (ret) return ret; phy = devm_phy_create(dev, dev->of_node, &qcom_ssphy_ops); if (IS_ERR(phy)) { dev_err(dev, "Failed to create the SS phy\n"); return PTR_ERR(phy); } phy_set_drvdata(phy, priv); provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(provider); } static const struct of_device_id qcom_ssphy_match[] = { { .compatible = "qcom,usb-ss-28nm-phy", }, { }, }; MODULE_DEVICE_TABLE(of, qcom_ssphy_match); static struct platform_driver qcom_ssphy_driver = { .probe = qcom_ssphy_probe, .driver = { .name = "qcom-usb-ssphy", .of_match_table = qcom_ssphy_match, }, }; module_platform_driver(qcom_ssphy_driver); MODULE_DESCRIPTION("Qualcomm SuperSpeed USB PHY driver"); MODULE_LICENSE("GPL v2");
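/*
 * Illustrative consumer-side sketch, not part of the PHY driver above: how a
 * USB controller driver would typically obtain and power this PHY through
 * the generic PHY framework, which is what ends up invoking
 * qcom_ssphy_power_on()/qcom_ssphy_power_off().  The "usb3-phy" lookup name
 * is a hypothetical example and is not defined by this driver.
 */
#include <linux/device.h>
#include <linux/err.h>
#include <linux/phy/phy.h>

static int example_enable_ss_phy(struct device *dev, struct phy **out)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb3-phy");	/* hypothetical con_id */
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	/* This is the call that reaches qcom_ssphy_power_on() above. */
	ret = phy_power_on(phy);
	if (ret)
		return ret;

	*out = phy;
	return 0;
}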
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2023 Isovalent */ #include <sys/random.h> #include <argp.h> #include "bench.h" #include "bpf_hashmap_lookup.skel.h" #include "bpf_util.h" /* BPF triggering benchmarks */ static struct ctx { struct bpf_hashmap_lookup *skel; } ctx; /* only available to kernel, so define it here */ #define BPF_MAX_LOOPS (1<<23) #define MAX_KEY_SIZE 1024 /* the size of the key map */ static struct { __u32 key_size; __u32 map_flags; __u32 max_entries; __u32 nr_entries; __u32 nr_loops; } args = { .key_size = 4, .map_flags = 0, .max_entries = 1000, .nr_entries = 500, .nr_loops = 1000000, }; enum { ARG_KEY_SIZE = 8001, ARG_MAP_FLAGS, ARG_MAX_ENTRIES, ARG_NR_ENTRIES, ARG_NR_LOOPS, }; static const struct argp_option opts[] = { { "key_size", ARG_KEY_SIZE, "KEY_SIZE", 0, "The hashmap key size (max 1024)"}, { "map_flags", ARG_MAP_FLAGS, "MAP_FLAGS", 0, "The hashmap flags passed to BPF_MAP_CREATE"}, { "max_entries", ARG_MAX_ENTRIES, "MAX_ENTRIES", 0, "The hashmap max entries"}, { "nr_entries", ARG_NR_ENTRIES, "NR_ENTRIES", 0, "The number of entries to insert/lookup"}, { "nr_loops", ARG_NR_LOOPS, "NR_LOOPS", 0, "The number of loops for the benchmark"}, {}, }; static error_t parse_arg(int key, char *arg, struct argp_state *state) { long ret; switch (key) { case ARG_KEY_SIZE: ret = strtol(arg, NULL, 10); if (ret < 1 || ret > MAX_KEY_SIZE) { fprintf(stderr, "invalid key_size"); argp_usage(state); } args.key_size = ret; break; case ARG_MAP_FLAGS: ret = strtol(arg, NULL, 0); if (ret < 0 || ret > UINT_MAX) { fprintf(stderr, "invalid map_flags"); argp_usage(state); } args.map_flags = ret; break; case ARG_MAX_ENTRIES: ret = strtol(arg, NULL, 10); if (ret < 1 || ret > UINT_MAX) { fprintf(stderr, "invalid max_entries"); argp_usage(state); } args.max_entries = ret; break; case ARG_NR_ENTRIES: ret = strtol(arg, NULL, 10); if (ret < 1 || ret > UINT_MAX) { fprintf(stderr, "invalid nr_entries"); argp_usage(state); } args.nr_entries = ret; break; case ARG_NR_LOOPS: ret = strtol(arg, NULL, 10); if (ret < 1 || ret > BPF_MAX_LOOPS) { fprintf(stderr, "invalid nr_loops: %ld (min=1 max=%u)\n", ret, BPF_MAX_LOOPS); argp_usage(state); } args.nr_loops = ret; break; default: return ARGP_ERR_UNKNOWN; } return 0; } const struct argp bench_hashmap_lookup_argp = { .options = opts, .parser = parse_arg, }; static void validate(void) { if (env.consumer_cnt != 0) { fprintf(stderr, "benchmark doesn't support consumer!\n"); exit(1); } if (args.nr_entries > args.max_entries) { fprintf(stderr, "args.nr_entries is too big! 
(max %u, got %u)\n", args.max_entries, args.nr_entries); exit(1); } } static void *producer(void *input) { while (true) { /* trigger the bpf program */ syscall(__NR_getpgid); } return NULL; } static void measure(struct bench_res *res) { } static inline void patch_key(u32 i, u32 *key) { #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ *key = i + 1; #else *key = __builtin_bswap32(i + 1); #endif /* the rest of key is random */ } static void setup(void) { struct bpf_link *link; int map_fd; int ret; int i; setup_libbpf(); ctx.skel = bpf_hashmap_lookup__open(); if (!ctx.skel) { fprintf(stderr, "failed to open skeleton\n"); exit(1); } bpf_map__set_max_entries(ctx.skel->maps.hash_map_bench, args.max_entries); bpf_map__set_key_size(ctx.skel->maps.hash_map_bench, args.key_size); bpf_map__set_value_size(ctx.skel->maps.hash_map_bench, 8); bpf_map__set_map_flags(ctx.skel->maps.hash_map_bench, args.map_flags); ctx.skel->bss->nr_entries = args.nr_entries; ctx.skel->bss->nr_loops = args.nr_loops / args.nr_entries; if (args.key_size > 4) { for (i = 1; i < args.key_size/4; i++) ctx.skel->bss->key[i] = 2654435761 * i; } ret = bpf_hashmap_lookup__load(ctx.skel); if (ret) { bpf_hashmap_lookup__destroy(ctx.skel); fprintf(stderr, "failed to load map: %s", strerror(-ret)); exit(1); } /* fill in the hash_map */ map_fd = bpf_map__fd(ctx.skel->maps.hash_map_bench); for (u64 i = 0; i < args.nr_entries; i++) { patch_key(i, ctx.skel->bss->key); bpf_map_update_elem(map_fd, ctx.skel->bss->key, &i, BPF_ANY); } link = bpf_program__attach(ctx.skel->progs.benchmark); if (!link) { fprintf(stderr, "failed to attach program!\n"); exit(1); } } static inline double events_from_time(u64 time) { if (time) return args.nr_loops * 1000000000llu / time / 1000000.0L; return 0; } static int compute_events(u64 *times, double *events_mean, double *events_stddev, u64 *mean_time) { int i, n = 0; *events_mean = 0; *events_stddev = 0; *mean_time = 0; for (i = 0; i < 32; i++) { if (!times[i]) break; *mean_time += times[i]; *events_mean += events_from_time(times[i]); n += 1; } if (!n) return 0; *mean_time /= n; *events_mean /= n; if (n > 1) { for (i = 0; i < n; i++) { double events_i = *events_mean - events_from_time(times[i]); *events_stddev += events_i * events_i / (n - 1); } *events_stddev = sqrt(*events_stddev); } return n; } static void hashmap_report_final(struct bench_res res[], int res_cnt) { unsigned int nr_cpus = bpf_num_possible_cpus(); double events_mean, events_stddev; u64 mean_time; int i, n; for (i = 0; i < nr_cpus; i++) { n = compute_events(ctx.skel->bss->percpu_times[i], &events_mean, &events_stddev, &mean_time); if (n == 0) continue; if (env.quiet) { /* we expect only one cpu to be present */ if (env.affinity) printf("%.3lf\n", events_mean); else printf("cpu%02d %.3lf\n", i, events_mean); } else { printf("cpu%02d: lookup %.3lfM ± %.3lfM events/sec" " (approximated from %d samples of ~%lums)\n", i, events_mean, 2*events_stddev, n, mean_time / 1000000); } } } const struct bench bench_bpf_hashmap_lookup = { .name = "bpf-hashmap-lookup", .argp = &bench_hashmap_lookup_argp, .validate = validate, .setup = setup, .producer_thread = producer, .measure = measure, .report_progress = NULL, .report_final = hashmap_report_final, };
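/*
 * Stand-alone sketch, not part of the benchmark above: it only demonstrates
 * the byte-order trick in patch_key().  Storing i + 1 directly on
 * little-endian hosts and __builtin_bswap32(i + 1) on big-endian hosts
 * produces the same key bytes in memory, so the map is populated and looked
 * up with identical keys regardless of host endianness.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void patch_key_demo(uint32_t i, uint32_t *key)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	*key = i + 1;
#else
	*key = __builtin_bswap32(i + 1);
#endif
}

int main(void)
{
	uint32_t key;
	unsigned char bytes[4];

	patch_key_demo(41, &key);
	memcpy(bytes, &key, sizeof(key));
	/* Prints "2a 00 00 00" on both little- and big-endian hosts. */
	printf("%02x %02x %02x %02x\n", bytes[0], bytes[1], bytes[2], bytes[3]);
	return 0;
}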
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2007 Oracle. All rights reserved. */ #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/radix-tree.h> #include <linux/writeback.h> #include <linux/workqueue.h> #include <linux/kthread.h> #include <linux/slab.h> #include <linux/migrate.h> #include <linux/ratelimit.h> #include <linux/uuid.h> #include <linux/semaphore.h> #include <linux/error-injection.h> #include <linux/crc32c.h> #include <linux/sched/mm.h> #include <linux/unaligned.h> #include <crypto/hash.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "btrfs_inode.h" #include "bio.h" #include "print-tree.h" #include "locking.h" #include "tree-log.h" #include "free-space-cache.h" #include "free-space-tree.h" #include "dev-replace.h" #include "raid56.h" #include "sysfs.h" #include "qgroup.h" #include "compression.h" #include "tree-checker.h" #include "ref-verify.h" #include "block-group.h" #include "discard.h" #include "space-info.h" #include "zoned.h" #include "subpage.h" #include "fs.h" #include "accessors.h" #include "extent-tree.h" #include "root-tree.h" #include "defrag.h" #include "uuid-tree.h" #include "relocation.h" #include "scrub.h" #include "super.h" #define BTRFS_SUPER_FLAG_SUPP (BTRFS_HEADER_FLAG_WRITTEN |\ BTRFS_HEADER_FLAG_RELOC |\ BTRFS_SUPER_FLAG_ERROR |\ BTRFS_SUPER_FLAG_SEEDING |\ BTRFS_SUPER_FLAG_METADUMP |\ BTRFS_SUPER_FLAG_METADUMP_V2) static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info); static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info); static void btrfs_free_csum_hash(struct btrfs_fs_info *fs_info) { if (fs_info->csum_shash) crypto_free_shash(fs_info->csum_shash); } /* * Compute the csum of a btree block and store the result to provided buffer. */ static void csum_tree_block(struct extent_buffer *buf, u8 *result) { struct btrfs_fs_info *fs_info = buf->fs_info; int num_pages; u32 first_page_part; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); char *kaddr; int i; shash->tfm = fs_info->csum_shash; crypto_shash_init(shash); if (buf->addr) { /* Pages are contiguous, handle them as a big one. */ kaddr = buf->addr; first_page_part = fs_info->nodesize; num_pages = 1; } else { kaddr = folio_address(buf->folios[0]); first_page_part = min_t(u32, PAGE_SIZE, fs_info->nodesize); num_pages = num_extent_pages(buf); } crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE, first_page_part - BTRFS_CSUM_SIZE); /* * Multiple single-page folios case would reach here. * * nodesize <= PAGE_SIZE and large folio all handled by above * crypto_shash_update() already. */ for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) { kaddr = folio_address(buf->folios[i]); crypto_shash_update(shash, kaddr, PAGE_SIZE); } memset(result, 0, BTRFS_CSUM_SIZE); crypto_shash_final(shash, result); } /* * we can't consider a given block up to date unless the transid of the * block matches the transid in the parent node's pointer. This is how we * detect blocks that either didn't get written at all or got written * in the wrong place. 
*/ int btrfs_buffer_uptodate(struct extent_buffer *eb, u64 parent_transid, int atomic) { if (!extent_buffer_uptodate(eb)) return 0; if (!parent_transid || btrfs_header_generation(eb) == parent_transid) return 1; if (atomic) return -EAGAIN; if (!extent_buffer_uptodate(eb) || btrfs_header_generation(eb) != parent_transid) { btrfs_err_rl(eb->fs_info, "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu", eb->start, eb->read_mirror, parent_transid, btrfs_header_generation(eb)); clear_extent_buffer_uptodate(eb); return 0; } return 1; } static bool btrfs_supported_super_csum(u16 csum_type) { switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: case BTRFS_CSUM_TYPE_XXHASH: case BTRFS_CSUM_TYPE_SHA256: case BTRFS_CSUM_TYPE_BLAKE2: return true; default: return false; } } /* * Return 0 if the superblock checksum type matches the checksum value of that * algorithm. Pass the raw disk superblock data. */ int btrfs_check_super_csum(struct btrfs_fs_info *fs_info, const struct btrfs_super_block *disk_sb) { char result[BTRFS_CSUM_SIZE]; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); shash->tfm = fs_info->csum_shash; /* * The super_block structure does not span the whole * BTRFS_SUPER_INFO_SIZE range, we expect that the unused space is * filled with zeros and is included in the checksum. */ crypto_shash_digest(shash, (const u8 *)disk_sb + BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, result); if (memcmp(disk_sb->csum, result, fs_info->csum_size)) return 1; return 0; } static int btrfs_repair_eb_io_failure(const struct extent_buffer *eb, int mirror_num) { struct btrfs_fs_info *fs_info = eb->fs_info; int num_folios = num_extent_folios(eb); int ret = 0; if (sb_rdonly(fs_info->sb)) return -EROFS; for (int i = 0; i < num_folios; i++) { struct folio *folio = eb->folios[i]; u64 start = max_t(u64, eb->start, folio_pos(folio)); u64 end = min_t(u64, eb->start + eb->len, folio_pos(folio) + eb->folio_size); u32 len = end - start; ret = btrfs_repair_io_failure(fs_info, 0, start, len, start, folio, offset_in_folio(folio, start), mirror_num); if (ret) break; } return ret; } /* * helper to read a given tree block, doing retries as required when * the checksums don't match and we have alternate mirrors to try. * * @check: expected tree parentness check, see the comments of the * structure for details. */ int btrfs_read_extent_buffer(struct extent_buffer *eb, const struct btrfs_tree_parent_check *check) { struct btrfs_fs_info *fs_info = eb->fs_info; int failed = 0; int ret; int num_copies = 0; int mirror_num = 0; int failed_mirror = 0; ASSERT(check); while (1) { clear_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); ret = read_extent_buffer_pages(eb, WAIT_COMPLETE, mirror_num, check); if (!ret) break; num_copies = btrfs_num_copies(fs_info, eb->start, eb->len); if (num_copies == 1) break; if (!failed_mirror) { failed = 1; failed_mirror = eb->read_mirror; } mirror_num++; if (mirror_num == failed_mirror) mirror_num++; if (mirror_num > num_copies) break; } if (failed && !ret && failed_mirror) btrfs_repair_eb_io_failure(eb, failed_mirror); return ret; } /* * Checksum a dirty tree block before IO. */ blk_status_t btree_csum_one_bio(struct btrfs_bio *bbio) { struct extent_buffer *eb = bbio->private; struct btrfs_fs_info *fs_info = eb->fs_info; u64 found_start = btrfs_header_bytenr(eb); u64 last_trans; u8 result[BTRFS_CSUM_SIZE]; int ret; /* Btree blocks are always contiguous on disk. 
*/ if (WARN_ON_ONCE(bbio->file_offset != eb->start)) return BLK_STS_IOERR; if (WARN_ON_ONCE(bbio->bio.bi_iter.bi_size != eb->len)) return BLK_STS_IOERR; /* * If an extent_buffer is marked as EXTENT_BUFFER_ZONED_ZEROOUT, don't * checksum it but zero-out its content. This is done to preserve * ordering of I/O without unnecessarily writing out data. */ if (test_bit(EXTENT_BUFFER_ZONED_ZEROOUT, &eb->bflags)) { memzero_extent_buffer(eb, 0, eb->len); return BLK_STS_OK; } if (WARN_ON_ONCE(found_start != eb->start)) return BLK_STS_IOERR; if (WARN_ON(!btrfs_folio_test_uptodate(fs_info, eb->folios[0], eb->start, eb->len))) return BLK_STS_IOERR; ASSERT(memcmp_extent_buffer(eb, fs_info->fs_devices->metadata_uuid, offsetof(struct btrfs_header, fsid), BTRFS_FSID_SIZE) == 0); csum_tree_block(eb, result); if (btrfs_header_level(eb)) ret = btrfs_check_node(eb); else ret = btrfs_check_leaf(eb); if (ret < 0) goto error; /* * Also check the generation, the eb reached here must be newer than * last committed. Or something seriously wrong happened. */ last_trans = btrfs_get_last_trans_committed(fs_info); if (unlikely(btrfs_header_generation(eb) <= last_trans)) { ret = -EUCLEAN; btrfs_err(fs_info, "block=%llu bad generation, have %llu expect > %llu", eb->start, btrfs_header_generation(eb), last_trans); goto error; } write_extent_buffer(eb, result, 0, fs_info->csum_size); return BLK_STS_OK; error: btrfs_print_tree(eb, 0); btrfs_err(fs_info, "block=%llu write time tree block corruption detected", eb->start); /* * Be noisy if this is an extent buffer from a log tree. We don't abort * a transaction in case there's a bad log tree extent buffer, we just * fallback to a transaction commit. Still we want to know when there is * a bad log tree extent buffer, as that may signal a bug somewhere. */ WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG) || btrfs_header_owner(eb) == BTRFS_TREE_LOG_OBJECTID); return errno_to_blk_status(ret); } static bool check_tree_block_fsid(struct extent_buffer *eb) { struct btrfs_fs_info *fs_info = eb->fs_info; struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs; u8 fsid[BTRFS_FSID_SIZE]; read_extent_buffer(eb, fsid, offsetof(struct btrfs_header, fsid), BTRFS_FSID_SIZE); /* * alloc_fsid_devices() copies the fsid into fs_devices::metadata_uuid. * This is then overwritten by metadata_uuid if it is present in the * device_list_add(). The same true for a seed device as well. So use of * fs_devices::metadata_uuid is appropriate here. 
*/ if (memcmp(fsid, fs_info->fs_devices->metadata_uuid, BTRFS_FSID_SIZE) == 0) return false; list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) if (!memcmp(fsid, seed_devs->fsid, BTRFS_FSID_SIZE)) return false; return true; } /* Do basic extent buffer checks at read time */ int btrfs_validate_extent_buffer(struct extent_buffer *eb, const struct btrfs_tree_parent_check *check) { struct btrfs_fs_info *fs_info = eb->fs_info; u64 found_start; const u32 csum_size = fs_info->csum_size; u8 found_level; u8 result[BTRFS_CSUM_SIZE]; const u8 *header_csum; int ret = 0; const bool ignore_csum = btrfs_test_opt(fs_info, IGNOREMETACSUMS); ASSERT(check); found_start = btrfs_header_bytenr(eb); if (found_start != eb->start) { btrfs_err_rl(fs_info, "bad tree block start, mirror %u want %llu have %llu", eb->read_mirror, eb->start, found_start); ret = -EIO; goto out; } if (check_tree_block_fsid(eb)) { btrfs_err_rl(fs_info, "bad fsid on logical %llu mirror %u", eb->start, eb->read_mirror); ret = -EIO; goto out; } found_level = btrfs_header_level(eb); if (found_level >= BTRFS_MAX_LEVEL) { btrfs_err(fs_info, "bad tree block level, mirror %u level %d on logical %llu", eb->read_mirror, btrfs_header_level(eb), eb->start); ret = -EIO; goto out; } csum_tree_block(eb, result); header_csum = folio_address(eb->folios[0]) + get_eb_offset_in_folio(eb, offsetof(struct btrfs_header, csum)); if (memcmp(result, header_csum, csum_size) != 0) { btrfs_warn_rl(fs_info, "checksum verify failed on logical %llu mirror %u wanted " CSUM_FMT " found " CSUM_FMT " level %d%s", eb->start, eb->read_mirror, CSUM_FMT_VALUE(csum_size, header_csum), CSUM_FMT_VALUE(csum_size, result), btrfs_header_level(eb), ignore_csum ? ", ignored" : ""); if (!ignore_csum) { ret = -EUCLEAN; goto out; } } if (found_level != check->level) { btrfs_err(fs_info, "level verify failed on logical %llu mirror %u wanted %u found %u", eb->start, eb->read_mirror, check->level, found_level); ret = -EIO; goto out; } if (unlikely(check->transid && btrfs_header_generation(eb) != check->transid)) { btrfs_err_rl(eb->fs_info, "parent transid verify failed on logical %llu mirror %u wanted %llu found %llu", eb->start, eb->read_mirror, check->transid, btrfs_header_generation(eb)); ret = -EIO; goto out; } if (check->has_first_key) { const struct btrfs_key *expect_key = &check->first_key; struct btrfs_key found_key; if (found_level) btrfs_node_key_to_cpu(eb, &found_key, 0); else btrfs_item_key_to_cpu(eb, &found_key, 0); if (unlikely(btrfs_comp_cpu_keys(expect_key, &found_key))) { btrfs_err(fs_info, "tree first key mismatch detected, bytenr=%llu parent_transid=%llu key expected=(%llu,%u,%llu) has=(%llu,%u,%llu)", eb->start, check->transid, expect_key->objectid, expect_key->type, expect_key->offset, found_key.objectid, found_key.type, found_key.offset); ret = -EUCLEAN; goto out; } } if (check->owner_root) { ret = btrfs_check_eb_owner(eb, check->owner_root); if (ret < 0) goto out; } /* * If this is a leaf block and it is corrupt, set the corrupt bit so * that we don't try and read the other copies of this block, just * return -EIO. 
*/ if (found_level == 0 && btrfs_check_leaf(eb)) { set_bit(EXTENT_BUFFER_CORRUPT, &eb->bflags); ret = -EIO; } if (found_level > 0 && btrfs_check_node(eb)) ret = -EIO; if (ret) btrfs_err(fs_info, "read time tree block corruption detected on logical %llu mirror %u", eb->start, eb->read_mirror); out: return ret; } #ifdef CONFIG_MIGRATION static int btree_migrate_folio(struct address_space *mapping, struct folio *dst, struct folio *src, enum migrate_mode mode) { /* * we can't safely write a btree page from here, * we haven't done the locking hook */ if (folio_test_dirty(src)) return -EAGAIN; /* * Buffers may be managed in a filesystem specific way. * We must have no buffers or drop them. */ if (folio_get_private(src) && !filemap_release_folio(src, GFP_KERNEL)) return -EAGAIN; return migrate_folio(mapping, dst, src, mode); } #else #define btree_migrate_folio NULL #endif static int btree_writepages(struct address_space *mapping, struct writeback_control *wbc) { int ret; if (wbc->sync_mode == WB_SYNC_NONE) { struct btrfs_fs_info *fs_info; if (wbc->for_kupdate) return 0; fs_info = inode_to_fs_info(mapping->host); /* this is a bit racy, but that's ok */ ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, BTRFS_DIRTY_METADATA_THRESH, fs_info->dirty_metadata_batch); if (ret < 0) return 0; } return btree_write_cache_pages(mapping, wbc); } static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags) { if (folio_test_writeback(folio) || folio_test_dirty(folio)) return false; return try_release_extent_buffer(folio); } static void btree_invalidate_folio(struct folio *folio, size_t offset, size_t length) { struct extent_io_tree *tree; tree = &folio_to_inode(folio)->io_tree; extent_invalidate_folio(tree, folio, offset); btree_release_folio(folio, GFP_NOFS); if (folio_get_private(folio)) { btrfs_warn(folio_to_fs_info(folio), "folio private not zero on folio %llu", (unsigned long long)folio_pos(folio)); folio_detach_private(folio); } } #ifdef DEBUG static bool btree_dirty_folio(struct address_space *mapping, struct folio *folio) { struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); struct btrfs_subpage_info *spi = fs_info->subpage_info; struct btrfs_subpage *subpage; struct extent_buffer *eb; int cur_bit = 0; u64 page_start = folio_pos(folio); if (fs_info->sectorsize == PAGE_SIZE) { eb = folio_get_private(folio); BUG_ON(!eb); BUG_ON(!test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); BUG_ON(!atomic_read(&eb->refs)); btrfs_assert_tree_write_locked(eb); return filemap_dirty_folio(mapping, folio); } ASSERT(spi); subpage = folio_get_private(folio); for (cur_bit = spi->dirty_offset; cur_bit < spi->dirty_offset + spi->bitmap_nr_bits; cur_bit++) { unsigned long flags; u64 cur; spin_lock_irqsave(&subpage->lock, flags); if (!test_bit(cur_bit, subpage->bitmaps)) { spin_unlock_irqrestore(&subpage->lock, flags); continue; } spin_unlock_irqrestore(&subpage->lock, flags); cur = page_start + cur_bit * fs_info->sectorsize; eb = find_extent_buffer(fs_info, cur); ASSERT(eb); ASSERT(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags)); ASSERT(atomic_read(&eb->refs)); btrfs_assert_tree_write_locked(eb); free_extent_buffer(eb); cur_bit += (fs_info->nodesize >> fs_info->sectorsize_bits) - 1; } return filemap_dirty_folio(mapping, folio); } #else #define btree_dirty_folio filemap_dirty_folio #endif static const struct address_space_operations btree_aops = { .writepages = btree_writepages, .release_folio = btree_release_folio, .invalidate_folio = btree_invalidate_folio, .migrate_folio = btree_migrate_folio, 
.dirty_folio = btree_dirty_folio, }; struct extent_buffer *btrfs_find_create_tree_block( struct btrfs_fs_info *fs_info, u64 bytenr, u64 owner_root, int level) { if (btrfs_is_testing(fs_info)) return alloc_test_extent_buffer(fs_info, bytenr); return alloc_extent_buffer(fs_info, bytenr, owner_root, level); } /* * Read tree block at logical address @bytenr and do variant basic but critical * verification. * * @check: expected tree parentness check, see comments of the * structure for details. */ struct extent_buffer *read_tree_block(struct btrfs_fs_info *fs_info, u64 bytenr, struct btrfs_tree_parent_check *check) { struct extent_buffer *buf = NULL; int ret; ASSERT(check); buf = btrfs_find_create_tree_block(fs_info, bytenr, check->owner_root, check->level); if (IS_ERR(buf)) return buf; ret = btrfs_read_extent_buffer(buf, check); if (ret) { free_extent_buffer_stale(buf); return ERR_PTR(ret); } return buf; } static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info, u64 objectid) { bool dummy = btrfs_is_testing(fs_info); memset(&root->root_key, 0, sizeof(root->root_key)); memset(&root->root_item, 0, sizeof(root->root_item)); memset(&root->defrag_progress, 0, sizeof(root->defrag_progress)); root->fs_info = fs_info; root->root_key.objectid = objectid; root->node = NULL; root->commit_root = NULL; root->state = 0; RB_CLEAR_NODE(&root->rb_node); btrfs_set_root_last_trans(root, 0); root->free_objectid = 0; root->nr_delalloc_inodes = 0; root->nr_ordered_extents = 0; xa_init(&root->inodes); xa_init(&root->delayed_nodes); btrfs_init_root_block_rsv(root); INIT_LIST_HEAD(&root->dirty_list); INIT_LIST_HEAD(&root->root_list); INIT_LIST_HEAD(&root->delalloc_inodes); INIT_LIST_HEAD(&root->delalloc_root); INIT_LIST_HEAD(&root->ordered_extents); INIT_LIST_HEAD(&root->ordered_root); INIT_LIST_HEAD(&root->reloc_dirty_list); spin_lock_init(&root->delalloc_lock); spin_lock_init(&root->ordered_extent_lock); spin_lock_init(&root->accounting_lock); spin_lock_init(&root->qgroup_meta_rsv_lock); mutex_init(&root->objectid_mutex); mutex_init(&root->log_mutex); mutex_init(&root->ordered_extent_mutex); mutex_init(&root->delalloc_mutex); init_waitqueue_head(&root->qgroup_flush_wait); init_waitqueue_head(&root->log_writer_wait); init_waitqueue_head(&root->log_commit_wait[0]); init_waitqueue_head(&root->log_commit_wait[1]); INIT_LIST_HEAD(&root->log_ctxs[0]); INIT_LIST_HEAD(&root->log_ctxs[1]); atomic_set(&root->log_commit[0], 0); atomic_set(&root->log_commit[1], 0); atomic_set(&root->log_writers, 0); atomic_set(&root->log_batch, 0); refcount_set(&root->refs, 1); atomic_set(&root->snapshot_force_cow, 0); atomic_set(&root->nr_swapfiles, 0); btrfs_set_root_log_transid(root, 0); root->log_transid_committed = -1; btrfs_set_root_last_log_commit(root, 0); root->anon_dev = 0; if (!dummy) { extent_io_tree_init(fs_info, &root->dirty_log_pages, IO_TREE_ROOT_DIRTY_LOG_PAGES); extent_io_tree_init(fs_info, &root->log_csum_range, IO_TREE_LOG_CSUM_RANGE); } spin_lock_init(&root->root_item_lock); btrfs_qgroup_init_swapped_blocks(&root->swapped_blocks); #ifdef CONFIG_BTRFS_DEBUG INIT_LIST_HEAD(&root->leak_list); spin_lock(&fs_info->fs_roots_radix_lock); list_add_tail(&root->leak_list, &fs_info->allocated_roots); spin_unlock(&fs_info->fs_roots_radix_lock); #endif } static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info, u64 objectid, gfp_t flags) { struct btrfs_root *root = kzalloc(sizeof(*root), flags); if (root) __setup_root(root, fs_info, objectid); return root; } #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS 
/* Should only be used by the testing infrastructure */ struct btrfs_root *btrfs_alloc_dummy_root(struct btrfs_fs_info *fs_info) { struct btrfs_root *root; if (!fs_info) return ERR_PTR(-EINVAL); root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL); if (!root) return ERR_PTR(-ENOMEM); /* We don't use the stripesize in selftest, set it as sectorsize */ root->alloc_bytenr = 0; return root; } #endif static int global_root_cmp(struct rb_node *a_node, const struct rb_node *b_node) { const struct btrfs_root *a = rb_entry(a_node, struct btrfs_root, rb_node); const struct btrfs_root *b = rb_entry(b_node, struct btrfs_root, rb_node); return btrfs_comp_cpu_keys(&a->root_key, &b->root_key); } static int global_root_key_cmp(const void *k, const struct rb_node *node) { const struct btrfs_key *key = k; const struct btrfs_root *root = rb_entry(node, struct btrfs_root, rb_node); return btrfs_comp_cpu_keys(key, &root->root_key); } int btrfs_global_root_insert(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct rb_node *tmp; int ret = 0; write_lock(&fs_info->global_root_lock); tmp = rb_find_add(&root->rb_node, &fs_info->global_root_tree, global_root_cmp); write_unlock(&fs_info->global_root_lock); if (tmp) { ret = -EEXIST; btrfs_warn(fs_info, "global root %llu %llu already exists", btrfs_root_id(root), root->root_key.offset); } return ret; } void btrfs_global_root_delete(struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; write_lock(&fs_info->global_root_lock); rb_erase(&root->rb_node, &fs_info->global_root_tree); write_unlock(&fs_info->global_root_lock); } struct btrfs_root *btrfs_global_root(struct btrfs_fs_info *fs_info, struct btrfs_key *key) { struct rb_node *node; struct btrfs_root *root = NULL; read_lock(&fs_info->global_root_lock); node = rb_find(key, &fs_info->global_root_tree, global_root_key_cmp); if (node) root = container_of(node, struct btrfs_root, rb_node); read_unlock(&fs_info->global_root_lock); return root; } static u64 btrfs_global_root_id(struct btrfs_fs_info *fs_info, u64 bytenr) { struct btrfs_block_group *block_group; u64 ret; if (!btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) return 0; if (bytenr) block_group = btrfs_lookup_block_group(fs_info, bytenr); else block_group = btrfs_lookup_first_block_group(fs_info, bytenr); ASSERT(block_group); if (!block_group) return 0; ret = block_group->global_root_id; btrfs_put_block_group(block_group); return ret; } struct btrfs_root *btrfs_csum_root(struct btrfs_fs_info *fs_info, u64 bytenr) { struct btrfs_key key = { .objectid = BTRFS_CSUM_TREE_OBJECTID, .type = BTRFS_ROOT_ITEM_KEY, .offset = btrfs_global_root_id(fs_info, bytenr), }; return btrfs_global_root(fs_info, &key); } struct btrfs_root *btrfs_extent_root(struct btrfs_fs_info *fs_info, u64 bytenr) { struct btrfs_key key = { .objectid = BTRFS_EXTENT_TREE_OBJECTID, .type = BTRFS_ROOT_ITEM_KEY, .offset = btrfs_global_root_id(fs_info, bytenr), }; return btrfs_global_root(fs_info, &key); } struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans, u64 objectid) { struct btrfs_fs_info *fs_info = trans->fs_info; struct extent_buffer *leaf; struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root; struct btrfs_key key; unsigned int nofs_flag; int ret = 0; /* * We're holding a transaction handle, so use a NOFS memory allocation * context to avoid deadlock if reclaim happens. 
*/ nofs_flag = memalloc_nofs_save(); root = btrfs_alloc_root(fs_info, objectid, GFP_KERNEL); memalloc_nofs_restore(nofs_flag); if (!root) return ERR_PTR(-ENOMEM); root->root_key.objectid = objectid; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = 0; leaf = btrfs_alloc_tree_block(trans, root, 0, objectid, NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL); if (IS_ERR(leaf)) { ret = PTR_ERR(leaf); leaf = NULL; goto fail; } root->node = leaf; btrfs_mark_buffer_dirty(trans, leaf); root->commit_root = btrfs_root_node(root); set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); btrfs_set_root_flags(&root->root_item, 0); btrfs_set_root_limit(&root->root_item, 0); btrfs_set_root_bytenr(&root->root_item, leaf->start); btrfs_set_root_generation(&root->root_item, trans->transid); btrfs_set_root_level(&root->root_item, 0); btrfs_set_root_refs(&root->root_item, 1); btrfs_set_root_used(&root->root_item, leaf->len); btrfs_set_root_last_snapshot(&root->root_item, 0); btrfs_set_root_dirid(&root->root_item, 0); if (is_fstree(objectid)) generate_random_guid(root->root_item.uuid); else export_guid(root->root_item.uuid, &guid_null); btrfs_set_root_drop_level(&root->root_item, 0); btrfs_tree_unlock(leaf); key.objectid = objectid; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = 0; ret = btrfs_insert_root(trans, tree_root, &key, &root->root_item); if (ret) goto fail; return root; fail: btrfs_put_root(root); return ERR_PTR(ret); } static struct btrfs_root *alloc_log_tree(struct btrfs_fs_info *fs_info) { struct btrfs_root *root; root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_NOFS); if (!root) return ERR_PTR(-ENOMEM); root->root_key.objectid = BTRFS_TREE_LOG_OBJECTID; root->root_key.type = BTRFS_ROOT_ITEM_KEY; root->root_key.offset = BTRFS_TREE_LOG_OBJECTID; return root; } int btrfs_alloc_log_tree_node(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct extent_buffer *leaf; /* * DON'T set SHAREABLE bit for log trees. * * Log trees are not exposed to user space thus can't be snapshotted, * and they go away before a real commit is actually done. * * They do store pointers to file data extents, and those reference * counts still get updated (along with back refs to the log tree). 
*/ leaf = btrfs_alloc_tree_block(trans, root, 0, BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0, 0, BTRFS_NESTING_NORMAL); if (IS_ERR(leaf)) return PTR_ERR(leaf); root->node = leaf; btrfs_mark_buffer_dirty(trans, root->node); btrfs_tree_unlock(root->node); return 0; } int btrfs_init_log_root_tree(struct btrfs_trans_handle *trans, struct btrfs_fs_info *fs_info) { struct btrfs_root *log_root; log_root = alloc_log_tree(fs_info); if (IS_ERR(log_root)) return PTR_ERR(log_root); if (!btrfs_is_zoned(fs_info)) { int ret = btrfs_alloc_log_tree_node(trans, log_root); if (ret) { btrfs_put_root(log_root); return ret; } } WARN_ON(fs_info->log_root_tree); fs_info->log_root_tree = log_root; return 0; } int btrfs_add_log_tree(struct btrfs_trans_handle *trans, struct btrfs_root *root) { struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_root *log_root; struct btrfs_inode_item *inode_item; int ret; log_root = alloc_log_tree(fs_info); if (IS_ERR(log_root)) return PTR_ERR(log_root); ret = btrfs_alloc_log_tree_node(trans, log_root); if (ret) { btrfs_put_root(log_root); return ret; } btrfs_set_root_last_trans(log_root, trans->transid); log_root->root_key.offset = btrfs_root_id(root); inode_item = &log_root->root_item.inode; btrfs_set_stack_inode_generation(inode_item, 1); btrfs_set_stack_inode_size(inode_item, 3); btrfs_set_stack_inode_nlink(inode_item, 1); btrfs_set_stack_inode_nbytes(inode_item, fs_info->nodesize); btrfs_set_stack_inode_mode(inode_item, S_IFDIR | 0755); btrfs_set_root_node(&log_root->root_item, log_root->node); WARN_ON(root->log_root); root->log_root = log_root; btrfs_set_root_log_transid(root, 0); root->log_transid_committed = -1; btrfs_set_root_last_log_commit(root, 0); return 0; } static struct btrfs_root *read_tree_root_path(struct btrfs_root *tree_root, struct btrfs_path *path, const struct btrfs_key *key) { struct btrfs_root *root; struct btrfs_tree_parent_check check = { 0 }; struct btrfs_fs_info *fs_info = tree_root->fs_info; u64 generation; int ret; int level; root = btrfs_alloc_root(fs_info, key->objectid, GFP_NOFS); if (!root) return ERR_PTR(-ENOMEM); ret = btrfs_find_root(tree_root, key, path, &root->root_item, &root->root_key); if (ret) { if (ret > 0) ret = -ENOENT; goto fail; } generation = btrfs_root_generation(&root->root_item); level = btrfs_root_level(&root->root_item); check.level = level; check.transid = generation; check.owner_root = key->objectid; root->node = read_tree_block(fs_info, btrfs_root_bytenr(&root->root_item), &check); if (IS_ERR(root->node)) { ret = PTR_ERR(root->node); root->node = NULL; goto fail; } if (!btrfs_buffer_uptodate(root->node, generation, 0)) { ret = -EIO; goto fail; } /* * For real fs, and not log/reloc trees, root owner must * match its root node owner */ if (!btrfs_is_testing(fs_info) && btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID && btrfs_root_id(root) != BTRFS_TREE_RELOC_OBJECTID && btrfs_root_id(root) != btrfs_header_owner(root->node)) { btrfs_crit(fs_info, "root=%llu block=%llu, tree root owner mismatch, have %llu expect %llu", btrfs_root_id(root), root->node->start, btrfs_header_owner(root->node), btrfs_root_id(root)); ret = -EUCLEAN; goto fail; } root->commit_root = btrfs_root_node(root); return root; fail: btrfs_put_root(root); return ERR_PTR(ret); } struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root, const struct btrfs_key *key) { struct btrfs_root *root; struct btrfs_path *path; path = btrfs_alloc_path(); if (!path) return ERR_PTR(-ENOMEM); root = read_tree_root_path(tree_root, path, key); btrfs_free_path(path); 
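	/*
	 * On success the returned root still holds the reference taken by
	 * btrfs_alloc_root() in read_tree_root_path(); callers are expected
	 * to drop it with btrfs_put_root() once they are done with it.
	 */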
return root; } /* * Initialize subvolume root in-memory structure * * @anon_dev: anonymous device to attach to the root, if zero, allocate new */ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) { int ret; btrfs_drew_lock_init(&root->snapshot_lock); if (btrfs_root_id(root) != BTRFS_TREE_LOG_OBJECTID && !btrfs_is_data_reloc_root(root) && is_fstree(btrfs_root_id(root))) { set_bit(BTRFS_ROOT_SHAREABLE, &root->state); btrfs_check_and_init_root_item(&root->root_item); } /* * Don't assign anonymous block device to roots that are not exposed to * userspace, the id pool is limited to 1M */ if (is_fstree(btrfs_root_id(root)) && btrfs_root_refs(&root->root_item) > 0) { if (!anon_dev) { ret = get_anon_bdev(&root->anon_dev); if (ret) goto fail; } else { root->anon_dev = anon_dev; } } mutex_lock(&root->objectid_mutex); ret = btrfs_init_root_free_objectid(root); if (ret) { mutex_unlock(&root->objectid_mutex); goto fail; } ASSERT(root->free_objectid <= BTRFS_LAST_FREE_OBJECTID); mutex_unlock(&root->objectid_mutex); return 0; fail: /* The caller is responsible to call btrfs_free_fs_root */ return ret; } static struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, u64 root_id) { struct btrfs_root *root; spin_lock(&fs_info->fs_roots_radix_lock); root = radix_tree_lookup(&fs_info->fs_roots_radix, (unsigned long)root_id); root = btrfs_grab_root(root); spin_unlock(&fs_info->fs_roots_radix_lock); return root; } static struct btrfs_root *btrfs_get_global_root(struct btrfs_fs_info *fs_info, u64 objectid) { struct btrfs_key key = { .objectid = objectid, .type = BTRFS_ROOT_ITEM_KEY, .offset = 0, }; switch (objectid) { case BTRFS_ROOT_TREE_OBJECTID: return btrfs_grab_root(fs_info->tree_root); case BTRFS_EXTENT_TREE_OBJECTID: return btrfs_grab_root(btrfs_global_root(fs_info, &key)); case BTRFS_CHUNK_TREE_OBJECTID: return btrfs_grab_root(fs_info->chunk_root); case BTRFS_DEV_TREE_OBJECTID: return btrfs_grab_root(fs_info->dev_root); case BTRFS_CSUM_TREE_OBJECTID: return btrfs_grab_root(btrfs_global_root(fs_info, &key)); case BTRFS_QUOTA_TREE_OBJECTID: return btrfs_grab_root(fs_info->quota_root); case BTRFS_UUID_TREE_OBJECTID: return btrfs_grab_root(fs_info->uuid_root); case BTRFS_BLOCK_GROUP_TREE_OBJECTID: return btrfs_grab_root(fs_info->block_group_root); case BTRFS_FREE_SPACE_TREE_OBJECTID: return btrfs_grab_root(btrfs_global_root(fs_info, &key)); case BTRFS_RAID_STRIPE_TREE_OBJECTID: return btrfs_grab_root(fs_info->stripe_root); default: return NULL; } } int btrfs_insert_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) { int ret; ret = radix_tree_preload(GFP_NOFS); if (ret) return ret; spin_lock(&fs_info->fs_roots_radix_lock); ret = radix_tree_insert(&fs_info->fs_roots_radix, (unsigned long)btrfs_root_id(root), root); if (ret == 0) { btrfs_grab_root(root); set_bit(BTRFS_ROOT_IN_RADIX, &root->state); } spin_unlock(&fs_info->fs_roots_radix_lock); radix_tree_preload_end(); return ret; } void btrfs_check_leaked_roots(const struct btrfs_fs_info *fs_info) { #ifdef CONFIG_BTRFS_DEBUG struct btrfs_root *root; while (!list_empty(&fs_info->allocated_roots)) { char buf[BTRFS_ROOT_NAME_BUF_LEN]; root = list_first_entry(&fs_info->allocated_roots, struct btrfs_root, leak_list); btrfs_err(fs_info, "leaked root %s refcount %d", btrfs_root_name(&root->root_key, buf), refcount_read(&root->refs)); WARN_ON_ONCE(1); while (refcount_read(&root->refs) > 1) btrfs_put_root(root); btrfs_put_root(root); } #endif } static void free_global_roots(struct btrfs_fs_info *fs_info) { struct 
btrfs_root *root; struct rb_node *node; while ((node = rb_first_postorder(&fs_info->global_root_tree)) != NULL) { root = rb_entry(node, struct btrfs_root, rb_node); rb_erase(&root->rb_node, &fs_info->global_root_tree); btrfs_put_root(root); } } void btrfs_free_fs_info(struct btrfs_fs_info *fs_info) { struct percpu_counter *em_counter = &fs_info->evictable_extent_maps; percpu_counter_destroy(&fs_info->dirty_metadata_bytes); percpu_counter_destroy(&fs_info->delalloc_bytes); percpu_counter_destroy(&fs_info->ordered_bytes); if (percpu_counter_initialized(em_counter)) ASSERT(percpu_counter_sum_positive(em_counter) == 0); percpu_counter_destroy(em_counter); percpu_counter_destroy(&fs_info->dev_replace.bio_counter); btrfs_free_csum_hash(fs_info); btrfs_free_stripe_hash_table(fs_info); btrfs_free_ref_cache(fs_info); kfree(fs_info->balance_ctl); kfree(fs_info->delayed_root); free_global_roots(fs_info); btrfs_put_root(fs_info->tree_root); btrfs_put_root(fs_info->chunk_root); btrfs_put_root(fs_info->dev_root); btrfs_put_root(fs_info->quota_root); btrfs_put_root(fs_info->uuid_root); btrfs_put_root(fs_info->fs_root); btrfs_put_root(fs_info->data_reloc_root); btrfs_put_root(fs_info->block_group_root); btrfs_put_root(fs_info->stripe_root); btrfs_check_leaked_roots(fs_info); btrfs_extent_buffer_leak_debug_check(fs_info); kfree(fs_info->super_copy); kfree(fs_info->super_for_commit); kvfree(fs_info); } /* * Get an in-memory reference of a root structure. * * For essential trees like root/extent tree, we grab it from fs_info directly. * For subvolume trees, we check the cached filesystem roots first. If not * found, then read it from disk and add it to cached fs roots. * * Caller should release the root by calling btrfs_put_root() after the usage. * * NOTE: Reloc and log trees can't be read by this function as they share the * same root objectid. * * @objectid: root id * @anon_dev: preallocated anonymous block device number for new roots, * pass NULL for a new allocation. * @check_ref: whether to check root item references, If true, return -ENOENT * for orphan roots */ static struct btrfs_root *btrfs_get_root_ref(struct btrfs_fs_info *fs_info, u64 objectid, dev_t *anon_dev, bool check_ref) { struct btrfs_root *root; struct btrfs_path *path; struct btrfs_key key; int ret; root = btrfs_get_global_root(fs_info, objectid); if (root) return root; /* * If we're called for non-subvolume trees, and above function didn't * find one, do not try to read it from disk. * * This is namely for free-space-tree and quota tree, which can change * at runtime and should only be grabbed from fs_info. */ if (!is_fstree(objectid) && objectid != BTRFS_DATA_RELOC_TREE_OBJECTID) return ERR_PTR(-ENOENT); again: root = btrfs_lookup_fs_root(fs_info, objectid); if (root) { /* * Some other caller may have read out the newly inserted * subvolume already (for things like backref walk etc). Not * that common but still possible. In that case, we just need * to free the anon_dev. */ if (unlikely(anon_dev && *anon_dev)) { free_anon_bdev(*anon_dev); *anon_dev = 0; } if (check_ref && btrfs_root_refs(&root->root_item) == 0) { btrfs_put_root(root); return ERR_PTR(-ENOENT); } return root; } key.objectid = objectid; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; root = btrfs_read_tree_root(fs_info->tree_root, &key); if (IS_ERR(root)) return root; if (check_ref && btrfs_root_refs(&root->root_item) == 0) { ret = -ENOENT; goto fail; } ret = btrfs_init_fs_root(root, anon_dev ? 
*anon_dev : 0); if (ret) goto fail; path = btrfs_alloc_path(); if (!path) { ret = -ENOMEM; goto fail; } key.objectid = BTRFS_ORPHAN_OBJECTID; key.type = BTRFS_ORPHAN_ITEM_KEY; key.offset = objectid; ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0); btrfs_free_path(path); if (ret < 0) goto fail; if (ret == 0) set_bit(BTRFS_ROOT_ORPHAN_ITEM_INSERTED, &root->state); ret = btrfs_insert_fs_root(fs_info, root); if (ret) { if (ret == -EEXIST) { btrfs_put_root(root); goto again; } goto fail; } return root; fail: /* * If our caller provided us an anonymous device, then it's his * responsibility to free it in case we fail. So we have to set our * root's anon_dev to 0 to avoid a double free, once by btrfs_put_root() * and once again by our caller. */ if (anon_dev && *anon_dev) root->anon_dev = 0; btrfs_put_root(root); return ERR_PTR(ret); } /* * Get in-memory reference of a root structure * * @objectid: tree objectid * @check_ref: if set, verify that the tree exists and the item has at least * one reference */ struct btrfs_root *btrfs_get_fs_root(struct btrfs_fs_info *fs_info, u64 objectid, bool check_ref) { return btrfs_get_root_ref(fs_info, objectid, NULL, check_ref); } /* * Get in-memory reference of a root structure, created as new, optionally pass * the anonymous block device id * * @objectid: tree objectid * @anon_dev: if NULL, allocate a new anonymous block device or use the * parameter value if not NULL */ struct btrfs_root *btrfs_get_new_fs_root(struct btrfs_fs_info *fs_info, u64 objectid, dev_t *anon_dev) { return btrfs_get_root_ref(fs_info, objectid, anon_dev, true); } /* * Return a root for the given objectid. * * @fs_info: the fs_info * @objectid: the objectid we need to lookup * * This is exclusively used for backref walking, and exists specifically because * of how qgroups does lookups. Qgroups will do a backref lookup at delayed ref * creation time, which means we may have to read the tree_root in order to look * up a fs root that is not in memory. If the root is not in memory we will * read the tree root commit root and look up the fs root from there. This is a * temporary root, it will not be inserted into the radix tree as it doesn't * have the most uptodate information, it'll simply be discarded once the * backref code is finished using the root. */ struct btrfs_root *btrfs_get_fs_root_commit_root(struct btrfs_fs_info *fs_info, struct btrfs_path *path, u64 objectid) { struct btrfs_root *root; struct btrfs_key key; ASSERT(path->search_commit_root && path->skip_locking); /* * This can return -ENOENT if we ask for a root that doesn't exist, but * since this is called via the backref walking code we won't be looking * up a root that doesn't exist, unless there's corruption. So if root * != NULL just return it. */ root = btrfs_get_global_root(fs_info, objectid); if (root) return root; root = btrfs_lookup_fs_root(fs_info, objectid); if (root) return root; key.objectid = objectid; key.type = BTRFS_ROOT_ITEM_KEY; key.offset = (u64)-1; root = read_tree_root_path(fs_info->tree_root, path, &key); btrfs_release_path(path); return root; } static int cleaner_kthread(void *arg) { struct btrfs_fs_info *fs_info = arg; int again; while (1) { again = 0; set_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); /* Make the cleaner go to sleep early. */ if (btrfs_need_cleaner_sleep(fs_info)) goto sleep; /* * Do not do anything if we might cause open_ctree() to block * before we have finished mounting the filesystem. 
*/ if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags)) goto sleep; if (!mutex_trylock(&fs_info->cleaner_mutex)) goto sleep; /* * Avoid the problem that we change the status of the fs * during the above check and trylock. */ if (btrfs_need_cleaner_sleep(fs_info)) { mutex_unlock(&fs_info->cleaner_mutex); goto sleep; } if (test_and_clear_bit(BTRFS_FS_FEATURE_CHANGED, &fs_info->flags)) btrfs_sysfs_feature_update(fs_info); btrfs_run_delayed_iputs(fs_info); again = btrfs_clean_one_deleted_snapshot(fs_info); mutex_unlock(&fs_info->cleaner_mutex); /* * The defragger has dealt with the R/O remount and umount, * needn't do anything special here. */ btrfs_run_defrag_inodes(fs_info); /* * Acquires fs_info->reclaim_bgs_lock to avoid racing * with relocation (btrfs_relocate_chunk) and relocation * acquires fs_info->cleaner_mutex (btrfs_relocate_block_group) * after acquiring fs_info->reclaim_bgs_lock. So we * can't hold, nor need to, fs_info->cleaner_mutex when deleting * unused block groups. */ btrfs_delete_unused_bgs(fs_info); /* * Reclaim block groups in the reclaim_bgs list after we deleted * all unused block_groups. This possibly gives us some more free * space. */ btrfs_reclaim_bgs(fs_info); sleep: clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags); if (kthread_should_park()) kthread_parkme(); if (kthread_should_stop()) return 0; if (!again) { set_current_state(TASK_INTERRUPTIBLE); schedule(); __set_current_state(TASK_RUNNING); } } } static int transaction_kthread(void *arg) { struct btrfs_root *root = arg; struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_trans_handle *trans; struct btrfs_transaction *cur; u64 transid; time64_t delta; unsigned long delay; bool cannot_commit; do { cannot_commit = false; delay = msecs_to_jiffies(fs_info->commit_interval * 1000); mutex_lock(&fs_info->transaction_kthread_mutex); spin_lock(&fs_info->trans_lock); cur = fs_info->running_transaction; if (!cur) { spin_unlock(&fs_info->trans_lock); goto sleep; } delta = ktime_get_seconds() - cur->start_time; if (!test_and_clear_bit(BTRFS_FS_COMMIT_TRANS, &fs_info->flags) && cur->state < TRANS_STATE_COMMIT_PREP && delta < fs_info->commit_interval) { spin_unlock(&fs_info->trans_lock); delay -= msecs_to_jiffies((delta - 1) * 1000); delay = min(delay, msecs_to_jiffies(fs_info->commit_interval * 1000)); goto sleep; } transid = cur->transid; spin_unlock(&fs_info->trans_lock); /* If the file system is aborted, this will always fail. */ trans = btrfs_attach_transaction(root); if (IS_ERR(trans)) { if (PTR_ERR(trans) != -ENOENT) cannot_commit = true; goto sleep; } if (transid == trans->transid) { btrfs_commit_transaction(trans); } else { btrfs_end_transaction(trans); } sleep: wake_up_process(fs_info->cleaner_kthread); mutex_unlock(&fs_info->transaction_kthread_mutex); if (BTRFS_FS_ERROR(fs_info)) btrfs_cleanup_transaction(fs_info); if (!kthread_should_stop() && (!btrfs_transaction_blocked(fs_info) || cannot_commit)) schedule_timeout_interruptible(delay); } while (!kthread_should_stop()); return 0; } /* * This will find the highest generation in the array of root backups. The * index of the highest array is returned, or -EINVAL if we can't find * anything. * * We check to make sure the array is valid by comparing the * generation of the latest root in the array with the generation * in the super block. If they don't match we pitch it. 
*/ static int find_newest_super_backup(struct btrfs_fs_info *info) { const u64 newest_gen = btrfs_super_generation(info->super_copy); u64 cur; struct btrfs_root_backup *root_backup; int i; for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { root_backup = info->super_copy->super_roots + i; cur = btrfs_backup_tree_root_gen(root_backup); if (cur == newest_gen) return i; } return -EINVAL; } /* * copy all the root pointers into the super backup array. * this will bump the backup pointer by one when it is * done */ static void backup_super_roots(struct btrfs_fs_info *info) { const int next_backup = info->backup_root_index; struct btrfs_root_backup *root_backup; root_backup = info->super_for_commit->super_roots + next_backup; /* * make sure all of our padding and empty slots get zero filled * regardless of which ones we use today */ memset(root_backup, 0, sizeof(*root_backup)); info->backup_root_index = (next_backup + 1) % BTRFS_NUM_BACKUP_ROOTS; btrfs_set_backup_tree_root(root_backup, info->tree_root->node->start); btrfs_set_backup_tree_root_gen(root_backup, btrfs_header_generation(info->tree_root->node)); btrfs_set_backup_tree_root_level(root_backup, btrfs_header_level(info->tree_root->node)); btrfs_set_backup_chunk_root(root_backup, info->chunk_root->node->start); btrfs_set_backup_chunk_root_gen(root_backup, btrfs_header_generation(info->chunk_root->node)); btrfs_set_backup_chunk_root_level(root_backup, btrfs_header_level(info->chunk_root->node)); if (!btrfs_fs_compat_ro(info, BLOCK_GROUP_TREE)) { struct btrfs_root *extent_root = btrfs_extent_root(info, 0); struct btrfs_root *csum_root = btrfs_csum_root(info, 0); btrfs_set_backup_extent_root(root_backup, extent_root->node->start); btrfs_set_backup_extent_root_gen(root_backup, btrfs_header_generation(extent_root->node)); btrfs_set_backup_extent_root_level(root_backup, btrfs_header_level(extent_root->node)); btrfs_set_backup_csum_root(root_backup, csum_root->node->start); btrfs_set_backup_csum_root_gen(root_backup, btrfs_header_generation(csum_root->node)); btrfs_set_backup_csum_root_level(root_backup, btrfs_header_level(csum_root->node)); } /* * we might commit during log recovery, which happens before we set * the fs_root. Make sure it is valid before we fill it in. */ if (info->fs_root && info->fs_root->node) { btrfs_set_backup_fs_root(root_backup, info->fs_root->node->start); btrfs_set_backup_fs_root_gen(root_backup, btrfs_header_generation(info->fs_root->node)); btrfs_set_backup_fs_root_level(root_backup, btrfs_header_level(info->fs_root->node)); } btrfs_set_backup_dev_root(root_backup, info->dev_root->node->start); btrfs_set_backup_dev_root_gen(root_backup, btrfs_header_generation(info->dev_root->node)); btrfs_set_backup_dev_root_level(root_backup, btrfs_header_level(info->dev_root->node)); btrfs_set_backup_total_bytes(root_backup, btrfs_super_total_bytes(info->super_copy)); btrfs_set_backup_bytes_used(root_backup, btrfs_super_bytes_used(info->super_copy)); btrfs_set_backup_num_devices(root_backup, btrfs_super_num_devices(info->super_copy)); /* * if we don't copy this out to the super_copy, it won't get remembered * for the next commit */ memcpy(&info->super_copy->super_roots, &info->super_for_commit->super_roots, sizeof(*root_backup) * BTRFS_NUM_BACKUP_ROOTS); } /* * Reads a backup root based on the passed priority. 
Prio 0 is the newest, prio * 1/2/3 are 2nd newest/3rd newest/4th (oldest) backup roots * * @fs_info: filesystem whose backup roots need to be read * @priority: priority of backup root required * * Returns backup root index on success and -EINVAL otherwise. */ static int read_backup_root(struct btrfs_fs_info *fs_info, u8 priority) { int backup_index = find_newest_super_backup(fs_info); struct btrfs_super_block *super = fs_info->super_copy; struct btrfs_root_backup *root_backup; if (priority < BTRFS_NUM_BACKUP_ROOTS && backup_index >= 0) { if (priority == 0) return backup_index; backup_index = backup_index + BTRFS_NUM_BACKUP_ROOTS - priority; backup_index %= BTRFS_NUM_BACKUP_ROOTS; } else { return -EINVAL; } root_backup = super->super_roots + backup_index; btrfs_set_super_generation(super, btrfs_backup_tree_root_gen(root_backup)); btrfs_set_super_root(super, btrfs_backup_tree_root(root_backup)); btrfs_set_super_root_level(super, btrfs_backup_tree_root_level(root_backup)); btrfs_set_super_bytes_used(super, btrfs_backup_bytes_used(root_backup)); /* * Fixme: the total bytes and num_devices need to match or we should * need a fsck */ btrfs_set_super_total_bytes(super, btrfs_backup_total_bytes(root_backup)); btrfs_set_super_num_devices(super, btrfs_backup_num_devices(root_backup)); return backup_index; } /* helper to cleanup workers */ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) { btrfs_destroy_workqueue(fs_info->fixup_workers); btrfs_destroy_workqueue(fs_info->delalloc_workers); btrfs_destroy_workqueue(fs_info->workers); if (fs_info->endio_workers) destroy_workqueue(fs_info->endio_workers); if (fs_info->rmw_workers) destroy_workqueue(fs_info->rmw_workers); if (fs_info->compressed_write_workers) destroy_workqueue(fs_info->compressed_write_workers); btrfs_destroy_workqueue(fs_info->endio_write_workers); btrfs_destroy_workqueue(fs_info->endio_freespace_worker); btrfs_destroy_workqueue(fs_info->delayed_workers); btrfs_destroy_workqueue(fs_info->caching_workers); btrfs_destroy_workqueue(fs_info->flush_workers); btrfs_destroy_workqueue(fs_info->qgroup_rescan_workers); if (fs_info->discard_ctl.discard_workers) destroy_workqueue(fs_info->discard_ctl.discard_workers); /* * Now that all other work queues are destroyed, we can safely destroy * the queues used for metadata I/O, since tasks from those other work * queues can do metadata I/O operations. 
*/ if (fs_info->endio_meta_workers) destroy_workqueue(fs_info->endio_meta_workers); } static void free_root_extent_buffers(struct btrfs_root *root) { if (root) { free_extent_buffer(root->node); free_extent_buffer(root->commit_root); root->node = NULL; root->commit_root = NULL; } } static void free_global_root_pointers(struct btrfs_fs_info *fs_info) { struct btrfs_root *root, *tmp; rbtree_postorder_for_each_entry_safe(root, tmp, &fs_info->global_root_tree, rb_node) free_root_extent_buffers(root); } /* helper to cleanup tree roots */ static void free_root_pointers(struct btrfs_fs_info *info, bool free_chunk_root) { free_root_extent_buffers(info->tree_root); free_global_root_pointers(info); free_root_extent_buffers(info->dev_root); free_root_extent_buffers(info->quota_root); free_root_extent_buffers(info->uuid_root); free_root_extent_buffers(info->fs_root); free_root_extent_buffers(info->data_reloc_root); free_root_extent_buffers(info->block_group_root); free_root_extent_buffers(info->stripe_root); if (free_chunk_root) free_root_extent_buffers(info->chunk_root); } void btrfs_put_root(struct btrfs_root *root) { if (!root) return; if (refcount_dec_and_test(&root->refs)) { if (WARN_ON(!xa_empty(&root->inodes))) xa_destroy(&root->inodes); WARN_ON(test_bit(BTRFS_ROOT_DEAD_RELOC_TREE, &root->state)); if (root->anon_dev) free_anon_bdev(root->anon_dev); free_root_extent_buffers(root); #ifdef CONFIG_BTRFS_DEBUG spin_lock(&root->fs_info->fs_roots_radix_lock); list_del_init(&root->leak_list); spin_unlock(&root->fs_info->fs_roots_radix_lock); #endif kfree(root); } } void btrfs_free_fs_roots(struct btrfs_fs_info *fs_info) { int ret; struct btrfs_root *gang[8]; int i; while (!list_empty(&fs_info->dead_roots)) { gang[0] = list_entry(fs_info->dead_roots.next, struct btrfs_root, root_list); list_del(&gang[0]->root_list); if (test_bit(BTRFS_ROOT_IN_RADIX, &gang[0]->state)) btrfs_drop_and_free_fs_root(fs_info, gang[0]); btrfs_put_root(gang[0]); } while (1) { ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang)); if (!ret) break; for (i = 0; i < ret; i++) btrfs_drop_and_free_fs_root(fs_info, gang[i]); } } static void btrfs_init_scrub(struct btrfs_fs_info *fs_info) { mutex_init(&fs_info->scrub_lock); atomic_set(&fs_info->scrubs_running, 0); atomic_set(&fs_info->scrub_pause_req, 0); atomic_set(&fs_info->scrubs_paused, 0); atomic_set(&fs_info->scrub_cancel_req, 0); init_waitqueue_head(&fs_info->scrub_pause_wait); refcount_set(&fs_info->scrub_workers_refcnt, 0); } static void btrfs_init_balance(struct btrfs_fs_info *fs_info) { spin_lock_init(&fs_info->balance_lock); mutex_init(&fs_info->balance_mutex); atomic_set(&fs_info->balance_pause_req, 0); atomic_set(&fs_info->balance_cancel_req, 0); fs_info->balance_ctl = NULL; init_waitqueue_head(&fs_info->balance_wait_q); atomic_set(&fs_info->reloc_cancel_req, 0); } static int btrfs_init_btree_inode(struct super_block *sb) { struct btrfs_fs_info *fs_info = btrfs_sb(sb); unsigned long hash = btrfs_inode_hash(BTRFS_BTREE_INODE_OBJECTID, fs_info->tree_root); struct inode *inode; inode = new_inode(sb); if (!inode) return -ENOMEM; btrfs_set_inode_number(BTRFS_I(inode), BTRFS_BTREE_INODE_OBJECTID); set_nlink(inode, 1); /* * we set the i_size on the btree inode to the max possible int. 
* the real end of the address space is determined by all of * the devices in the system */ inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &btree_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS); extent_io_tree_init(fs_info, &BTRFS_I(inode)->io_tree, IO_TREE_BTREE_INODE_IO); extent_map_tree_init(&BTRFS_I(inode)->extent_tree); BTRFS_I(inode)->root = btrfs_grab_root(fs_info->tree_root); set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags); __insert_inode_hash(inode, hash); fs_info->btree_inode = inode; return 0; } static void btrfs_init_dev_replace_locks(struct btrfs_fs_info *fs_info) { mutex_init(&fs_info->dev_replace.lock_finishing_cancel_unmount); init_rwsem(&fs_info->dev_replace.rwsem); init_waitqueue_head(&fs_info->dev_replace.replace_wait); } static void btrfs_init_qgroup(struct btrfs_fs_info *fs_info) { spin_lock_init(&fs_info->qgroup_lock); mutex_init(&fs_info->qgroup_ioctl_lock); fs_info->qgroup_tree = RB_ROOT; INIT_LIST_HEAD(&fs_info->dirty_qgroups); fs_info->qgroup_seq = 1; fs_info->qgroup_ulist = NULL; fs_info->qgroup_rescan_running = false; fs_info->qgroup_drop_subtree_thres = BTRFS_QGROUP_DROP_SUBTREE_THRES_DEFAULT; mutex_init(&fs_info->qgroup_rescan_lock); } static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) { u32 max_active = fs_info->thread_pool_size; unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; unsigned int ordered_flags = WQ_MEM_RECLAIM | WQ_FREEZABLE; fs_info->workers = btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16); fs_info->delalloc_workers = btrfs_alloc_workqueue(fs_info, "delalloc", flags, max_active, 2); fs_info->flush_workers = btrfs_alloc_workqueue(fs_info, "flush_delalloc", flags, max_active, 0); fs_info->caching_workers = btrfs_alloc_workqueue(fs_info, "cache", flags, max_active, 0); fs_info->fixup_workers = btrfs_alloc_ordered_workqueue(fs_info, "fixup", ordered_flags); fs_info->endio_workers = alloc_workqueue("btrfs-endio", flags, max_active); fs_info->endio_meta_workers = alloc_workqueue("btrfs-endio-meta", flags, max_active); fs_info->rmw_workers = alloc_workqueue("btrfs-rmw", flags, max_active); fs_info->endio_write_workers = btrfs_alloc_workqueue(fs_info, "endio-write", flags, max_active, 2); fs_info->compressed_write_workers = alloc_workqueue("btrfs-compressed-write", flags, max_active); fs_info->endio_freespace_worker = btrfs_alloc_workqueue(fs_info, "freespace-write", flags, max_active, 0); fs_info->delayed_workers = btrfs_alloc_workqueue(fs_info, "delayed-meta", flags, max_active, 0); fs_info->qgroup_rescan_workers = btrfs_alloc_ordered_workqueue(fs_info, "qgroup-rescan", ordered_flags); fs_info->discard_ctl.discard_workers = alloc_ordered_workqueue("btrfs_discard", WQ_FREEZABLE); if (!(fs_info->workers && fs_info->delalloc_workers && fs_info->flush_workers && fs_info->endio_workers && fs_info->endio_meta_workers && fs_info->compressed_write_workers && fs_info->endio_write_workers && fs_info->endio_freespace_worker && fs_info->rmw_workers && fs_info->caching_workers && fs_info->fixup_workers && fs_info->delayed_workers && fs_info->qgroup_rescan_workers && fs_info->discard_ctl.discard_workers)) { return -ENOMEM; } return 0; } static int btrfs_init_csum_hash(struct btrfs_fs_info *fs_info, u16 csum_type) { struct crypto_shash *csum_shash; const char *csum_driver = btrfs_super_csum_driver(csum_type); csum_shash = crypto_alloc_shash(csum_driver, 0, 0); if (IS_ERR(csum_shash)) { btrfs_err(fs_info, "error allocating %s hash for checksum", csum_driver); return PTR_ERR(csum_shash); } 
fs_info->csum_shash = csum_shash; /* * Check if the checksum implementation is a fast accelerated one. * As-is this is a bit of a hack and should be replaced once the csum * implementations provide that information themselves. */ switch (csum_type) { case BTRFS_CSUM_TYPE_CRC32: if (!strstr(crypto_shash_driver_name(csum_shash), "generic")) set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); break; case BTRFS_CSUM_TYPE_XXHASH: set_bit(BTRFS_FS_CSUM_IMPL_FAST, &fs_info->flags); break; default: break; } btrfs_info(fs_info, "using %s (%s) checksum algorithm", btrfs_super_csum_name(csum_type), crypto_shash_driver_name(csum_shash)); return 0; } static int btrfs_replay_log(struct btrfs_fs_info *fs_info, struct btrfs_fs_devices *fs_devices) { int ret; struct btrfs_tree_parent_check check = { 0 }; struct btrfs_root *log_tree_root; struct btrfs_super_block *disk_super = fs_info->super_copy; u64 bytenr = btrfs_super_log_root(disk_super); int level = btrfs_super_log_root_level(disk_super); if (fs_devices->rw_devices == 0) { btrfs_warn(fs_info, "log replay required on RO media"); return -EIO; } log_tree_root = btrfs_alloc_root(fs_info, BTRFS_TREE_LOG_OBJECTID, GFP_KERNEL); if (!log_tree_root) return -ENOMEM; check.level = level; check.transid = fs_info->generation + 1; check.owner_root = BTRFS_TREE_LOG_OBJECTID; log_tree_root->node = read_tree_block(fs_info, bytenr, &check); if (IS_ERR(log_tree_root->node)) { btrfs_warn(fs_info, "failed to read log tree"); ret = PTR_ERR(log_tree_root->node); log_tree_root->node = NULL; btrfs_put_root(log_tree_root); return ret; } if (!extent_buffer_uptodate(log_tree_root->node)) { btrfs_err(fs_info, "failed to read log tree"); btrfs_put_root(log_tree_root); return -EIO; } /* returns with log_tree_root freed on success */ ret = btrfs_recover_log_trees(log_tree_root); if (ret) { btrfs_handle_fs_error(fs_info, ret, "Failed to recover log tree"); btrfs_put_root(log_tree_root); return ret; } if (sb_rdonly(fs_info->sb)) { ret = btrfs_commit_super(fs_info); if (ret) return ret; } return 0; } static int load_global_roots_objectid(struct btrfs_root *tree_root, struct btrfs_path *path, u64 objectid, const char *name) { struct btrfs_fs_info *fs_info = tree_root->fs_info; struct btrfs_root *root; u64 max_global_id = 0; int ret; struct btrfs_key key = { .objectid = objectid, .type = BTRFS_ROOT_ITEM_KEY, .offset = 0, }; bool found = false; /* If we have IGNOREDATACSUMS skip loading these roots. */ if (objectid == BTRFS_CSUM_TREE_OBJECTID && btrfs_test_opt(fs_info, IGNOREDATACSUMS)) { set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state); return 0; } while (1) { ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0); if (ret < 0) break; if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { ret = btrfs_next_leaf(tree_root, path); if (ret) { if (ret > 0) ret = 0; break; } } ret = 0; btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]); if (key.objectid != objectid) break; btrfs_release_path(path); /* * Just worry about this for extent tree, it'll be the same for * everybody. 
*/ if (objectid == BTRFS_EXTENT_TREE_OBJECTID) max_global_id = max(max_global_id, key.offset); found = true; root = read_tree_root_path(tree_root, path, &key); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) ret = PTR_ERR(root); break; } set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); ret = btrfs_global_root_insert(root); if (ret) { btrfs_put_root(root); break; } key.offset++; } btrfs_release_path(path); if (objectid == BTRFS_EXTENT_TREE_OBJECTID) fs_info->nr_global_roots = max_global_id + 1; if (!found || ret) { if (objectid == BTRFS_CSUM_TREE_OBJECTID) set_bit(BTRFS_FS_STATE_NO_DATA_CSUMS, &fs_info->fs_state); if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) ret = ret ? ret : -ENOENT; else ret = 0; btrfs_err(fs_info, "failed to load root %s", name); } return ret; } static int load_global_roots(struct btrfs_root *tree_root) { struct btrfs_path *path; int ret = 0; path = btrfs_alloc_path(); if (!path) return -ENOMEM; ret = load_global_roots_objectid(tree_root, path, BTRFS_EXTENT_TREE_OBJECTID, "extent"); if (ret) goto out; ret = load_global_roots_objectid(tree_root, path, BTRFS_CSUM_TREE_OBJECTID, "csum"); if (ret) goto out; if (!btrfs_fs_compat_ro(tree_root->fs_info, FREE_SPACE_TREE)) goto out; ret = load_global_roots_objectid(tree_root, path, BTRFS_FREE_SPACE_TREE_OBJECTID, "free space"); out: btrfs_free_path(path); return ret; } static int btrfs_read_roots(struct btrfs_fs_info *fs_info) { struct btrfs_root *tree_root = fs_info->tree_root; struct btrfs_root *root; struct btrfs_key location; int ret; ASSERT(fs_info->tree_root); ret = load_global_roots(tree_root); if (ret) return ret; location.type = BTRFS_ROOT_ITEM_KEY; location.offset = 0; if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE)) { location.objectid = BTRFS_BLOCK_GROUP_TREE_OBJECTID; root = btrfs_read_tree_root(tree_root, &location); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { ret = PTR_ERR(root); goto out; } } else { set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->block_group_root = root; } } location.objectid = BTRFS_DEV_TREE_OBJECTID; root = btrfs_read_tree_root(tree_root, &location); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { ret = PTR_ERR(root); goto out; } } else { set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->dev_root = root; } /* Initialize fs_info for all devices in any case */ ret = btrfs_init_devices_late(fs_info); if (ret) goto out; /* * This tree can share blocks with some other fs tree during relocation * and we need a proper setup by btrfs_get_fs_root */ root = btrfs_get_fs_root(tree_root->fs_info, BTRFS_DATA_RELOC_TREE_OBJECTID, true); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { ret = PTR_ERR(root); goto out; } } else { set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->data_reloc_root = root; } location.objectid = BTRFS_QUOTA_TREE_OBJECTID; root = btrfs_read_tree_root(tree_root, &location); if (!IS_ERR(root)) { set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->quota_root = root; } location.objectid = BTRFS_UUID_TREE_OBJECTID; root = btrfs_read_tree_root(tree_root, &location); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { ret = PTR_ERR(root); if (ret != -ENOENT) goto out; } } else { set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->uuid_root = root; } if (btrfs_fs_incompat(fs_info, RAID_STRIPE_TREE)) { location.objectid = BTRFS_RAID_STRIPE_TREE_OBJECTID; root = btrfs_read_tree_root(tree_root, &location); if (IS_ERR(root)) { if (!btrfs_test_opt(fs_info, IGNOREBADROOTS)) { 
ret = PTR_ERR(root); goto out; } } else { set_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state); fs_info->stripe_root = root; } } return 0; out: btrfs_warn(fs_info, "failed to read root (objectid=%llu): %d", location.objectid, ret); return ret; } /* * Real super block validation * NOTE: super csum type and incompat features will not be checked here. * * @sb: super block to check * @mirror_num: the super block number to check its bytenr: * 0 the primary (1st) sb * 1, 2 2nd and 3rd backup copy * -1 skip bytenr check */ int btrfs_validate_super(const struct btrfs_fs_info *fs_info, const struct btrfs_super_block *sb, int mirror_num) { u64 nodesize = btrfs_super_nodesize(sb); u64 sectorsize = btrfs_super_sectorsize(sb); int ret = 0; const bool ignore_flags = btrfs_test_opt(fs_info, IGNORESUPERFLAGS); if (btrfs_super_magic(sb) != BTRFS_MAGIC) { btrfs_err(fs_info, "no valid FS found"); ret = -EINVAL; } if ((btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP)) { if (!ignore_flags) { btrfs_err(fs_info, "unrecognized or unsupported super flag 0x%llx", btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); ret = -EINVAL; } else { btrfs_info(fs_info, "unrecognized or unsupported super flags: 0x%llx, ignored", btrfs_super_flags(sb) & ~BTRFS_SUPER_FLAG_SUPP); } } if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) { btrfs_err(fs_info, "tree_root level too big: %d >= %d", btrfs_super_root_level(sb), BTRFS_MAX_LEVEL); ret = -EINVAL; } if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) { btrfs_err(fs_info, "chunk_root level too big: %d >= %d", btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL); ret = -EINVAL; } if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) { btrfs_err(fs_info, "log_root level too big: %d >= %d", btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL); ret = -EINVAL; } /* * Check sectorsize and nodesize first, other check will need it. * Check all possible sectorsize(4K, 8K, 16K, 32K, 64K) here. */ if (!is_power_of_2(sectorsize) || sectorsize < 4096 || sectorsize > BTRFS_MAX_METADATA_BLOCKSIZE) { btrfs_err(fs_info, "invalid sectorsize %llu", sectorsize); ret = -EINVAL; } /* * We only support at most two sectorsizes: 4K and PAGE_SIZE. * * We can support 16K sectorsize with 64K page size without problem, * but such sectorsize/pagesize combination doesn't make much sense. * 4K will be our future standard, PAGE_SIZE is supported from the very * beginning. 
*/ if (sectorsize > PAGE_SIZE || (sectorsize != SZ_4K && sectorsize != PAGE_SIZE)) { btrfs_err(fs_info, "sectorsize %llu not yet supported for page size %lu", sectorsize, PAGE_SIZE); ret = -EINVAL; } if (!is_power_of_2(nodesize) || nodesize < sectorsize || nodesize > BTRFS_MAX_METADATA_BLOCKSIZE) { btrfs_err(fs_info, "invalid nodesize %llu", nodesize); ret = -EINVAL; } if (nodesize != le32_to_cpu(sb->__unused_leafsize)) { btrfs_err(fs_info, "invalid leafsize %u, should be %llu", le32_to_cpu(sb->__unused_leafsize), nodesize); ret = -EINVAL; } /* Root alignment check */ if (!IS_ALIGNED(btrfs_super_root(sb), sectorsize)) { btrfs_warn(fs_info, "tree_root block unaligned: %llu", btrfs_super_root(sb)); ret = -EINVAL; } if (!IS_ALIGNED(btrfs_super_chunk_root(sb), sectorsize)) { btrfs_warn(fs_info, "chunk_root block unaligned: %llu", btrfs_super_chunk_root(sb)); ret = -EINVAL; } if (!IS_ALIGNED(btrfs_super_log_root(sb), sectorsize)) { btrfs_warn(fs_info, "log_root block unaligned: %llu", btrfs_super_log_root(sb)); ret = -EINVAL; } if (!fs_info->fs_devices->temp_fsid && memcmp(fs_info->fs_devices->fsid, sb->fsid, BTRFS_FSID_SIZE) != 0) { btrfs_err(fs_info, "superblock fsid doesn't match fsid of fs_devices: %pU != %pU", sb->fsid, fs_info->fs_devices->fsid); ret = -EINVAL; } if (memcmp(fs_info->fs_devices->metadata_uuid, btrfs_sb_fsid_ptr(sb), BTRFS_FSID_SIZE) != 0) { btrfs_err(fs_info, "superblock metadata_uuid doesn't match metadata uuid of fs_devices: %pU != %pU", btrfs_sb_fsid_ptr(sb), fs_info->fs_devices->metadata_uuid); ret = -EINVAL; } if (memcmp(fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid, BTRFS_FSID_SIZE) != 0) { btrfs_err(fs_info, "dev_item UUID does not match metadata fsid: %pU != %pU", fs_info->fs_devices->metadata_uuid, sb->dev_item.fsid); ret = -EINVAL; } /* * Artificial requirement for block-group-tree to force newer features * (free-space-tree, no-holes) so the test matrix is smaller. 
*/ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) && (!btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID) || !btrfs_fs_incompat(fs_info, NO_HOLES))) { btrfs_err(fs_info, "block-group-tree feature requires free-space-tree and no-holes"); ret = -EINVAL; } /* * Hint to catch really bogus numbers, bitflips or so, more exact checks are * done later */ if (btrfs_super_bytes_used(sb) < 6 * btrfs_super_nodesize(sb)) { btrfs_err(fs_info, "bytes_used is too small %llu", btrfs_super_bytes_used(sb)); ret = -EINVAL; } if (!is_power_of_2(btrfs_super_stripesize(sb))) { btrfs_err(fs_info, "invalid stripesize %u", btrfs_super_stripesize(sb)); ret = -EINVAL; } if (btrfs_super_num_devices(sb) > (1UL << 31)) btrfs_warn(fs_info, "suspicious number of devices: %llu", btrfs_super_num_devices(sb)); if (btrfs_super_num_devices(sb) == 0) { btrfs_err(fs_info, "number of devices is 0"); ret = -EINVAL; } if (mirror_num >= 0 && btrfs_super_bytenr(sb) != btrfs_sb_offset(mirror_num)) { btrfs_err(fs_info, "super offset mismatch %llu != %u", btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET); ret = -EINVAL; } /* * Obvious sys_chunk_array corruptions, it must hold at least one key * and one chunk */ if (btrfs_super_sys_array_size(sb) > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) { btrfs_err(fs_info, "system chunk array too big %u > %u", btrfs_super_sys_array_size(sb), BTRFS_SYSTEM_CHUNK_ARRAY_SIZE); ret = -EINVAL; } if (btrfs_super_sys_array_size(sb) < sizeof(struct btrfs_disk_key) + sizeof(struct btrfs_chunk)) { btrfs_err(fs_info, "system chunk array too small %u < %zu", btrfs_super_sys_array_size(sb), sizeof(struct btrfs_disk_key) + sizeof(struct btrfs_chunk)); ret = -EINVAL; } /* * The generation is a global counter, we'll trust it more than the others * but it's still possible that it's the one that's wrong. */ if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb)) btrfs_warn(fs_info, "suspicious: generation < chunk_root_generation: %llu < %llu", btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb)); if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb) && btrfs_super_cache_generation(sb) != (u64)-1) btrfs_warn(fs_info, "suspicious: generation < cache_generation: %llu < %llu", btrfs_super_generation(sb), btrfs_super_cache_generation(sb)); return ret; } /* * Validation of super block at mount time. * Some checks already done early at mount time, like csum type and incompat * flags will be skipped. */ static int btrfs_validate_mount_super(struct btrfs_fs_info *fs_info) { return btrfs_validate_super(fs_info, fs_info->super_copy, 0); } /* * Validation of super block at write time. * Some checks like bytenr check will be skipped as their values will be * overwritten soon. * Extra checks like csum type and incompat flags will be done here. 
*/ static int btrfs_validate_write_super(struct btrfs_fs_info *fs_info, struct btrfs_super_block *sb) { int ret; ret = btrfs_validate_super(fs_info, sb, -1); if (ret < 0) goto out; if (!btrfs_supported_super_csum(btrfs_super_csum_type(sb))) { ret = -EUCLEAN; btrfs_err(fs_info, "invalid csum type, has %u want %u", btrfs_super_csum_type(sb), BTRFS_CSUM_TYPE_CRC32); goto out; } if (btrfs_super_incompat_flags(sb) & ~BTRFS_FEATURE_INCOMPAT_SUPP) { ret = -EUCLEAN; btrfs_err(fs_info, "invalid incompat flags, has 0x%llx valid mask 0x%llx", btrfs_super_incompat_flags(sb), (unsigned long long)BTRFS_FEATURE_INCOMPAT_SUPP); goto out; } out: if (ret < 0) btrfs_err(fs_info, "super block corruption detected before writing it to disk"); return ret; } static int load_super_root(struct btrfs_root *root, u64 bytenr, u64 gen, int level) { struct btrfs_tree_parent_check check = { .level = level, .transid = gen, .owner_root = btrfs_root_id(root) }; int ret = 0; root->node = read_tree_block(root->fs_info, bytenr, &check); if (IS_ERR(root->node)) { ret = PTR_ERR(root->node); root->node = NULL; return ret; } if (!extent_buffer_uptodate(root->node)) { free_extent_buffer(root->node); root->node = NULL; return -EIO; } btrfs_set_root_node(&root->root_item, root->node); root->commit_root = btrfs_root_node(root); btrfs_set_root_refs(&root->root_item, 1); return ret; } static int load_important_roots(struct btrfs_fs_info *fs_info) { struct btrfs_super_block *sb = fs_info->super_copy; u64 gen, bytenr; int level, ret; bytenr = btrfs_super_root(sb); gen = btrfs_super_generation(sb); level = btrfs_super_root_level(sb); ret = load_super_root(fs_info->tree_root, bytenr, gen, level); if (ret) { btrfs_warn(fs_info, "couldn't read tree root"); return ret; } return 0; } static int __cold init_tree_roots(struct btrfs_fs_info *fs_info) { int backup_index = find_newest_super_backup(fs_info); struct btrfs_super_block *sb = fs_info->super_copy; struct btrfs_root *tree_root = fs_info->tree_root; bool handle_error = false; int ret = 0; int i; for (i = 0; i < BTRFS_NUM_BACKUP_ROOTS; i++) { if (handle_error) { if (!IS_ERR(tree_root->node)) free_extent_buffer(tree_root->node); tree_root->node = NULL; if (!btrfs_test_opt(fs_info, USEBACKUPROOT)) break; free_root_pointers(fs_info, 0); /* * Don't use the log in recovery mode, it won't be * valid */ btrfs_set_super_log_root(sb, 0); btrfs_warn(fs_info, "try to load backup roots slot %d", i); ret = read_backup_root(fs_info, i); backup_index = ret; if (ret < 0) return ret; } ret = load_important_roots(fs_info); if (ret) { handle_error = true; continue; } /* * No need to hold btrfs_root::objectid_mutex since the fs * hasn't been fully initialised and we are the only user */ ret = btrfs_init_root_free_objectid(tree_root); if (ret < 0) { handle_error = true; continue; } ASSERT(tree_root->free_objectid <= BTRFS_LAST_FREE_OBJECTID); ret = btrfs_read_roots(fs_info); if (ret < 0) { handle_error = true; continue; } /* All successful */ fs_info->generation = btrfs_header_generation(tree_root->node); btrfs_set_last_trans_committed(fs_info, fs_info->generation); fs_info->last_reloc_trans = 0; /* Always begin writing backup roots after the one being used */ if (backup_index < 0) { fs_info->backup_root_index = 0; } else { fs_info->backup_root_index = backup_index + 1; fs_info->backup_root_index %= BTRFS_NUM_BACKUP_ROOTS; } break; } return ret; } void btrfs_init_fs_info(struct btrfs_fs_info *fs_info) { INIT_RADIX_TREE(&fs_info->fs_roots_radix, GFP_ATOMIC); INIT_RADIX_TREE(&fs_info->buffer_radix, GFP_ATOMIC); 
INIT_LIST_HEAD(&fs_info->trans_list); INIT_LIST_HEAD(&fs_info->dead_roots); INIT_LIST_HEAD(&fs_info->delayed_iputs); INIT_LIST_HEAD(&fs_info->delalloc_roots); INIT_LIST_HEAD(&fs_info->caching_block_groups); spin_lock_init(&fs_info->delalloc_root_lock); spin_lock_init(&fs_info->trans_lock); spin_lock_init(&fs_info->fs_roots_radix_lock); spin_lock_init(&fs_info->delayed_iput_lock); spin_lock_init(&fs_info->defrag_inodes_lock); spin_lock_init(&fs_info->super_lock); spin_lock_init(&fs_info->buffer_lock); spin_lock_init(&fs_info->unused_bgs_lock); spin_lock_init(&fs_info->treelog_bg_lock); spin_lock_init(&fs_info->zone_active_bgs_lock); spin_lock_init(&fs_info->relocation_bg_lock); rwlock_init(&fs_info->tree_mod_log_lock); rwlock_init(&fs_info->global_root_lock); mutex_init(&fs_info->unused_bg_unpin_mutex); mutex_init(&fs_info->reclaim_bgs_lock); mutex_init(&fs_info->reloc_mutex); mutex_init(&fs_info->delalloc_root_mutex); mutex_init(&fs_info->zoned_meta_io_lock); mutex_init(&fs_info->zoned_data_reloc_io_lock); seqlock_init(&fs_info->profiles_lock); btrfs_lockdep_init_map(fs_info, btrfs_trans_num_writers); btrfs_lockdep_init_map(fs_info, btrfs_trans_num_extwriters); btrfs_lockdep_init_map(fs_info, btrfs_trans_pending_ordered); btrfs_lockdep_init_map(fs_info, btrfs_ordered_extent); btrfs_state_lockdep_init_map(fs_info, btrfs_trans_commit_prep, BTRFS_LOCKDEP_TRANS_COMMIT_PREP); btrfs_state_lockdep_init_map(fs_info, btrfs_trans_unblocked, BTRFS_LOCKDEP_TRANS_UNBLOCKED); btrfs_state_lockdep_init_map(fs_info, btrfs_trans_super_committed, BTRFS_LOCKDEP_TRANS_SUPER_COMMITTED); btrfs_state_lockdep_init_map(fs_info, btrfs_trans_completed, BTRFS_LOCKDEP_TRANS_COMPLETED); INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); INIT_LIST_HEAD(&fs_info->space_info); INIT_LIST_HEAD(&fs_info->tree_mod_seq_list); INIT_LIST_HEAD(&fs_info->unused_bgs); INIT_LIST_HEAD(&fs_info->reclaim_bgs); INIT_LIST_HEAD(&fs_info->zone_active_bgs); #ifdef CONFIG_BTRFS_DEBUG INIT_LIST_HEAD(&fs_info->allocated_roots); INIT_LIST_HEAD(&fs_info->allocated_ebs); spin_lock_init(&fs_info->eb_leak_lock); #endif fs_info->mapping_tree = RB_ROOT_CACHED; rwlock_init(&fs_info->mapping_tree_lock); btrfs_init_block_rsv(&fs_info->global_block_rsv, BTRFS_BLOCK_RSV_GLOBAL); btrfs_init_block_rsv(&fs_info->trans_block_rsv, BTRFS_BLOCK_RSV_TRANS); btrfs_init_block_rsv(&fs_info->chunk_block_rsv, BTRFS_BLOCK_RSV_CHUNK); btrfs_init_block_rsv(&fs_info->empty_block_rsv, BTRFS_BLOCK_RSV_EMPTY); btrfs_init_block_rsv(&fs_info->delayed_block_rsv, BTRFS_BLOCK_RSV_DELOPS); btrfs_init_block_rsv(&fs_info->delayed_refs_rsv, BTRFS_BLOCK_RSV_DELREFS); atomic_set(&fs_info->async_delalloc_pages, 0); atomic_set(&fs_info->defrag_running, 0); atomic_set(&fs_info->nr_delayed_iputs, 0); atomic64_set(&fs_info->tree_mod_seq, 0); fs_info->global_root_tree = RB_ROOT; fs_info->max_inline = BTRFS_DEFAULT_MAX_INLINE; fs_info->metadata_ratio = 0; fs_info->defrag_inodes = RB_ROOT; atomic64_set(&fs_info->free_chunk_space, 0); fs_info->tree_mod_log = RB_ROOT; fs_info->commit_interval = BTRFS_DEFAULT_COMMIT_INTERVAL; btrfs_init_ref_verify(fs_info); fs_info->thread_pool_size = min_t(unsigned long, num_online_cpus() + 2, 8); INIT_LIST_HEAD(&fs_info->ordered_roots); spin_lock_init(&fs_info->ordered_root_lock); btrfs_init_scrub(fs_info); btrfs_init_balance(fs_info); btrfs_init_async_reclaim_work(fs_info); btrfs_init_extent_map_shrinker_work(fs_info); rwlock_init(&fs_info->block_group_cache_lock); fs_info->block_group_cache_tree = RB_ROOT_CACHED; extent_io_tree_init(fs_info, 
&fs_info->excluded_extents, IO_TREE_FS_EXCLUDED_EXTENTS); mutex_init(&fs_info->ordered_operations_mutex); mutex_init(&fs_info->tree_log_mutex); mutex_init(&fs_info->chunk_mutex); mutex_init(&fs_info->transaction_kthread_mutex); mutex_init(&fs_info->cleaner_mutex); mutex_init(&fs_info->ro_block_group_mutex); init_rwsem(&fs_info->commit_root_sem); init_rwsem(&fs_info->cleanup_work_sem); init_rwsem(&fs_info->subvol_sem); sema_init(&fs_info->uuid_tree_rescan_sem, 1); btrfs_init_dev_replace_locks(fs_info); btrfs_init_qgroup(fs_info); btrfs_discard_init(fs_info); btrfs_init_free_cluster(&fs_info->meta_alloc_cluster); btrfs_init_free_cluster(&fs_info->data_alloc_cluster); init_waitqueue_head(&fs_info->transaction_throttle); init_waitqueue_head(&fs_info->transaction_wait); init_waitqueue_head(&fs_info->transaction_blocked_wait); init_waitqueue_head(&fs_info->async_submit_wait); init_waitqueue_head(&fs_info->delayed_iputs_wait); /* Usable values until the real ones are cached from the superblock */ fs_info->nodesize = 4096; fs_info->sectorsize = 4096; fs_info->sectorsize_bits = ilog2(4096); fs_info->stripesize = 4096; /* Default compress algorithm when user does -o compress */ fs_info->compress_type = BTRFS_COMPRESS_ZLIB; fs_info->max_extent_size = BTRFS_MAX_EXTENT_SIZE; spin_lock_init(&fs_info->swapfile_pins_lock); fs_info->swapfile_pins = RB_ROOT; fs_info->bg_reclaim_threshold = BTRFS_DEFAULT_RECLAIM_THRESH; INIT_WORK(&fs_info->reclaim_bgs_work, btrfs_reclaim_bgs_work); } static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block *sb) { int ret; fs_info->sb = sb; /* Temporary fixed values for block size until we read the superblock. */ sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE; sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE); ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL); if (ret) return ret; ret = percpu_counter_init(&fs_info->evictable_extent_maps, 0, GFP_KERNEL); if (ret) return ret; ret = percpu_counter_init(&fs_info->dirty_metadata_bytes, 0, GFP_KERNEL); if (ret) return ret; fs_info->dirty_metadata_batch = PAGE_SIZE * (1 + ilog2(nr_cpu_ids)); ret = percpu_counter_init(&fs_info->delalloc_bytes, 0, GFP_KERNEL); if (ret) return ret; ret = percpu_counter_init(&fs_info->dev_replace.bio_counter, 0, GFP_KERNEL); if (ret) return ret; fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), GFP_KERNEL); if (!fs_info->delayed_root) return -ENOMEM; btrfs_init_delayed_root(fs_info->delayed_root); if (sb_rdonly(sb)) set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state); if (btrfs_test_opt(fs_info, IGNOREMETACSUMS)) set_bit(BTRFS_FS_STATE_SKIP_META_CSUMS, &fs_info->fs_state); return btrfs_alloc_stripe_hash_table(fs_info); } static int btrfs_uuid_rescan_kthread(void *data) { struct btrfs_fs_info *fs_info = data; int ret; /* * 1st step is to iterate through the existing UUID tree and * to delete all entries that contain outdated data. * 2nd step is to add all missing entries to the UUID tree. 
*/ ret = btrfs_uuid_tree_iterate(fs_info); if (ret < 0) { if (ret != -EINTR) btrfs_warn(fs_info, "iterating uuid_tree failed %d", ret); up(&fs_info->uuid_tree_rescan_sem); return ret; } return btrfs_uuid_scan_kthread(data); } static int btrfs_check_uuid_tree(struct btrfs_fs_info *fs_info) { struct task_struct *task; down(&fs_info->uuid_tree_rescan_sem); task = kthread_run(btrfs_uuid_rescan_kthread, fs_info, "btrfs-uuid"); if (IS_ERR(task)) { /* fs_info->update_uuid_tree_gen remains 0 in all error case */ btrfs_warn(fs_info, "failed to start uuid_rescan task"); up(&fs_info->uuid_tree_rescan_sem); return PTR_ERR(task); } return 0; } static int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info) { u64 root_objectid = 0; struct btrfs_root *gang[8]; int ret = 0; while (1) { unsigned int found; spin_lock(&fs_info->fs_roots_radix_lock); found = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, root_objectid, ARRAY_SIZE(gang)); if (!found) { spin_unlock(&fs_info->fs_roots_radix_lock); break; } root_objectid = btrfs_root_id(gang[found - 1]) + 1; for (int i = 0; i < found; i++) { /* Avoid to grab roots in dead_roots. */ if (btrfs_root_refs(&gang[i]->root_item) == 0) { gang[i] = NULL; continue; } /* Grab all the search result for later use. */ gang[i] = btrfs_grab_root(gang[i]); } spin_unlock(&fs_info->fs_roots_radix_lock); for (int i = 0; i < found; i++) { if (!gang[i]) continue; root_objectid = btrfs_root_id(gang[i]); /* * Continue to release the remaining roots after the first * error without cleanup and preserve the first error * for the return. */ if (!ret) ret = btrfs_orphan_cleanup(gang[i]); btrfs_put_root(gang[i]); } if (ret) break; root_objectid++; } return ret; } /* * Mounting logic specific to read-write file systems. Shared by open_ctree * and btrfs_remount when remounting from read-only to read-write. */ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info) { int ret; const bool cache_opt = btrfs_test_opt(fs_info, SPACE_CACHE); bool rebuild_free_space_tree = false; if (btrfs_test_opt(fs_info, CLEAR_CACHE) && btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { if (btrfs_fs_incompat(fs_info, EXTENT_TREE_V2)) btrfs_warn(fs_info, "'clear_cache' option is ignored with extent tree v2"); else rebuild_free_space_tree = true; } else if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE_VALID)) { btrfs_warn(fs_info, "free space tree is invalid"); rebuild_free_space_tree = true; } if (rebuild_free_space_tree) { btrfs_info(fs_info, "rebuilding free space tree"); ret = btrfs_rebuild_free_space_tree(fs_info); if (ret) { btrfs_warn(fs_info, "failed to rebuild free space tree: %d", ret); goto out; } } if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE) && !btrfs_test_opt(fs_info, FREE_SPACE_TREE)) { btrfs_info(fs_info, "disabling free space tree"); ret = btrfs_delete_free_space_tree(fs_info); if (ret) { btrfs_warn(fs_info, "failed to disable free space tree: %d", ret); goto out; } } /* * btrfs_find_orphan_roots() is responsible for finding all the dead * roots (with 0 refs), flag them with BTRFS_ROOT_DEAD_TREE and load * them into the fs_info->fs_roots_radix tree. This must be done before * calling btrfs_orphan_cleanup() on the tree root. 
If we don't do it * first, then btrfs_orphan_cleanup() will delete a dead root's orphan * item before the root's tree is deleted - this means that if we unmount * or crash before the deletion completes, on the next mount we will not * delete what remains of the tree because the orphan item does not * exists anymore, which is what tells us we have a pending deletion. */ ret = btrfs_find_orphan_roots(fs_info); if (ret) goto out; ret = btrfs_cleanup_fs_roots(fs_info); if (ret) goto out; down_read(&fs_info->cleanup_work_sem); if ((ret = btrfs_orphan_cleanup(fs_info->fs_root)) || (ret = btrfs_orphan_cleanup(fs_info->tree_root))) { up_read(&fs_info->cleanup_work_sem); goto out; } up_read(&fs_info->cleanup_work_sem); mutex_lock(&fs_info->cleaner_mutex); ret = btrfs_recover_relocation(fs_info); mutex_unlock(&fs_info->cleaner_mutex); if (ret < 0) { btrfs_warn(fs_info, "failed to recover relocation: %d", ret); goto out; } if (btrfs_test_opt(fs_info, FREE_SPACE_TREE) && !btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE)) { btrfs_info(fs_info, "creating free space tree"); ret = btrfs_create_free_space_tree(fs_info); if (ret) { btrfs_warn(fs_info, "failed to create free space tree: %d", ret); goto out; } } if (cache_opt != btrfs_free_space_cache_v1_active(fs_info)) { ret = btrfs_set_free_space_cache_v1_active(fs_info, cache_opt); if (ret) goto out; } ret = btrfs_resume_balance_async(fs_info); if (ret) goto out; ret = btrfs_resume_dev_replace_async(fs_info); if (ret) { btrfs_warn(fs_info, "failed to resume dev_replace"); goto out; } btrfs_qgroup_rescan_resume(fs_info); if (!fs_info->uuid_root) { btrfs_info(fs_info, "creating UUID tree"); ret = btrfs_create_uuid_tree(fs_info); if (ret) { btrfs_warn(fs_info, "failed to create the UUID tree %d", ret); goto out; } } out: return ret; } /* * Do various sanity and dependency checks of different features. * * @is_rw_mount: If the mount is read-write. * * This is the place for less strict checks (like for subpage or artificial * feature dependencies). * * For strict checks or possible corruption detection, see * btrfs_validate_super(). * * This should be called after btrfs_parse_options(), as some mount options * (space cache related) can modify on-disk format like free space tree and * screw up certain feature dependencies. */ int btrfs_check_features(struct btrfs_fs_info *fs_info, bool is_rw_mount) { struct btrfs_super_block *disk_super = fs_info->super_copy; u64 incompat = btrfs_super_incompat_flags(disk_super); const u64 compat_ro = btrfs_super_compat_ro_flags(disk_super); const u64 compat_ro_unsupp = (compat_ro & ~BTRFS_FEATURE_COMPAT_RO_SUPP); if (incompat & ~BTRFS_FEATURE_INCOMPAT_SUPP) { btrfs_err(fs_info, "cannot mount because of unknown incompat features (0x%llx)", incompat); return -EINVAL; } /* Runtime limitation for mixed block groups. */ if ((incompat & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) && (fs_info->sectorsize != fs_info->nodesize)) { btrfs_err(fs_info, "unequal nodesize/sectorsize (%u != %u) are not allowed for mixed block groups", fs_info->nodesize, fs_info->sectorsize); return -EINVAL; } /* Mixed backref is an always-enabled feature. */ incompat |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; /* Set compression related flags just in case. */ if (fs_info->compress_type == BTRFS_COMPRESS_LZO) incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD) incompat |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD; /* * An ancient flag, which should really be marked deprecated. 
* Such runtime limitation doesn't really need a incompat flag. */ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) incompat |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA; if (compat_ro_unsupp && is_rw_mount) { btrfs_err(fs_info, "cannot mount read-write because of unknown compat_ro features (0x%llx)", compat_ro); return -EINVAL; } /* * We have unsupported RO compat features, although RO mounted, we * should not cause any metadata writes, including log replay. * Or we could screw up whatever the new feature requires. */ if (compat_ro_unsupp && btrfs_super_log_root(disk_super) && !btrfs_test_opt(fs_info, NOLOGREPLAY)) { btrfs_err(fs_info, "cannot replay dirty log with unsupported compat_ro features (0x%llx), try rescue=nologreplay", compat_ro); return -EINVAL; } /* * Artificial limitations for block group tree, to force * block-group-tree to rely on no-holes and free-space-tree. */ if (btrfs_fs_compat_ro(fs_info, BLOCK_GROUP_TREE) && (!btrfs_fs_incompat(fs_info, NO_HOLES) || !btrfs_test_opt(fs_info, FREE_SPACE_TREE))) { btrfs_err(fs_info, "block-group-tree feature requires no-holes and free-space-tree features"); return -EINVAL; } /* * Subpage runtime limitation on v1 cache. * * V1 space cache still has some hard codeed PAGE_SIZE usage, while * we're already defaulting to v2 cache, no need to bother v1 as it's * going to be deprecated anyway. */ if (fs_info->sectorsize < PAGE_SIZE && btrfs_test_opt(fs_info, SPACE_CACHE)) { btrfs_warn(fs_info, "v1 space cache is not supported for page size %lu with sectorsize %u", PAGE_SIZE, fs_info->sectorsize); return -EINVAL; } /* This can be called by remount, we need to protect the super block. */ spin_lock(&fs_info->super_lock); btrfs_set_super_incompat_flags(disk_super, incompat); spin_unlock(&fs_info->super_lock); return 0; } int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_devices) { u32 sectorsize; u32 nodesize; u32 stripesize; u64 generation; u16 csum_type; struct btrfs_super_block *disk_super; struct btrfs_fs_info *fs_info = btrfs_sb(sb); struct btrfs_root *tree_root; struct btrfs_root *chunk_root; int ret; int level; ret = init_mount_fs_info(fs_info, sb); if (ret) goto fail; /* These need to be init'ed before we start creating inodes and such. */ tree_root = btrfs_alloc_root(fs_info, BTRFS_ROOT_TREE_OBJECTID, GFP_KERNEL); fs_info->tree_root = tree_root; chunk_root = btrfs_alloc_root(fs_info, BTRFS_CHUNK_TREE_OBJECTID, GFP_KERNEL); fs_info->chunk_root = chunk_root; if (!tree_root || !chunk_root) { ret = -ENOMEM; goto fail; } ret = btrfs_init_btree_inode(sb); if (ret) goto fail; invalidate_bdev(fs_devices->latest_dev->bdev); /* * Read super block and check the signature bytes only */ disk_super = btrfs_read_dev_super(fs_devices->latest_dev->bdev); if (IS_ERR(disk_super)) { ret = PTR_ERR(disk_super); goto fail_alloc; } btrfs_info(fs_info, "first mount of filesystem %pU", disk_super->fsid); /* * Verify the type first, if that or the checksum value are * corrupted, we'll find out */ csum_type = btrfs_super_csum_type(disk_super); if (!btrfs_supported_super_csum(csum_type)) { btrfs_err(fs_info, "unsupported checksum algorithm: %u", csum_type); ret = -EINVAL; btrfs_release_disk_super(disk_super); goto fail_alloc; } fs_info->csum_size = btrfs_super_csum_size(disk_super); ret = btrfs_init_csum_hash(fs_info, csum_type); if (ret) { btrfs_release_disk_super(disk_super); goto fail_alloc; } /* * We want to check superblock checksum, the type is stored inside. * Pass the whole disk block of size BTRFS_SUPER_INFO_SIZE (4k). 
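	 * The checksum itself occupies the first BTRFS_CSUM_SIZE bytes of that
	 * block, so the digest covers the remaining
	 * BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE bytes (see write_dev_supers()).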
*/ if (btrfs_check_super_csum(fs_info, disk_super)) { btrfs_err(fs_info, "superblock checksum mismatch"); ret = -EINVAL; btrfs_release_disk_super(disk_super); goto fail_alloc; } /* * super_copy is zeroed at allocation time and we never touch the * following bytes up to INFO_SIZE, the checksum is calculated from * the whole block of INFO_SIZE */ memcpy(fs_info->super_copy, disk_super, sizeof(*fs_info->super_copy)); btrfs_release_disk_super(disk_super); disk_super = fs_info->super_copy; memcpy(fs_info->super_for_commit, fs_info->super_copy, sizeof(*fs_info->super_for_commit)); ret = btrfs_validate_mount_super(fs_info); if (ret) { btrfs_err(fs_info, "superblock contains fatal errors"); ret = -EINVAL; goto fail_alloc; } if (!btrfs_super_root(disk_super)) { btrfs_err(fs_info, "invalid superblock tree root bytenr"); ret = -EINVAL; goto fail_alloc; } /* check FS state, whether FS is broken. */ if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_ERROR) WRITE_ONCE(fs_info->fs_error, -EUCLEAN); /* Set up fs_info before parsing mount options */ nodesize = btrfs_super_nodesize(disk_super); sectorsize = btrfs_super_sectorsize(disk_super); stripesize = sectorsize; fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids)); fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids)); fs_info->nodesize = nodesize; fs_info->sectorsize = sectorsize; fs_info->sectorsize_bits = ilog2(sectorsize); fs_info->sectors_per_page = (PAGE_SIZE >> fs_info->sectorsize_bits); fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size; fs_info->stripesize = stripesize; /* * Handle the space caching options appropriately now that we have the * super block loaded and validated. */ btrfs_set_free_space_cache_settings(fs_info); if (!btrfs_check_options(fs_info, &fs_info->mount_opt, sb->s_flags)) { ret = -EINVAL; goto fail_alloc; } ret = btrfs_check_features(fs_info, !sb_rdonly(sb)); if (ret < 0) goto fail_alloc; /* * At this point our mount options are validated, if we set ->max_inline * to something non-standard make sure we truncate it to sectorsize. */ fs_info->max_inline = min_t(u64, fs_info->max_inline, fs_info->sectorsize); if (sectorsize < PAGE_SIZE) btrfs_warn(fs_info, "read-write for sector size %u with page size %lu is experimental", sectorsize, PAGE_SIZE); ret = btrfs_init_workqueues(fs_info); if (ret) goto fail_sb_buffer; sb->s_bdi->ra_pages *= btrfs_super_num_devices(disk_super); sb->s_bdi->ra_pages = max(sb->s_bdi->ra_pages, SZ_4M / PAGE_SIZE); /* Update the values for the current filesystem. 
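	 * The temporary BTRFS_BDEV_BLOCKSIZE values set in init_mount_fs_info()
	 * are replaced now that the real sector size is known.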
*/ sb->s_blocksize = sectorsize; sb->s_blocksize_bits = blksize_bits(sectorsize); memcpy(&sb->s_uuid, fs_info->fs_devices->fsid, BTRFS_FSID_SIZE); mutex_lock(&fs_info->chunk_mutex); ret = btrfs_read_sys_array(fs_info); mutex_unlock(&fs_info->chunk_mutex); if (ret) { btrfs_err(fs_info, "failed to read the system array: %d", ret); goto fail_sb_buffer; } generation = btrfs_super_chunk_root_generation(disk_super); level = btrfs_super_chunk_root_level(disk_super); ret = load_super_root(chunk_root, btrfs_super_chunk_root(disk_super), generation, level); if (ret) { btrfs_err(fs_info, "failed to read chunk root"); goto fail_tree_roots; } read_extent_buffer(chunk_root->node, fs_info->chunk_tree_uuid, offsetof(struct btrfs_header, chunk_tree_uuid), BTRFS_UUID_SIZE); ret = btrfs_read_chunk_tree(fs_info); if (ret) { btrfs_err(fs_info, "failed to read chunk tree: %d", ret); goto fail_tree_roots; } /* * At this point we know all the devices that make this filesystem, * including the seed devices but we don't know yet if the replace * target is required. So free devices that are not part of this * filesystem but skip the replace target device which is checked * below in btrfs_init_dev_replace(). */ btrfs_free_extra_devids(fs_devices); if (!fs_devices->latest_dev->bdev) { btrfs_err(fs_info, "failed to read devices"); ret = -EIO; goto fail_tree_roots; } ret = init_tree_roots(fs_info); if (ret) goto fail_tree_roots; /* * Get zone type information of zoned block devices. This will also * handle emulation of a zoned filesystem if a regular device has the * zoned incompat feature flag set. */ ret = btrfs_get_dev_zone_info_all_devices(fs_info); if (ret) { btrfs_err(fs_info, "zoned: failed to read device zone info: %d", ret); goto fail_block_groups; } /* * If we have a uuid root and we're not being told to rescan we need to * check the generation here so we can set the * BTRFS_FS_UPDATE_UUID_TREE_GEN bit. Otherwise we could commit the * transaction during a balance or the log replay without updating the * uuid generation, and then if we crash we would rescan the uuid tree, * even though it was perfectly fine. 
*/ if (fs_info->uuid_root && !btrfs_test_opt(fs_info, RESCAN_UUID_TREE) && fs_info->generation == btrfs_super_uuid_tree_generation(disk_super)) set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags); ret = btrfs_verify_dev_extents(fs_info); if (ret) { btrfs_err(fs_info, "failed to verify dev extents against chunks: %d", ret); goto fail_block_groups; } ret = btrfs_recover_balance(fs_info); if (ret) { btrfs_err(fs_info, "failed to recover balance: %d", ret); goto fail_block_groups; } ret = btrfs_init_dev_stats(fs_info); if (ret) { btrfs_err(fs_info, "failed to init dev_stats: %d", ret); goto fail_block_groups; } ret = btrfs_init_dev_replace(fs_info); if (ret) { btrfs_err(fs_info, "failed to init dev_replace: %d", ret); goto fail_block_groups; } ret = btrfs_check_zoned_mode(fs_info); if (ret) { btrfs_err(fs_info, "failed to initialize zoned mode: %d", ret); goto fail_block_groups; } ret = btrfs_sysfs_add_fsid(fs_devices); if (ret) { btrfs_err(fs_info, "failed to init sysfs fsid interface: %d", ret); goto fail_block_groups; } ret = btrfs_sysfs_add_mounted(fs_info); if (ret) { btrfs_err(fs_info, "failed to init sysfs interface: %d", ret); goto fail_fsdev_sysfs; } ret = btrfs_init_space_info(fs_info); if (ret) { btrfs_err(fs_info, "failed to initialize space info: %d", ret); goto fail_sysfs; } ret = btrfs_read_block_groups(fs_info); if (ret) { btrfs_err(fs_info, "failed to read block groups: %d", ret); goto fail_sysfs; } btrfs_free_zone_cache(fs_info); btrfs_check_active_zone_reservation(fs_info); if (!sb_rdonly(sb) && fs_info->fs_devices->missing_devices && !btrfs_check_rw_degradable(fs_info, NULL)) { btrfs_warn(fs_info, "writable mount is not allowed due to too many missing devices"); ret = -EINVAL; goto fail_sysfs; } fs_info->cleaner_kthread = kthread_run(cleaner_kthread, fs_info, "btrfs-cleaner"); if (IS_ERR(fs_info->cleaner_kthread)) { ret = PTR_ERR(fs_info->cleaner_kthread); goto fail_sysfs; } fs_info->transaction_kthread = kthread_run(transaction_kthread, tree_root, "btrfs-transaction"); if (IS_ERR(fs_info->transaction_kthread)) { ret = PTR_ERR(fs_info->transaction_kthread); goto fail_cleaner; } ret = btrfs_read_qgroup_config(fs_info); if (ret) goto fail_trans_kthread; if (btrfs_build_ref_tree(fs_info)) btrfs_err(fs_info, "couldn't build ref tree"); /* do not make disk changes in broken FS or nologreplay is given */ if (btrfs_super_log_root(disk_super) != 0 && !btrfs_test_opt(fs_info, NOLOGREPLAY)) { btrfs_info(fs_info, "start tree-log replay"); ret = btrfs_replay_log(fs_info, fs_devices); if (ret) goto fail_qgroup; } fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true); if (IS_ERR(fs_info->fs_root)) { ret = PTR_ERR(fs_info->fs_root); btrfs_warn(fs_info, "failed to read fs tree: %d", ret); fs_info->fs_root = NULL; goto fail_qgroup; } if (sb_rdonly(sb)) return 0; ret = btrfs_start_pre_rw_mount(fs_info); if (ret) { close_ctree(fs_info); return ret; } btrfs_discard_resume(fs_info); if (fs_info->uuid_root && (btrfs_test_opt(fs_info, RESCAN_UUID_TREE) || fs_info->generation != btrfs_super_uuid_tree_generation(disk_super))) { btrfs_info(fs_info, "checking UUID tree"); ret = btrfs_check_uuid_tree(fs_info); if (ret) { btrfs_warn(fs_info, "failed to check the UUID tree: %d", ret); close_ctree(fs_info); return ret; } } set_bit(BTRFS_FS_OPEN, &fs_info->flags); /* Kick the cleaner thread so it'll start deleting snapshots. 
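	 * This is only needed when unfinished drops were left over from a
	 * previous mount, hence the BTRFS_FS_UNFINISHED_DROPS test below.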
*/ if (test_bit(BTRFS_FS_UNFINISHED_DROPS, &fs_info->flags)) wake_up_process(fs_info->cleaner_kthread); return 0; fail_qgroup: btrfs_free_qgroup_config(fs_info); fail_trans_kthread: kthread_stop(fs_info->transaction_kthread); btrfs_cleanup_transaction(fs_info); btrfs_free_fs_roots(fs_info); fail_cleaner: kthread_stop(fs_info->cleaner_kthread); /* * make sure we're done with the btree inode before we stop our * kthreads */ filemap_write_and_wait(fs_info->btree_inode->i_mapping); fail_sysfs: btrfs_sysfs_remove_mounted(fs_info); fail_fsdev_sysfs: btrfs_sysfs_remove_fsid(fs_info->fs_devices); fail_block_groups: btrfs_put_block_group_cache(fs_info); fail_tree_roots: if (fs_info->data_reloc_root) btrfs_drop_and_free_fs_root(fs_info, fs_info->data_reloc_root); free_root_pointers(fs_info, true); invalidate_inode_pages2(fs_info->btree_inode->i_mapping); fail_sb_buffer: btrfs_stop_all_workers(fs_info); btrfs_free_block_groups(fs_info); fail_alloc: btrfs_mapping_tree_free(fs_info); iput(fs_info->btree_inode); fail: btrfs_close_devices(fs_info->fs_devices); ASSERT(ret < 0); return ret; } ALLOW_ERROR_INJECTION(open_ctree, ERRNO); static void btrfs_end_super_write(struct bio *bio) { struct btrfs_device *device = bio->bi_private; struct folio_iter fi; bio_for_each_folio_all(fi, bio) { if (bio->bi_status) { btrfs_warn_rl_in_rcu(device->fs_info, "lost super block write due to IO error on %s (%d)", btrfs_dev_name(device), blk_status_to_errno(bio->bi_status)); btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS); /* Ensure failure if the primary sb fails. */ if (bio->bi_opf & REQ_FUA) atomic_add(BTRFS_SUPER_PRIMARY_WRITE_ERROR, &device->sb_write_errors); else atomic_inc(&device->sb_write_errors); } folio_unlock(fi.folio); folio_put(fi.folio); } bio_put(bio); } struct btrfs_super_block *btrfs_read_dev_one_super(struct block_device *bdev, int copy_num, bool drop_cache) { struct btrfs_super_block *super; struct page *page; u64 bytenr, bytenr_orig; struct address_space *mapping = bdev->bd_mapping; int ret; bytenr_orig = btrfs_sb_offset(copy_num); ret = btrfs_sb_log_location_bdev(bdev, copy_num, READ, &bytenr); if (ret == -ENOENT) return ERR_PTR(-EINVAL); else if (ret) return ERR_PTR(ret); if (bytenr + BTRFS_SUPER_INFO_SIZE >= bdev_nr_bytes(bdev)) return ERR_PTR(-EINVAL); if (drop_cache) { /* This should only be called with the primary sb. */ ASSERT(copy_num == 0); /* * Drop the page of the primary superblock, so later read will * always read from the device. */ invalidate_inode_pages2_range(mapping, bytenr >> PAGE_SHIFT, (bytenr + BTRFS_SUPER_INFO_SIZE) >> PAGE_SHIFT); } page = read_cache_page_gfp(mapping, bytenr >> PAGE_SHIFT, GFP_NOFS); if (IS_ERR(page)) return ERR_CAST(page); super = page_address(page); if (btrfs_super_magic(super) != BTRFS_MAGIC) { btrfs_release_disk_super(super); return ERR_PTR(-ENODATA); } if (btrfs_super_bytenr(super) != bytenr_orig) { btrfs_release_disk_super(super); return ERR_PTR(-EINVAL); } return super; } struct btrfs_super_block *btrfs_read_dev_super(struct block_device *bdev) { struct btrfs_super_block *super, *latest = NULL; int i; u64 transid = 0; /* we would like to check all the supers, but that would make * a btrfs mount succeed after a mkfs from a different FS. 
* So, we need to add a special mount option to scan for * later supers, using BTRFS_SUPER_MIRROR_MAX instead */ for (i = 0; i < 1; i++) { super = btrfs_read_dev_one_super(bdev, i, false); if (IS_ERR(super)) continue; if (!latest || btrfs_super_generation(super) > transid) { if (latest) btrfs_release_disk_super(super); latest = super; transid = btrfs_super_generation(super); } } return super; } /* * Write superblock @sb to the @device. Do not wait for completion, all the * folios we use for writing are locked. * * Write @max_mirrors copies of the superblock, where 0 means default that fit * the expected device size at commit time. Note that max_mirrors must be * same for write and wait phases. * * Return number of errors when folio is not found or submission fails. */ static int write_dev_supers(struct btrfs_device *device, struct btrfs_super_block *sb, int max_mirrors) { struct btrfs_fs_info *fs_info = device->fs_info; struct address_space *mapping = device->bdev->bd_mapping; SHASH_DESC_ON_STACK(shash, fs_info->csum_shash); int i; int ret; u64 bytenr, bytenr_orig; atomic_set(&device->sb_write_errors, 0); if (max_mirrors == 0) max_mirrors = BTRFS_SUPER_MIRROR_MAX; shash->tfm = fs_info->csum_shash; for (i = 0; i < max_mirrors; i++) { struct folio *folio; struct bio *bio; struct btrfs_super_block *disk_super; size_t offset; bytenr_orig = btrfs_sb_offset(i); ret = btrfs_sb_log_location(device, i, WRITE, &bytenr); if (ret == -ENOENT) { continue; } else if (ret < 0) { btrfs_err(device->fs_info, "couldn't get super block location for mirror %d", i); atomic_inc(&device->sb_write_errors); continue; } if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->commit_total_bytes) break; btrfs_set_super_bytenr(sb, bytenr_orig); crypto_shash_digest(shash, (const char *)sb + BTRFS_CSUM_SIZE, BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE, sb->csum); folio = __filemap_get_folio(mapping, bytenr >> PAGE_SHIFT, FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS); if (IS_ERR(folio)) { btrfs_err(device->fs_info, "couldn't get super block page for bytenr %llu", bytenr); atomic_inc(&device->sb_write_errors); continue; } ASSERT(folio_order(folio) == 0); offset = offset_in_folio(folio, bytenr); disk_super = folio_address(folio) + offset; memcpy(disk_super, sb, BTRFS_SUPER_INFO_SIZE); /* * Directly use bios here instead of relying on the page cache * to do I/O, so we don't lose the ability to do integrity * checking. */ bio = bio_alloc(device->bdev, 1, REQ_OP_WRITE | REQ_SYNC | REQ_META | REQ_PRIO, GFP_NOFS); bio->bi_iter.bi_sector = bytenr >> SECTOR_SHIFT; bio->bi_private = device; bio->bi_end_io = btrfs_end_super_write; bio_add_folio_nofail(bio, folio, BTRFS_SUPER_INFO_SIZE, offset); /* * We FUA only the first super block. The others we allow to * go down lazy and there's a short window where the on-disk * copies might still contain the older version. */ if (i == 0 && !btrfs_test_opt(device->fs_info, NOBARRIER)) bio->bi_opf |= REQ_FUA; submit_bio(bio); if (btrfs_advance_sb_log(device, i)) atomic_inc(&device->sb_write_errors); } return atomic_read(&device->sb_write_errors) < i ? 0 : -1; } /* * Wait for write completion of superblocks done by write_dev_supers, * @max_mirrors same for write and wait phases. * * Return -1 if primary super block write failed or when there were no super block * copies written. Otherwise 0. 
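 *
 * The error count also includes submission failures recorded in
 * device->sb_write_errors by the write endio handler.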
*/ static int wait_dev_supers(struct btrfs_device *device, int max_mirrors) { int i; int errors = 0; bool primary_failed = false; int ret; u64 bytenr; if (max_mirrors == 0) max_mirrors = BTRFS_SUPER_MIRROR_MAX; for (i = 0; i < max_mirrors; i++) { struct folio *folio; ret = btrfs_sb_log_location(device, i, READ, &bytenr); if (ret == -ENOENT) { break; } else if (ret < 0) { errors++; if (i == 0) primary_failed = true; continue; } if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->commit_total_bytes) break; folio = filemap_get_folio(device->bdev->bd_mapping, bytenr >> PAGE_SHIFT); /* If the folio has been removed, then we know it completed. */ if (IS_ERR(folio)) continue; ASSERT(folio_order(folio) == 0); /* Folio will be unlocked once the write completes. */ folio_wait_locked(folio); folio_put(folio); } errors += atomic_read(&device->sb_write_errors); if (errors >= BTRFS_SUPER_PRIMARY_WRITE_ERROR) primary_failed = true; if (primary_failed) { btrfs_err(device->fs_info, "error writing primary super block to device %llu", device->devid); return -1; } return errors < i ? 0 : -1; } /* * endio for the write_dev_flush, this will wake anyone waiting * for the barrier when it is done */ static void btrfs_end_empty_barrier(struct bio *bio) { bio_uninit(bio); complete(bio->bi_private); } /* * Submit a flush request to the device if it supports it. Error handling is * done in the waiting counterpart. */ static void write_dev_flush(struct btrfs_device *device) { struct bio *bio = &device->flush_bio; device->last_flush_error = BLK_STS_OK; bio_init(bio, device->bdev, NULL, 0, REQ_OP_WRITE | REQ_SYNC | REQ_PREFLUSH); bio->bi_end_io = btrfs_end_empty_barrier; init_completion(&device->flush_wait); bio->bi_private = &device->flush_wait; submit_bio(bio); set_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state); } /* * If the flush bio has been submitted by write_dev_flush, wait for it. * Return true for any error, and false otherwise. */ static bool wait_dev_flush(struct btrfs_device *device) { struct bio *bio = &device->flush_bio; if (!test_and_clear_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state)) return false; wait_for_completion_io(&device->flush_wait); if (bio->bi_status) { device->last_flush_error = bio->bi_status; btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_FLUSH_ERRS); return true; } return false; } /* * send an empty flush down to each device in parallel, * then wait for them */ static int barrier_all_devices(struct btrfs_fs_info *info) { struct list_head *head; struct btrfs_device *dev; int errors_wait = 0; lockdep_assert_held(&info->fs_devices->device_list_mutex); /* send down all the barriers */ head = &info->fs_devices->devices; list_for_each_entry(dev, head, dev_list) { if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) continue; if (!dev->bdev) continue; if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) continue; write_dev_flush(dev); } /* wait for all the barriers */ list_for_each_entry(dev, head, dev_list) { if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state)) continue; if (!dev->bdev) { errors_wait++; continue; } if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) continue; if (wait_dev_flush(dev)) errors_wait++; } /* * Checks last_flush_error of disks in order to determine the device * state. 
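	 * Flush errors are tolerated as long as the filesystem could still be
	 * mounted degraded, which is what btrfs_check_rw_degradable() decides
	 * below.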
*/ if (errors_wait && !btrfs_check_rw_degradable(info, NULL)) return -EIO; return 0; } int btrfs_get_num_tolerated_disk_barrier_failures(u64 flags) { int raid_type; int min_tolerated = INT_MAX; if ((flags & BTRFS_BLOCK_GROUP_PROFILE_MASK) == 0 || (flags & BTRFS_AVAIL_ALLOC_BIT_SINGLE)) min_tolerated = min_t(int, min_tolerated, btrfs_raid_array[BTRFS_RAID_SINGLE]. tolerated_failures); for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) { if (raid_type == BTRFS_RAID_SINGLE) continue; if (!(flags & btrfs_raid_array[raid_type].bg_flag)) continue; min_tolerated = min_t(int, min_tolerated, btrfs_raid_array[raid_type]. tolerated_failures); } if (min_tolerated == INT_MAX) { pr_warn("BTRFS: unknown raid flag: %llu", flags); min_tolerated = 0; } return min_tolerated; } int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors) { struct list_head *head; struct btrfs_device *dev; struct btrfs_super_block *sb; struct btrfs_dev_item *dev_item; int ret; int do_barriers; int max_errors; int total_errors = 0; u64 flags; do_barriers = !btrfs_test_opt(fs_info, NOBARRIER); /* * max_mirrors == 0 indicates we're from commit_transaction, * not from fsync where the tree roots in fs_info have not * been consistent on disk. */ if (max_mirrors == 0) backup_super_roots(fs_info); sb = fs_info->super_for_commit; dev_item = &sb->dev_item; mutex_lock(&fs_info->fs_devices->device_list_mutex); head = &fs_info->fs_devices->devices; max_errors = btrfs_super_num_devices(fs_info->super_copy) - 1; if (do_barriers) { ret = barrier_all_devices(fs_info); if (ret) { mutex_unlock( &fs_info->fs_devices->device_list_mutex); btrfs_handle_fs_error(fs_info, ret, "errors while submitting device barriers."); return ret; } } list_for_each_entry(dev, head, dev_list) { if (!dev->bdev) { total_errors++; continue; } if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) continue; btrfs_set_stack_device_generation(dev_item, 0); btrfs_set_stack_device_type(dev_item, dev->type); btrfs_set_stack_device_id(dev_item, dev->devid); btrfs_set_stack_device_total_bytes(dev_item, dev->commit_total_bytes); btrfs_set_stack_device_bytes_used(dev_item, dev->commit_bytes_used); btrfs_set_stack_device_io_align(dev_item, dev->io_align); btrfs_set_stack_device_io_width(dev_item, dev->io_width); btrfs_set_stack_device_sector_size(dev_item, dev->sector_size); memcpy(dev_item->uuid, dev->uuid, BTRFS_UUID_SIZE); memcpy(dev_item->fsid, dev->fs_devices->metadata_uuid, BTRFS_FSID_SIZE); flags = btrfs_super_flags(sb); btrfs_set_super_flags(sb, flags | BTRFS_HEADER_FLAG_WRITTEN); ret = btrfs_validate_write_super(fs_info, sb); if (ret < 0) { mutex_unlock(&fs_info->fs_devices->device_list_mutex); btrfs_handle_fs_error(fs_info, -EUCLEAN, "unexpected superblock corruption detected"); return -EUCLEAN; } ret = write_dev_supers(dev, sb, max_mirrors); if (ret) total_errors++; } if (total_errors > max_errors) { btrfs_err(fs_info, "%d errors while writing supers", total_errors); mutex_unlock(&fs_info->fs_devices->device_list_mutex); /* FUA is masked off if unsupported and can't be the reason */ btrfs_handle_fs_error(fs_info, -EIO, "%d errors while writing supers", total_errors); return -EIO; } total_errors = 0; list_for_each_entry(dev, head, dev_list) { if (!dev->bdev) continue; if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &dev->dev_state) || !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state)) continue; ret = wait_dev_supers(dev, max_mirrors); if (ret) total_errors++; } 
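
	/*
	 * Both the submit and wait passes are done; anything still counted in
	 * total_errors is compared against max_errors below.
	 */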
mutex_unlock(&fs_info->fs_devices->device_list_mutex); if (total_errors > max_errors) { btrfs_handle_fs_error(fs_info, -EIO, "%d errors while writing supers", total_errors); return -EIO; } return 0; } /* Drop a fs root from the radix tree and free it. */ void btrfs_drop_and_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) { bool drop_ref = false; spin_lock(&fs_info->fs_roots_radix_lock); radix_tree_delete(&fs_info->fs_roots_radix, (unsigned long)btrfs_root_id(root)); if (test_and_clear_bit(BTRFS_ROOT_IN_RADIX, &root->state)) drop_ref = true; spin_unlock(&fs_info->fs_roots_radix_lock); if (BTRFS_FS_ERROR(fs_info)) { ASSERT(root->log_root == NULL); if (root->reloc_root) { btrfs_put_root(root->reloc_root); root->reloc_root = NULL; } } if (drop_ref) btrfs_put_root(root); } int btrfs_commit_super(struct btrfs_fs_info *fs_info) { mutex_lock(&fs_info->cleaner_mutex); btrfs_run_delayed_iputs(fs_info); mutex_unlock(&fs_info->cleaner_mutex); wake_up_process(fs_info->cleaner_kthread); /* wait until ongoing cleanup work done */ down_write(&fs_info->cleanup_work_sem); up_write(&fs_info->cleanup_work_sem); return btrfs_commit_current_transaction(fs_info->tree_root); } static void warn_about_uncommitted_trans(struct btrfs_fs_info *fs_info) { struct btrfs_transaction *trans; struct btrfs_transaction *tmp; bool found = false; /* * This function is only called at the very end of close_ctree(), * thus no other running transaction, no need to take trans_lock. */ ASSERT(test_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags)); list_for_each_entry_safe(trans, tmp, &fs_info->trans_list, list) { struct extent_state *cached = NULL; u64 dirty_bytes = 0; u64 cur = 0; u64 found_start; u64 found_end; found = true; while (find_first_extent_bit(&trans->dirty_pages, cur, &found_start, &found_end, EXTENT_DIRTY, &cached)) { dirty_bytes += found_end + 1 - found_start; cur = found_end + 1; } btrfs_warn(fs_info, "transaction %llu (with %llu dirty metadata bytes) is not committed", trans->transid, dirty_bytes); btrfs_cleanup_one_transaction(trans); if (trans == fs_info->running_transaction) fs_info->running_transaction = NULL; list_del_init(&trans->list); btrfs_put_transaction(trans); trace_btrfs_transaction_commit(fs_info); } ASSERT(!found); } void __cold close_ctree(struct btrfs_fs_info *fs_info) { int ret; set_bit(BTRFS_FS_CLOSING_START, &fs_info->flags); /* * If we had UNFINISHED_DROPS we could still be processing them, so * clear that bit and wake up relocation so it can stop. * We must do this before stopping the block group reclaim task, because * at btrfs_relocate_block_group() we wait for this bit, and after the * wait we stop with -EINTR if btrfs_fs_closing() returns non-zero - we * have just set BTRFS_FS_CLOSING_START, so btrfs_fs_closing() will * return 1. */ btrfs_wake_unfinished_drop(fs_info); /* * We may have the reclaim task running and relocating a data block group, * in which case it may create delayed iputs. So stop it before we park * the cleaner kthread otherwise we can get new delayed iputs after * parking the cleaner, and that can make the async reclaim task to hang * if it's waiting for delayed iputs to complete, since the cleaner is * parked and can not run delayed iputs - this will make us hang when * trying to stop the async reclaim task. */ cancel_work_sync(&fs_info->reclaim_bgs_work); /* * We don't want the cleaner to start new transactions, add more delayed * iputs, etc. while we're closing. 
We can't use kthread_stop() yet
	 * because that frees the task_struct, and the transaction kthread might
	 * still try to wake up the cleaner.
	 */
	kthread_park(fs_info->cleaner_kthread);

	/* wait for the qgroup rescan worker to stop */
	btrfs_qgroup_wait_for_completion(fs_info, false);

	/* wait for the uuid_scan task to finish */
	down(&fs_info->uuid_tree_rescan_sem);
	/* avoid complaints from lockdep et al., set sem back to initial state */
	up(&fs_info->uuid_tree_rescan_sem);

	/* pause restriper - we want to resume on mount */
	btrfs_pause_balance(fs_info);

	btrfs_dev_replace_suspend_for_unmount(fs_info);

	btrfs_scrub_cancel(fs_info);

	/* wait for any defraggers to finish */
	wait_event(fs_info->transaction_wait,
		   (atomic_read(&fs_info->defrag_running) == 0));

	/* clear out the rbtree of defraggable inodes */
	btrfs_cleanup_defrag_inodes(fs_info);

	/*
	 * Wait for any fixup workers to complete.
	 * If we don't wait for them here and they are still running by the time
	 * we call kthread_stop() against the cleaner kthread further below, we
	 * get a use-after-free on the cleaner because the fixup worker adds an
	 * inode to the list of delayed iputs and then attempts to wake up the
	 * cleaner kthread, which was already stopped and destroyed. We already
	 * parked the cleaner, but below we run all pending delayed iputs.
	 */
	btrfs_flush_workqueue(fs_info->fixup_workers);

	/*
	 * Similar case here, we have to wait for delalloc workers before we
	 * proceed below and stop the cleaner kthread, otherwise we trigger a
	 * use-after-free on the cleaner kthread task_struct when a delalloc
	 * worker running submit_compressed_extents() adds a delayed iput, which
	 * does a wake up on the cleaner kthread, which was already freed below
	 * when we call kthread_stop().
	 */
	btrfs_flush_workqueue(fs_info->delalloc_workers);

	/*
	 * After we parked the cleaner kthread, ordered extents may have
	 * completed and created new delayed iputs. If one of the async reclaim
	 * tasks is running and in the RUN_DELAYED_IPUTS flush state, then we
	 * can hang forever trying to stop it, because if a delayed iput is
	 * added after it ran btrfs_run_delayed_iputs() and before it called
	 * btrfs_wait_on_delayed_iputs(), it will hang forever since there is
	 * no one else to run iputs.
	 *
	 * So wait for all ongoing ordered extents to complete and then run
	 * delayed iputs. This works because once we reach this point no one
	 * can create new ordered extents or delayed iputs through some other
	 * means.
	 *
	 * Also note that btrfs_wait_ordered_roots() is not safe here, because
	 * it waits for BTRFS_ORDERED_COMPLETE to be set on an ordered extent,
	 * but the delayed iput for the respective inode is made only when doing
	 * the final btrfs_put_ordered_extent() (which must happen at
	 * btrfs_finish_ordered_io() when we are unmounting).
	 */
	btrfs_flush_workqueue(fs_info->endio_write_workers);

	/* Ordered extents for free space inodes. */
	btrfs_flush_workqueue(fs_info->endio_freespace_worker);

	btrfs_run_delayed_iputs(fs_info);

	cancel_work_sync(&fs_info->async_reclaim_work);
	cancel_work_sync(&fs_info->async_data_reclaim_work);
	cancel_work_sync(&fs_info->preempt_reclaim_work);
	cancel_work_sync(&fs_info->em_shrinker_work);

	/* Cancel or finish ongoing discard work */
	btrfs_discard_cleanup(fs_info);

	if (!sb_rdonly(fs_info->sb)) {
		/*
		 * The cleaner kthread is stopped, so do one final pass over
		 * unused block groups.
		 */
		btrfs_delete_unused_bgs(fs_info);

		/*
		 * There might be existing delayed inode workers still running
		 * and holding an empty delayed inode item.
We must wait for * them to complete first because they can create a transaction. * This happens when someone calls btrfs_balance_delayed_items() * and then a transaction commit runs the same delayed nodes * before any delayed worker has done something with the nodes. * We must wait for any worker here and not at transaction * commit time since that could cause a deadlock. * This is a very rare case. */ btrfs_flush_workqueue(fs_info->delayed_workers); ret = btrfs_commit_super(fs_info); if (ret) btrfs_err(fs_info, "commit super ret %d", ret); } if (BTRFS_FS_ERROR(fs_info)) btrfs_error_commit_super(fs_info); kthread_stop(fs_info->transaction_kthread); kthread_stop(fs_info->cleaner_kthread); ASSERT(list_empty(&fs_info->delayed_iputs)); set_bit(BTRFS_FS_CLOSING_DONE, &fs_info->flags); if (btrfs_check_quota_leak(fs_info)) { WARN_ON(IS_ENABLED(CONFIG_BTRFS_DEBUG)); btrfs_err(fs_info, "qgroup reserved space leaked"); } btrfs_free_qgroup_config(fs_info); ASSERT(list_empty(&fs_info->delalloc_roots)); if (percpu_counter_sum(&fs_info->delalloc_bytes)) { btrfs_info(fs_info, "at unmount delalloc count %lld", percpu_counter_sum(&fs_info->delalloc_bytes)); } if (percpu_counter_sum(&fs_info->ordered_bytes)) btrfs_info(fs_info, "at unmount dio bytes count %lld", percpu_counter_sum(&fs_info->ordered_bytes)); btrfs_sysfs_remove_mounted(fs_info); btrfs_sysfs_remove_fsid(fs_info->fs_devices); btrfs_put_block_group_cache(fs_info); /* * we must make sure there is not any read request to * submit after we stopping all workers. */ invalidate_inode_pages2(fs_info->btree_inode->i_mapping); btrfs_stop_all_workers(fs_info); /* We shouldn't have any transaction open at this point */ warn_about_uncommitted_trans(fs_info); clear_bit(BTRFS_FS_OPEN, &fs_info->flags); free_root_pointers(fs_info, true); btrfs_free_fs_roots(fs_info); /* * We must free the block groups after dropping the fs_roots as we could * have had an IO error and have left over tree log blocks that aren't * cleaned up until the fs roots are freed. This makes the block group * accounting appear to be wrong because there's pending reserved bytes, * so make sure we do the block group cleanup afterwards. */ btrfs_free_block_groups(fs_info); iput(fs_info->btree_inode); btrfs_mapping_tree_free(fs_info); btrfs_close_devices(fs_info->fs_devices); } void btrfs_mark_buffer_dirty(struct btrfs_trans_handle *trans, struct extent_buffer *buf) { struct btrfs_fs_info *fs_info = buf->fs_info; u64 transid = btrfs_header_generation(buf); #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS /* * This is a fast path so only do this check if we have sanity tests * enabled. Normal people shouldn't be using unmapped buffers as dirty * outside of the sanity tests. */ if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags))) return; #endif /* This is an active transaction (its state < TRANS_STATE_UNBLOCKED). 
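	 * Its transid must match fs_info->generation, and the buffer's
	 * generation is checked against it below to catch stale dirtying.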
*/ ASSERT(trans->transid == fs_info->generation); btrfs_assert_tree_write_locked(buf); if (unlikely(transid != fs_info->generation)) { btrfs_abort_transaction(trans, -EUCLEAN); btrfs_crit(fs_info, "dirty buffer transid mismatch, logical %llu found transid %llu running transid %llu", buf->start, transid, fs_info->generation); } set_extent_buffer_dirty(buf); } static void __btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info, int flush_delayed) { /* * looks as though older kernels can get into trouble with * this code, they end up stuck in balance_dirty_pages forever */ int ret; if (current->flags & PF_MEMALLOC) return; if (flush_delayed) btrfs_balance_delayed_items(fs_info); ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, BTRFS_DIRTY_METADATA_THRESH, fs_info->dirty_metadata_batch); if (ret > 0) { balance_dirty_pages_ratelimited(fs_info->btree_inode->i_mapping); } } void btrfs_btree_balance_dirty(struct btrfs_fs_info *fs_info) { __btrfs_btree_balance_dirty(fs_info, 1); } void btrfs_btree_balance_dirty_nodelay(struct btrfs_fs_info *fs_info) { __btrfs_btree_balance_dirty(fs_info, 0); } static void btrfs_error_commit_super(struct btrfs_fs_info *fs_info) { /* cleanup FS via transaction */ btrfs_cleanup_transaction(fs_info); mutex_lock(&fs_info->cleaner_mutex); btrfs_run_delayed_iputs(fs_info); mutex_unlock(&fs_info->cleaner_mutex); down_write(&fs_info->cleanup_work_sem); up_write(&fs_info->cleanup_work_sem); } static void btrfs_drop_all_logs(struct btrfs_fs_info *fs_info) { struct btrfs_root *gang[8]; u64 root_objectid = 0; int ret; spin_lock(&fs_info->fs_roots_radix_lock); while ((ret = radix_tree_gang_lookup(&fs_info->fs_roots_radix, (void **)gang, root_objectid, ARRAY_SIZE(gang))) != 0) { int i; for (i = 0; i < ret; i++) gang[i] = btrfs_grab_root(gang[i]); spin_unlock(&fs_info->fs_roots_radix_lock); for (i = 0; i < ret; i++) { if (!gang[i]) continue; root_objectid = btrfs_root_id(gang[i]); btrfs_free_log(NULL, gang[i]); btrfs_put_root(gang[i]); } root_objectid++; spin_lock(&fs_info->fs_roots_radix_lock); } spin_unlock(&fs_info->fs_roots_radix_lock); btrfs_free_log_root_tree(NULL, fs_info); } static void btrfs_destroy_ordered_extents(struct btrfs_root *root) { struct btrfs_ordered_extent *ordered; spin_lock(&root->ordered_extent_lock); /* * This will just short circuit the ordered completion stuff which will * make sure the ordered extent gets properly cleaned up. */ list_for_each_entry(ordered, &root->ordered_extents, root_extent_list) set_bit(BTRFS_ORDERED_IOERR, &ordered->flags); spin_unlock(&root->ordered_extent_lock); } static void btrfs_destroy_all_ordered_extents(struct btrfs_fs_info *fs_info) { struct btrfs_root *root; LIST_HEAD(splice); spin_lock(&fs_info->ordered_root_lock); list_splice_init(&fs_info->ordered_roots, &splice); while (!list_empty(&splice)) { root = list_first_entry(&splice, struct btrfs_root, ordered_root); list_move_tail(&root->ordered_root, &fs_info->ordered_roots); spin_unlock(&fs_info->ordered_root_lock); btrfs_destroy_ordered_extents(root); cond_resched(); spin_lock(&fs_info->ordered_root_lock); } spin_unlock(&fs_info->ordered_root_lock); /* * We need this here because if we've been flipped read-only we won't * get sync() from the umount, so we need to make sure any ordered * extents that haven't had their dirty pages IO start writeout yet * actually get run and error out properly. 
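	 * Passing U64_MAX as the count waits for all ordered extents instead
	 * of a limited number of them.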
*/ btrfs_wait_ordered_roots(fs_info, U64_MAX, NULL); } static void btrfs_destroy_delalloc_inodes(struct btrfs_root *root) { struct btrfs_inode *btrfs_inode; LIST_HEAD(splice); spin_lock(&root->delalloc_lock); list_splice_init(&root->delalloc_inodes, &splice); while (!list_empty(&splice)) { struct inode *inode = NULL; btrfs_inode = list_first_entry(&splice, struct btrfs_inode, delalloc_inodes); btrfs_del_delalloc_inode(btrfs_inode); spin_unlock(&root->delalloc_lock); /* * Make sure we get a live inode and that it'll not disappear * meanwhile. */ inode = igrab(&btrfs_inode->vfs_inode); if (inode) { unsigned int nofs_flag; nofs_flag = memalloc_nofs_save(); invalidate_inode_pages2(inode->i_mapping); memalloc_nofs_restore(nofs_flag); iput(inode); } spin_lock(&root->delalloc_lock); } spin_unlock(&root->delalloc_lock); } static void btrfs_destroy_all_delalloc_inodes(struct btrfs_fs_info *fs_info) { struct btrfs_root *root; LIST_HEAD(splice); spin_lock(&fs_info->delalloc_root_lock); list_splice_init(&fs_info->delalloc_roots, &splice); while (!list_empty(&splice)) { root = list_first_entry(&splice, struct btrfs_root, delalloc_root); root = btrfs_grab_root(root); BUG_ON(!root); spin_unlock(&fs_info->delalloc_root_lock); btrfs_destroy_delalloc_inodes(root); btrfs_put_root(root); spin_lock(&fs_info->delalloc_root_lock); } spin_unlock(&fs_info->delalloc_root_lock); } static void btrfs_destroy_marked_extents(struct btrfs_fs_info *fs_info, struct extent_io_tree *dirty_pages, int mark) { struct extent_buffer *eb; u64 start = 0; u64 end; while (find_first_extent_bit(dirty_pages, start, &start, &end, mark, NULL)) { clear_extent_bits(dirty_pages, start, end, mark); while (start <= end) { eb = find_extent_buffer(fs_info, start); start += fs_info->nodesize; if (!eb) continue; btrfs_tree_lock(eb); wait_on_extent_buffer_writeback(eb); btrfs_clear_buffer_dirty(NULL, eb); btrfs_tree_unlock(eb); free_extent_buffer_stale(eb); } } } static void btrfs_destroy_pinned_extent(struct btrfs_fs_info *fs_info, struct extent_io_tree *unpin) { u64 start; u64 end; while (1) { struct extent_state *cached_state = NULL; /* * The btrfs_finish_extent_commit() may get the same range as * ours between find_first_extent_bit and clear_extent_dirty. * Hence, hold the unused_bg_unpin_mutex to avoid double unpin * the same extent range. 
*/ mutex_lock(&fs_info->unused_bg_unpin_mutex); if (!find_first_extent_bit(unpin, 0, &start, &end, EXTENT_DIRTY, &cached_state)) { mutex_unlock(&fs_info->unused_bg_unpin_mutex); break; } clear_extent_dirty(unpin, start, end, &cached_state); free_extent_state(cached_state); btrfs_error_unpin_extent_range(fs_info, start, end); mutex_unlock(&fs_info->unused_bg_unpin_mutex); cond_resched(); } } static void btrfs_cleanup_bg_io(struct btrfs_block_group *cache) { struct inode *inode; inode = cache->io_ctl.inode; if (inode) { unsigned int nofs_flag; nofs_flag = memalloc_nofs_save(); invalidate_inode_pages2(inode->i_mapping); memalloc_nofs_restore(nofs_flag); BTRFS_I(inode)->generation = 0; cache->io_ctl.inode = NULL; iput(inode); } ASSERT(cache->io_ctl.pages == NULL); btrfs_put_block_group(cache); } void btrfs_cleanup_dirty_bgs(struct btrfs_transaction *cur_trans, struct btrfs_fs_info *fs_info) { struct btrfs_block_group *cache; spin_lock(&cur_trans->dirty_bgs_lock); while (!list_empty(&cur_trans->dirty_bgs)) { cache = list_first_entry(&cur_trans->dirty_bgs, struct btrfs_block_group, dirty_list); if (!list_empty(&cache->io_list)) { spin_unlock(&cur_trans->dirty_bgs_lock); list_del_init(&cache->io_list); btrfs_cleanup_bg_io(cache); spin_lock(&cur_trans->dirty_bgs_lock); } list_del_init(&cache->dirty_list); spin_lock(&cache->lock); cache->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&cache->lock); spin_unlock(&cur_trans->dirty_bgs_lock); btrfs_put_block_group(cache); btrfs_dec_delayed_refs_rsv_bg_updates(fs_info); spin_lock(&cur_trans->dirty_bgs_lock); } spin_unlock(&cur_trans->dirty_bgs_lock); /* * Refer to the definition of io_bgs member for details why it's safe * to use it without any locking */ while (!list_empty(&cur_trans->io_bgs)) { cache = list_first_entry(&cur_trans->io_bgs, struct btrfs_block_group, io_list); list_del_init(&cache->io_list); spin_lock(&cache->lock); cache->disk_cache_state = BTRFS_DC_ERROR; spin_unlock(&cache->lock); btrfs_cleanup_bg_io(cache); } } static void btrfs_free_all_qgroup_pertrans(struct btrfs_fs_info *fs_info) { struct btrfs_root *gang[8]; int i; int ret; spin_lock(&fs_info->fs_roots_radix_lock); while (1) { ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix, (void **)gang, 0, ARRAY_SIZE(gang), BTRFS_ROOT_TRANS_TAG); if (ret == 0) break; for (i = 0; i < ret; i++) { struct btrfs_root *root = gang[i]; btrfs_qgroup_free_meta_all_pertrans(root); radix_tree_tag_clear(&fs_info->fs_roots_radix, (unsigned long)btrfs_root_id(root), BTRFS_ROOT_TRANS_TAG); } } spin_unlock(&fs_info->fs_roots_radix_lock); } void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans) { struct btrfs_fs_info *fs_info = cur_trans->fs_info; struct btrfs_device *dev, *tmp; btrfs_cleanup_dirty_bgs(cur_trans, fs_info); ASSERT(list_empty(&cur_trans->dirty_bgs)); ASSERT(list_empty(&cur_trans->io_bgs)); list_for_each_entry_safe(dev, tmp, &cur_trans->dev_update_list, post_commit_list) { list_del_init(&dev->post_commit_list); } btrfs_destroy_delayed_refs(cur_trans); cur_trans->state = TRANS_STATE_COMMIT_START; wake_up(&fs_info->transaction_blocked_wait); cur_trans->state = TRANS_STATE_UNBLOCKED; wake_up(&fs_info->transaction_wait); btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages, EXTENT_DIRTY); btrfs_destroy_pinned_extent(fs_info, &cur_trans->pinned_extents); cur_trans->state =TRANS_STATE_COMPLETED; wake_up(&cur_trans->commit_wait); } static int btrfs_cleanup_transaction(struct btrfs_fs_info *fs_info) { struct btrfs_transaction *t; 
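
	/*
	 * Walk fs_info->trans_list: wait for transactions that already reached
	 * the commit phase and tear down the remaining ones with
	 * btrfs_cleanup_one_transaction().
	 */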
mutex_lock(&fs_info->transaction_kthread_mutex); spin_lock(&fs_info->trans_lock); while (!list_empty(&fs_info->trans_list)) { t = list_first_entry(&fs_info->trans_list, struct btrfs_transaction, list); if (t->state >= TRANS_STATE_COMMIT_PREP) { refcount_inc(&t->use_count); spin_unlock(&fs_info->trans_lock); btrfs_wait_for_commit(fs_info, t->transid); btrfs_put_transaction(t); spin_lock(&fs_info->trans_lock); continue; } if (t == fs_info->running_transaction) { t->state = TRANS_STATE_COMMIT_DOING; spin_unlock(&fs_info->trans_lock); /* * We wait for 0 num_writers since we don't hold a trans * handle open currently for this transaction. */ wait_event(t->writer_wait, atomic_read(&t->num_writers) == 0); } else { spin_unlock(&fs_info->trans_lock); } btrfs_cleanup_one_transaction(t); spin_lock(&fs_info->trans_lock); if (t == fs_info->running_transaction) fs_info->running_transaction = NULL; list_del_init(&t->list); spin_unlock(&fs_info->trans_lock); btrfs_put_transaction(t); trace_btrfs_transaction_commit(fs_info); spin_lock(&fs_info->trans_lock); } spin_unlock(&fs_info->trans_lock); btrfs_destroy_all_ordered_extents(fs_info); btrfs_destroy_delayed_inodes(fs_info); btrfs_assert_delayed_root_empty(fs_info); btrfs_destroy_all_delalloc_inodes(fs_info); btrfs_drop_all_logs(fs_info); btrfs_free_all_qgroup_pertrans(fs_info); mutex_unlock(&fs_info->transaction_kthread_mutex); return 0; } int btrfs_init_root_free_objectid(struct btrfs_root *root) { struct btrfs_path *path; int ret; struct extent_buffer *l; struct btrfs_key search_key; struct btrfs_key found_key; int slot; path = btrfs_alloc_path(); if (!path) return -ENOMEM; search_key.objectid = BTRFS_LAST_FREE_OBJECTID; search_key.type = -1; search_key.offset = (u64)-1; ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); if (ret < 0) goto error; if (ret == 0) { /* * Key with offset -1 found, there would have to exist a root * with such id, but this is out of valid range. */ ret = -EUCLEAN; goto error; } if (path->slots[0] > 0) { slot = path->slots[0] - 1; l = path->nodes[0]; btrfs_item_key_to_cpu(l, &found_key, slot); root->free_objectid = max_t(u64, found_key.objectid + 1, BTRFS_FIRST_FREE_OBJECTID); } else { root->free_objectid = BTRFS_FIRST_FREE_OBJECTID; } ret = 0; error: btrfs_free_path(path); return ret; } int btrfs_get_free_objectid(struct btrfs_root *root, u64 *objectid) { int ret; mutex_lock(&root->objectid_mutex); if (unlikely(root->free_objectid >= BTRFS_LAST_FREE_OBJECTID)) { btrfs_warn(root->fs_info, "the objectid of root %llu reaches its highest value", btrfs_root_id(root)); ret = -ENOSPC; goto out; } *objectid = root->free_objectid++; ret = 0; out: mutex_unlock(&root->objectid_mutex); return ret; }
// SPDX-License-Identifier: GPL-2.0-only
/* cg6.c: CGSIX (GX, GXplus, TGX) frame buffer driver
 *
 * Copyright (C) 2003, 2006 David S. Miller ([email protected])
 * Copyright (C) 1996,1998 Jakub Jelinek ([email protected])
 * Copyright (C) 1996 Miguel de Icaza ([email protected])
 * Copyright (C) 1996 Eddie C. Dost ([email protected])
 *
 * Driver layout based loosely on tgafb.c, see that file for credits.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/fb.h>
#include <linux/mm.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include <asm/io.h>
#include <asm/fbio.h>

#include "sbuslib.h"

/*
 * Local functions.
 */

static int cg6_setcolreg(unsigned, unsigned, unsigned, unsigned,
			 unsigned, struct fb_info *);
static int cg6_blank(int, struct fb_info *);

static void cg6_imageblit(struct fb_info *, const struct fb_image *);
static void cg6_fillrect(struct fb_info *, const struct fb_fillrect *);
static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area);
static int cg6_sync(struct fb_info *);
static int cg6_pan_display(struct fb_var_screeninfo *, struct fb_info *);
static int cg6_sbusfb_mmap(struct fb_info *info, struct vm_area_struct *vma);
static int cg6_sbusfb_ioctl(struct fb_info *info, unsigned int cmd,
			    unsigned long arg);

/*
 * Frame buffer operations
 */

static const struct fb_ops cg6_ops = {
	.owner		= THIS_MODULE,
	__FB_DEFAULT_SBUS_OPS_RDWR(cg6),
	.fb_setcolreg	= cg6_setcolreg,
	.fb_blank	= cg6_blank,
	.fb_pan_display	= cg6_pan_display,
	.fb_fillrect	= cg6_fillrect,
	.fb_copyarea	= cg6_copyarea,
	.fb_imageblit	= cg6_imageblit,
	.fb_sync	= cg6_sync,
	__FB_DEFAULT_SBUS_OPS_IOCTL(cg6),
	__FB_DEFAULT_SBUS_OPS_MMAP(cg6),
};

/* Offset of interesting structures in the OBIO space */
/*
 * Brooktree is the video dac and is funny to program on the cg6.
 * (it's even funnier on the cg3)
 * The FBC could be the frame buffer control
 * The FHC could be the frame buffer hardware control.
*/ #define CG6_ROM_OFFSET 0x0UL #define CG6_BROOKTREE_OFFSET 0x200000UL #define CG6_DHC_OFFSET 0x240000UL #define CG6_ALT_OFFSET 0x280000UL #define CG6_FHC_OFFSET 0x300000UL #define CG6_THC_OFFSET 0x301000UL #define CG6_FBC_OFFSET 0x700000UL #define CG6_TEC_OFFSET 0x701000UL #define CG6_RAM_OFFSET 0x800000UL /* FHC definitions */ #define CG6_FHC_FBID_SHIFT 24 #define CG6_FHC_FBID_MASK 255 #define CG6_FHC_REV_SHIFT 20 #define CG6_FHC_REV_MASK 15 #define CG6_FHC_FROP_DISABLE (1 << 19) #define CG6_FHC_ROW_DISABLE (1 << 18) #define CG6_FHC_SRC_DISABLE (1 << 17) #define CG6_FHC_DST_DISABLE (1 << 16) #define CG6_FHC_RESET (1 << 15) #define CG6_FHC_LITTLE_ENDIAN (1 << 13) #define CG6_FHC_RES_MASK (3 << 11) #define CG6_FHC_1024 (0 << 11) #define CG6_FHC_1152 (1 << 11) #define CG6_FHC_1280 (2 << 11) #define CG6_FHC_1600 (3 << 11) #define CG6_FHC_CPU_MASK (3 << 9) #define CG6_FHC_CPU_SPARC (0 << 9) #define CG6_FHC_CPU_68020 (1 << 9) #define CG6_FHC_CPU_386 (2 << 9) #define CG6_FHC_TEST (1 << 8) #define CG6_FHC_TEST_X_SHIFT 4 #define CG6_FHC_TEST_X_MASK 15 #define CG6_FHC_TEST_Y_SHIFT 0 #define CG6_FHC_TEST_Y_MASK 15 /* FBC mode definitions */ #define CG6_FBC_BLIT_IGNORE 0x00000000 #define CG6_FBC_BLIT_NOSRC 0x00100000 #define CG6_FBC_BLIT_SRC 0x00200000 #define CG6_FBC_BLIT_ILLEGAL 0x00300000 #define CG6_FBC_BLIT_MASK 0x00300000 #define CG6_FBC_VBLANK 0x00080000 #define CG6_FBC_MODE_IGNORE 0x00000000 #define CG6_FBC_MODE_COLOR8 0x00020000 #define CG6_FBC_MODE_COLOR1 0x00040000 #define CG6_FBC_MODE_HRMONO 0x00060000 #define CG6_FBC_MODE_MASK 0x00060000 #define CG6_FBC_DRAW_IGNORE 0x00000000 #define CG6_FBC_DRAW_RENDER 0x00008000 #define CG6_FBC_DRAW_PICK 0x00010000 #define CG6_FBC_DRAW_ILLEGAL 0x00018000 #define CG6_FBC_DRAW_MASK 0x00018000 #define CG6_FBC_BWRITE0_IGNORE 0x00000000 #define CG6_FBC_BWRITE0_ENABLE 0x00002000 #define CG6_FBC_BWRITE0_DISABLE 0x00004000 #define CG6_FBC_BWRITE0_ILLEGAL 0x00006000 #define CG6_FBC_BWRITE0_MASK 0x00006000 #define CG6_FBC_BWRITE1_IGNORE 0x00000000 #define CG6_FBC_BWRITE1_ENABLE 0x00000800 #define CG6_FBC_BWRITE1_DISABLE 0x00001000 #define CG6_FBC_BWRITE1_ILLEGAL 0x00001800 #define CG6_FBC_BWRITE1_MASK 0x00001800 #define CG6_FBC_BREAD_IGNORE 0x00000000 #define CG6_FBC_BREAD_0 0x00000200 #define CG6_FBC_BREAD_1 0x00000400 #define CG6_FBC_BREAD_ILLEGAL 0x00000600 #define CG6_FBC_BREAD_MASK 0x00000600 #define CG6_FBC_BDISP_IGNORE 0x00000000 #define CG6_FBC_BDISP_0 0x00000080 #define CG6_FBC_BDISP_1 0x00000100 #define CG6_FBC_BDISP_ILLEGAL 0x00000180 #define CG6_FBC_BDISP_MASK 0x00000180 #define CG6_FBC_INDEX_MOD 0x00000040 #define CG6_FBC_INDEX_MASK 0x00000030 /* THC definitions */ #define CG6_THC_MISC_REV_SHIFT 16 #define CG6_THC_MISC_REV_MASK 15 #define CG6_THC_MISC_RESET (1 << 12) #define CG6_THC_MISC_VIDEO (1 << 10) #define CG6_THC_MISC_SYNC (1 << 9) #define CG6_THC_MISC_VSYNC (1 << 8) #define CG6_THC_MISC_SYNC_ENAB (1 << 7) #define CG6_THC_MISC_CURS_RES (1 << 6) #define CG6_THC_MISC_INT_ENAB (1 << 5) #define CG6_THC_MISC_INT (1 << 4) #define CG6_THC_MISC_INIT 0x9f #define CG6_THC_CURSOFF ((65536-32) | ((65536-32) << 16)) /* The contents are unknown */ struct cg6_tec { int tec_matrix; int tec_clip; int tec_vdc; }; struct cg6_thc { u32 thc_pad0[512]; u32 thc_hs; /* hsync timing */ u32 thc_hsdvs; u32 thc_hd; u32 thc_vs; /* vsync timing */ u32 thc_vd; u32 thc_refresh; u32 thc_misc; u32 thc_pad1[56]; u32 thc_cursxy; /* cursor x,y position (16 bits each) */ u32 thc_cursmask[32]; /* cursor mask bits */ u32 thc_cursbits[32]; /* what to show where mask enabled */ }; 
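
/*
 * FBC register block (the "frame buffer control" mentioned above), used by
 * the accelerated fillrect/copyarea/imageblit paths below; the xxxN members
 * appear to be padding for unused or unknown registers.
 */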
struct cg6_fbc { u32 xxx0[1]; u32 mode; u32 clip; u32 xxx1[1]; u32 s; u32 draw; u32 blit; u32 font; u32 xxx2[24]; u32 x0, y0, z0, color0; u32 x1, y1, z1, color1; u32 x2, y2, z2, color2; u32 x3, y3, z3, color3; u32 offx, offy; u32 xxx3[2]; u32 incx, incy; u32 xxx4[2]; u32 clipminx, clipminy; u32 xxx5[2]; u32 clipmaxx, clipmaxy; u32 xxx6[2]; u32 fg; u32 bg; u32 alu; u32 pm; u32 pixelm; u32 xxx7[2]; u32 patalign; u32 pattern[8]; u32 xxx8[432]; u32 apointx, apointy, apointz; u32 xxx9[1]; u32 rpointx, rpointy, rpointz; u32 xxx10[5]; u32 pointr, pointg, pointb, pointa; u32 alinex, aliney, alinez; u32 xxx11[1]; u32 rlinex, rliney, rlinez; u32 xxx12[5]; u32 liner, lineg, lineb, linea; u32 atrix, atriy, atriz; u32 xxx13[1]; u32 rtrix, rtriy, rtriz; u32 xxx14[5]; u32 trir, trig, trib, tria; u32 aquadx, aquady, aquadz; u32 xxx15[1]; u32 rquadx, rquady, rquadz; u32 xxx16[5]; u32 quadr, quadg, quadb, quada; u32 arectx, arecty, arectz; u32 xxx17[1]; u32 rrectx, rrecty, rrectz; u32 xxx18[5]; u32 rectr, rectg, rectb, recta; }; struct bt_regs { u32 addr; u32 color_map; u32 control; u32 cursor; }; struct cg6_par { spinlock_t lock; struct bt_regs __iomem *bt; struct cg6_fbc __iomem *fbc; struct cg6_thc __iomem *thc; struct cg6_tec __iomem *tec; u32 __iomem *fhc; u32 flags; #define CG6_FLAG_BLANKED 0x00000001 unsigned long which_io; }; static int cg6_sync(struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; int limit = 10000; do { if (!(sbus_readl(&fbc->s) & 0x10000000)) break; udelay(10); } while (--limit > 0); return 0; } static void cg6_switch_from_graph(struct cg6_par *par) { struct cg6_thc __iomem *thc = par->thc; unsigned long flags; spin_lock_irqsave(&par->lock, flags); /* Hide the cursor. */ sbus_writel(CG6_THC_CURSOFF, &thc->thc_cursxy); spin_unlock_irqrestore(&par->lock, flags); } static int cg6_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; /* We just use this to catch switches out of * graphics mode. */ cg6_switch_from_graph(par); if (var->xoffset || var->yoffset || var->vmode) return -EINVAL; return 0; } /** * cg6_fillrect - Draws a rectangle on the screen. * * @info: frame buffer structure that represents a single frame buffer * @rect: structure defining the rectagle and operation. */ static void cg6_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; unsigned long flags; s32 val; /* CG6 doesn't handle ROP_XOR */ spin_lock_irqsave(&par->lock, flags); cg6_sync(info); sbus_writel(rect->color, &fbc->fg); sbus_writel(~(u32)0, &fbc->pixelm); sbus_writel(0xea80ff00, &fbc->alu); sbus_writel(0, &fbc->s); sbus_writel(0, &fbc->clip); sbus_writel(~(u32)0, &fbc->pm); sbus_writel(rect->dy, &fbc->arecty); sbus_writel(rect->dx, &fbc->arectx); sbus_writel(rect->dy + rect->height, &fbc->arecty); sbus_writel(rect->dx + rect->width, &fbc->arectx); do { val = sbus_readl(&fbc->draw); } while (val < 0 && (val & 0x20000000)); spin_unlock_irqrestore(&par->lock, flags); } /** * cg6_copyarea - Copies one area of the screen to another area. * * @info: frame buffer structure that represents a single frame buffer * @area: Structure providing the data to copy the framebuffer contents * from one region to another. * * This drawing operation copies a rectangular area from one area of the * screen to another area. 
*/ static void cg6_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; unsigned long flags; int i; spin_lock_irqsave(&par->lock, flags); cg6_sync(info); sbus_writel(0xff, &fbc->fg); sbus_writel(0x00, &fbc->bg); sbus_writel(~0, &fbc->pixelm); sbus_writel(0xe880cccc, &fbc->alu); sbus_writel(0, &fbc->s); sbus_writel(0, &fbc->clip); sbus_writel(area->sy, &fbc->y0); sbus_writel(area->sx, &fbc->x0); sbus_writel(area->sy + area->height - 1, &fbc->y1); sbus_writel(area->sx + area->width - 1, &fbc->x1); sbus_writel(area->dy, &fbc->y2); sbus_writel(area->dx, &fbc->x2); sbus_writel(area->dy + area->height - 1, &fbc->y3); sbus_writel(area->dx + area->width - 1, &fbc->x3); do { i = sbus_readl(&fbc->blit); } while (i < 0 && (i & 0x20000000)); spin_unlock_irqrestore(&par->lock, flags); } /** * cg6_imageblit - Copies a image from system memory to the screen. * * @info: frame buffer structure that represents a single frame buffer * @image: structure defining the image. */ static void cg6_imageblit(struct fb_info *info, const struct fb_image *image) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_fbc __iomem *fbc = par->fbc; const u8 *data = image->data; unsigned long flags; u32 x, y; int i, width; if (image->depth > 1) { cfb_imageblit(info, image); return; } spin_lock_irqsave(&par->lock, flags); cg6_sync(info); sbus_writel(image->fg_color, &fbc->fg); sbus_writel(image->bg_color, &fbc->bg); sbus_writel(0x140000, &fbc->mode); sbus_writel(0xe880fc30, &fbc->alu); sbus_writel(~(u32)0, &fbc->pixelm); sbus_writel(0, &fbc->s); sbus_writel(0, &fbc->clip); sbus_writel(0xff, &fbc->pm); sbus_writel(32, &fbc->incx); sbus_writel(0, &fbc->incy); x = image->dx; y = image->dy; for (i = 0; i < image->height; i++) { width = image->width; while (width >= 32) { u32 val; sbus_writel(y, &fbc->y0); sbus_writel(x, &fbc->x0); sbus_writel(x + 32 - 1, &fbc->x1); val = ((u32)data[0] << 24) | ((u32)data[1] << 16) | ((u32)data[2] << 8) | ((u32)data[3] << 0); sbus_writel(val, &fbc->font); data += 4; x += 32; width -= 32; } if (width) { u32 val; sbus_writel(y, &fbc->y0); sbus_writel(x, &fbc->x0); sbus_writel(x + width - 1, &fbc->x1); if (width <= 8) { val = (u32) data[0] << 24; data += 1; } else if (width <= 16) { val = ((u32) data[0] << 24) | ((u32) data[1] << 16); data += 2; } else { val = ((u32) data[0] << 24) | ((u32) data[1] << 16) | ((u32) data[2] << 8); data += 3; } sbus_writel(val, &fbc->font); } y += 1; x = image->dx; } spin_unlock_irqrestore(&par->lock, flags); } /** * cg6_setcolreg - Sets a color register. * * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. 
* @info: frame buffer info structure */ static int cg6_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct bt_regs __iomem *bt = par->bt; unsigned long flags; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; spin_lock_irqsave(&par->lock, flags); sbus_writel((u32)regno << 24, &bt->addr); sbus_writel((u32)red << 24, &bt->color_map); sbus_writel((u32)green << 24, &bt->color_map); sbus_writel((u32)blue << 24, &bt->color_map); spin_unlock_irqrestore(&par->lock, flags); return 0; } /** * cg6_blank - Blanks the display. * * @blank: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int cg6_blank(int blank, struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_thc __iomem *thc = par->thc; unsigned long flags; u32 val; spin_lock_irqsave(&par->lock, flags); val = sbus_readl(&thc->thc_misc); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val |= CG6_THC_MISC_VIDEO; par->flags &= ~CG6_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ case FB_BLANK_POWERDOWN: /* Poweroff */ val &= ~CG6_THC_MISC_VIDEO; par->flags |= CG6_FLAG_BLANKED; break; } sbus_writel(val, &thc->thc_misc); spin_unlock_irqrestore(&par->lock, flags); return 0; } static const struct sbus_mmap_map cg6_mmap_map[] = { { .voff = CG6_FBC, .poff = CG6_FBC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_TEC, .poff = CG6_TEC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_BTREGS, .poff = CG6_BROOKTREE_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_FHC, .poff = CG6_FHC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_THC, .poff = CG6_THC_OFFSET, .size = PAGE_SIZE }, { .voff = CG6_ROM, .poff = CG6_ROM_OFFSET, .size = 0x10000 }, { .voff = CG6_RAM, .poff = CG6_RAM_OFFSET, .size = SBUS_MMAP_FBSIZE(1) }, { .voff = CG6_DHC, .poff = CG6_DHC_OFFSET, .size = 0x40000 }, { .size = 0 } }; static int cg6_sbusfb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct cg6_par *par = (struct cg6_par *)info->par; return sbusfb_mmap_helper(cg6_mmap_map, info->fix.smem_start, info->fix.smem_len, par->which_io, vma); } static int cg6_sbusfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { return sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_SUNFAST_COLOR, 8, info->fix.smem_len); } /* * Initialisation */ static void cg6_init_fix(struct fb_info *info, int linebytes) { struct cg6_par *par = (struct cg6_par *)info->par; const char *cg6_cpu_name, *cg6_card_name; u32 conf; conf = sbus_readl(par->fhc); switch (conf & CG6_FHC_CPU_MASK) { case CG6_FHC_CPU_SPARC: cg6_cpu_name = "sparc"; break; case CG6_FHC_CPU_68020: cg6_cpu_name = "68020"; break; default: cg6_cpu_name = "i386"; break; } if (((conf >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK) >= 11) { if (info->fix.smem_len <= 0x100000) cg6_card_name = "TGX"; else cg6_card_name = "TGX+"; } else { if (info->fix.smem_len <= 0x100000) cg6_card_name = "GX"; else cg6_card_name = "GX+"; } sprintf(info->fix.id, "%s %s", cg6_card_name, cg6_cpu_name); info->fix.id[sizeof(info->fix.id) - 1] = 0; info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_CGSIX; } /* Initialize Brooktree DAC */ static void cg6_bt_init(struct cg6_par *par) { struct bt_regs __iomem *bt = par->bt; sbus_writel(0x04 << 
24, &bt->addr); /* color planes */ sbus_writel(0xff << 24, &bt->control); sbus_writel(0x05 << 24, &bt->addr); sbus_writel(0x00 << 24, &bt->control); sbus_writel(0x06 << 24, &bt->addr); /* overlay plane */ sbus_writel(0x73 << 24, &bt->control); sbus_writel(0x07 << 24, &bt->addr); sbus_writel(0x00 << 24, &bt->control); } static void cg6_chip_init(struct fb_info *info) { struct cg6_par *par = (struct cg6_par *)info->par; struct cg6_tec __iomem *tec = par->tec; struct cg6_fbc __iomem *fbc = par->fbc; struct cg6_thc __iomem *thc = par->thc; u32 rev, conf, mode; int i; /* Hide the cursor. */ sbus_writel(CG6_THC_CURSOFF, &thc->thc_cursxy); /* Turn off stuff in the Transform Engine. */ sbus_writel(0, &tec->tec_matrix); sbus_writel(0, &tec->tec_clip); sbus_writel(0, &tec->tec_vdc); /* Take care of bugs in old revisions. */ rev = (sbus_readl(par->fhc) >> CG6_FHC_REV_SHIFT) & CG6_FHC_REV_MASK; if (rev < 5) { conf = (sbus_readl(par->fhc) & CG6_FHC_RES_MASK) | CG6_FHC_CPU_68020 | CG6_FHC_TEST | (11 << CG6_FHC_TEST_X_SHIFT) | (11 << CG6_FHC_TEST_Y_SHIFT); if (rev < 2) conf |= CG6_FHC_DST_DISABLE; sbus_writel(conf, par->fhc); } /* Set things in the FBC. Bad things appear to happen if we do * back to back store/loads on the mode register, so copy it * out instead. */ mode = sbus_readl(&fbc->mode); do { i = sbus_readl(&fbc->s); } while (i & 0x10000000); mode &= ~(CG6_FBC_BLIT_MASK | CG6_FBC_MODE_MASK | CG6_FBC_DRAW_MASK | CG6_FBC_BWRITE0_MASK | CG6_FBC_BWRITE1_MASK | CG6_FBC_BREAD_MASK | CG6_FBC_BDISP_MASK); mode |= (CG6_FBC_BLIT_SRC | CG6_FBC_MODE_COLOR8 | CG6_FBC_DRAW_RENDER | CG6_FBC_BWRITE0_ENABLE | CG6_FBC_BWRITE1_DISABLE | CG6_FBC_BREAD_0 | CG6_FBC_BDISP_0); sbus_writel(mode, &fbc->mode); sbus_writel(0, &fbc->clip); sbus_writel(0, &fbc->offx); sbus_writel(0, &fbc->offy); sbus_writel(0, &fbc->clipminx); sbus_writel(0, &fbc->clipminy); sbus_writel(info->var.xres - 1, &fbc->clipmaxx); sbus_writel(info->var.yres - 1, &fbc->clipmaxy); } static void cg6_unmap_regs(struct platform_device *op, struct fb_info *info, struct cg6_par *par) { if (par->fbc) of_iounmap(&op->resource[0], par->fbc, 4096); if (par->tec) of_iounmap(&op->resource[0], par->tec, sizeof(struct cg6_tec)); if (par->thc) of_iounmap(&op->resource[0], par->thc, sizeof(struct cg6_thc)); if (par->bt) of_iounmap(&op->resource[0], par->bt, sizeof(struct bt_regs)); if (par->fhc) of_iounmap(&op->resource[0], par->fhc, sizeof(u32)); if (info->screen_base) of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); } static int cg6_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct fb_info *info; struct cg6_par *par; int linebytes, err; int dblbuf; info = framebuffer_alloc(sizeof(struct cg6_par), &op->dev); err = -ENOMEM; if (!info) goto out_err; par = info->par; spin_lock_init(&par->lock); info->fix.smem_start = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); dblbuf = of_getintprop_default(dp, "dblbuf", 0); if (dblbuf) info->fix.smem_len *= 4; par->fbc = of_ioremap(&op->resource[0], CG6_FBC_OFFSET, 4096, "cgsix fbc"); par->tec = of_ioremap(&op->resource[0], CG6_TEC_OFFSET, sizeof(struct cg6_tec), "cgsix tec"); par->thc = of_ioremap(&op->resource[0], CG6_THC_OFFSET, sizeof(struct cg6_thc), "cgsix thc"); par->bt = 
of_ioremap(&op->resource[0], CG6_BROOKTREE_OFFSET, sizeof(struct bt_regs), "cgsix dac"); par->fhc = of_ioremap(&op->resource[0], CG6_FHC_OFFSET, sizeof(u32), "cgsix fhc"); info->flags = FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_READS_FAST; info->fbops = &cg6_ops; info->screen_base = of_ioremap(&op->resource[0], CG6_RAM_OFFSET, info->fix.smem_len, "cgsix ram"); if (!par->fbc || !par->tec || !par->thc || !par->bt || !par->fhc || !info->screen_base) goto out_unmap_regs; info->var.accel_flags = FB_ACCELF_TEXT; cg6_bt_init(par); cg6_chip_init(info); cg6_blank(FB_BLANK_UNBLANK, info); if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_regs; fb_set_cmap(&info->cmap, info); cg6_init_fix(info, linebytes); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%pOF: CGsix [%s] at %lx:%lx\n", dp, info->fix.id, par->which_io, info->fix.smem_start); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_regs: cg6_unmap_regs(op, info, par); framebuffer_release(info); out_err: return err; } static void cg6_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct cg6_par *par = info->par; unregister_framebuffer(info); fb_dealloc_cmap(&info->cmap); cg6_unmap_regs(op, info, par); framebuffer_release(info); } static const struct of_device_id cg6_match[] = { { .name = "cgsix", }, { .name = "cgthree+", }, {}, }; MODULE_DEVICE_TABLE(of, cg6_match); static struct platform_driver cg6_driver = { .driver = { .name = "cg6", .of_match_table = cg6_match, }, .probe = cg6_probe, .remove = cg6_remove, }; static int __init cg6_init(void) { if (fb_get_options("cg6fb", NULL)) return -ENODEV; return platform_driver_register(&cg6_driver); } static void __exit cg6_exit(void) { platform_driver_unregister(&cg6_driver); } module_init(cg6_init); module_exit(cg6_exit); MODULE_DESCRIPTION("framebuffer driver for CGsix chipsets"); MODULE_AUTHOR("David S. Miller <[email protected]>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
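/*
 * Standalone sketch, not part of the original driver: the bitmap-to-font-
 * word packing performed inline by cg6_imageblit() above.  Source pixels
 * are 1 bit per pixel and bytes are packed most-significant first, so a
 * partial word (fewer than 32 remaining pixels) ends up left-aligned in
 * the 32-bit value written to the FBC font register.  The helper name is
 * hypothetical.
 */
static u32 cg6_example_pack_font_word(const u8 *data, int nbytes)
{
	u32 val = 0;
	int i;

	/* nbytes is 1..4; byte 0 lands in bits 31:24, byte 1 in 23:16, ... */
	for (i = 0; i < nbytes; i++)
		val |= (u32)data[i] << (24 - 8 * i);

	return val;
}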
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2012-2019 ARM Limited (or its affiliates). */ #include <crypto/internal/aead.h> #include <crypto/authenc.h> #include <crypto/scatterwalk.h> #include <linux/dmapool.h> #include <linux/dma-mapping.h> #include "cc_buffer_mgr.h" #include "cc_lli_defs.h" #include "cc_cipher.h" #include "cc_hash.h" #include "cc_aead.h" union buffer_array_entry { struct scatterlist *sgl; dma_addr_t buffer_dma; }; struct buffer_array { unsigned int num_of_buffers; union buffer_array_entry entry[MAX_NUM_OF_BUFFERS_IN_MLLI]; unsigned int offset[MAX_NUM_OF_BUFFERS_IN_MLLI]; int nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; int total_data_len[MAX_NUM_OF_BUFFERS_IN_MLLI]; bool is_last[MAX_NUM_OF_BUFFERS_IN_MLLI]; u32 *mlli_nents[MAX_NUM_OF_BUFFERS_IN_MLLI]; }; static inline char *cc_dma_buf_type(enum cc_req_dma_buf_type type) { switch (type) { case CC_DMA_BUF_NULL: return "BUF_NULL"; case CC_DMA_BUF_DLLI: return "BUF_DLLI"; case CC_DMA_BUF_MLLI: return "BUF_MLLI"; default: return "BUF_INVALID"; } } /** * cc_copy_mac() - Copy MAC to temporary location * * @dev: device object * @req: aead request object * @dir: [IN] copy from/to sgl */ static void cc_copy_mac(struct device *dev, struct aead_request *req, enum cc_sg_cpy_direct dir) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); u32 skip = req->assoclen + req->cryptlen; cc_copy_sg_portion(dev, areq_ctx->backup_mac, req->src, (skip - areq_ctx->req_authsize), skip, dir); } /** * cc_get_sgl_nents() - Get scatterlist number of entries. * * @dev: Device object * @sg_list: SG list * @nbytes: [IN] Total SGL data bytes. * @lbytes: [OUT] Returns the amount of bytes at the last entry * * Return: * Number of entries in the scatterlist */ static unsigned int cc_get_sgl_nents(struct device *dev, struct scatterlist *sg_list, unsigned int nbytes, u32 *lbytes) { unsigned int nents = 0; *lbytes = 0; while (nbytes && sg_list) { nents++; /* get the number of bytes in the last entry */ *lbytes = nbytes; nbytes -= (sg_list->length > nbytes) ? nbytes : sg_list->length; sg_list = sg_next(sg_list); } dev_dbg(dev, "nents %d last bytes %d\n", nents, *lbytes); return nents; } /** * cc_copy_sg_portion() - Copy scatter list data, * from to_skip to end, to dest and vice versa * * @dev: Device object * @dest: Buffer to copy to/from * @sg: SG list * @to_skip: Number of bytes to skip before copying * @end: Offset of last byte to copy * @direct: Transfer direction (true == from SG list to buffer, false == from * buffer to SG list) */ void cc_copy_sg_portion(struct device *dev, u8 *dest, struct scatterlist *sg, u32 to_skip, u32 end, enum cc_sg_cpy_direct direct) { u32 nents; nents = sg_nents_for_len(sg, end); sg_copy_buffer(sg, nents, dest, (end - to_skip + 1), to_skip, (direct == CC_SG_TO_BUF)); } static int cc_render_buff_to_mlli(struct device *dev, dma_addr_t buff_dma, u32 buff_size, u32 *curr_nents, u32 **mlli_entry_pp) { u32 *mlli_entry_p = *mlli_entry_pp; u32 new_nents; /* Verify there is no memory overflow*/ new_nents = (*curr_nents + buff_size / CC_MAX_MLLI_ENTRY_SIZE + 1); if (new_nents > MAX_NUM_OF_TOTAL_MLLI_ENTRIES) { dev_err(dev, "Too many mlli entries. 
current %d max %d\n", new_nents, MAX_NUM_OF_TOTAL_MLLI_ENTRIES); return -ENOMEM; } /*handle buffer longer than 64 kbytes */ while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) { cc_lli_set_addr(mlli_entry_p, buff_dma); cc_lli_set_size(mlli_entry_p, CC_MAX_MLLI_ENTRY_SIZE); dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET], mlli_entry_p[LLI_WORD1_OFFSET]); buff_dma += CC_MAX_MLLI_ENTRY_SIZE; buff_size -= CC_MAX_MLLI_ENTRY_SIZE; mlli_entry_p = mlli_entry_p + 2; (*curr_nents)++; } /*Last entry */ cc_lli_set_addr(mlli_entry_p, buff_dma); cc_lli_set_size(mlli_entry_p, buff_size); dev_dbg(dev, "entry[%d]: single_buff=0x%08X size=%08X\n", *curr_nents, mlli_entry_p[LLI_WORD0_OFFSET], mlli_entry_p[LLI_WORD1_OFFSET]); mlli_entry_p = mlli_entry_p + 2; *mlli_entry_pp = mlli_entry_p; (*curr_nents)++; return 0; } static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl, u32 sgl_data_len, u32 sgl_offset, u32 *curr_nents, u32 **mlli_entry_pp) { struct scatterlist *curr_sgl = sgl; u32 *mlli_entry_p = *mlli_entry_pp; s32 rc = 0; for ( ; (curr_sgl && sgl_data_len); curr_sgl = sg_next(curr_sgl)) { u32 entry_data_len = (sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ? sg_dma_len(curr_sgl) - sgl_offset : sgl_data_len; sgl_data_len -= entry_data_len; rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) + sgl_offset, entry_data_len, curr_nents, &mlli_entry_p); if (rc) return rc; sgl_offset = 0; } *mlli_entry_pp = mlli_entry_p; return 0; } static int cc_generate_mlli(struct device *dev, struct buffer_array *sg_data, struct mlli_params *mlli_params, gfp_t flags) { u32 *mlli_p; u32 total_nents = 0, prev_total_nents = 0; int rc = 0, i; dev_dbg(dev, "NUM of SG's = %d\n", sg_data->num_of_buffers); /* Allocate memory from the pointed pool */ mlli_params->mlli_virt_addr = dma_pool_alloc(mlli_params->curr_pool, flags, &mlli_params->mlli_dma_addr); if (!mlli_params->mlli_virt_addr) { dev_err(dev, "dma_pool_alloc() failed\n"); rc = -ENOMEM; goto build_mlli_exit; } /* Point to start of MLLI */ mlli_p = mlli_params->mlli_virt_addr; /* go over all SG's and link it to one MLLI table */ for (i = 0; i < sg_data->num_of_buffers; i++) { union buffer_array_entry *entry = &sg_data->entry[i]; u32 tot_len = sg_data->total_data_len[i]; u32 offset = sg_data->offset[i]; rc = cc_render_sg_to_mlli(dev, entry->sgl, tot_len, offset, &total_nents, &mlli_p); if (rc) return rc; /* set last bit in the current table */ if (sg_data->mlli_nents[i]) { /*Calculate the current MLLI table length for the *length field in the descriptor */ *sg_data->mlli_nents[i] += (total_nents - prev_total_nents); prev_total_nents = total_nents; } } /* Set MLLI size for the bypass operation */ mlli_params->mlli_len = (total_nents * LLI_ENTRY_BYTE_SIZE); dev_dbg(dev, "MLLI params: virt_addr=%pK dma_addr=%pad mlli_len=0x%X\n", mlli_params->mlli_virt_addr, &mlli_params->mlli_dma_addr, mlli_params->mlli_len); build_mlli_exit: return rc; } static void cc_add_sg_entry(struct device *dev, struct buffer_array *sgl_data, unsigned int nents, struct scatterlist *sgl, unsigned int data_len, unsigned int data_offset, bool is_last_table, u32 *mlli_nents) { unsigned int index = sgl_data->num_of_buffers; dev_dbg(dev, "index=%u nents=%u sgl=%pK data_len=0x%08X is_last=%d\n", index, nents, sgl, data_len, is_last_table); sgl_data->nents[index] = nents; sgl_data->entry[index].sgl = sgl; sgl_data->offset[index] = data_offset; sgl_data->total_data_len[index] = data_len; sgl_data->is_last[index] = is_last_table; 
sgl_data->mlli_nents[index] = mlli_nents; if (sgl_data->mlli_nents[index]) *sgl_data->mlli_nents[index] = 0; sgl_data->num_of_buffers++; } static int cc_map_sg(struct device *dev, struct scatterlist *sg, unsigned int nbytes, int direction, u32 *nents, u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents) { int ret = 0; if (!nbytes) { *mapped_nents = 0; *lbytes = 0; *nents = 0; return 0; } *nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes); if (*nents > max_sg_nents) { *nents = 0; dev_err(dev, "Too many fragments. current %d max %d\n", *nents, max_sg_nents); return -ENOMEM; } ret = dma_map_sg(dev, sg, *nents, direction); if (!ret) { *nents = 0; dev_err(dev, "dma_map_sg() sg buffer failed %d\n", ret); return -ENOMEM; } *mapped_nents = ret; return 0; } static int cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx, u8 *config_data, struct buffer_array *sg_data, unsigned int assoclen) { dev_dbg(dev, " handle additional data config set to DLLI\n"); /* create sg for the current buffer */ sg_init_one(&areq_ctx->ccm_adata_sg, config_data, AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size); if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) { dev_err(dev, "dma_map_sg() config buffer failed\n"); return -ENOMEM; } dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", &sg_dma_address(&areq_ctx->ccm_adata_sg), sg_page(&areq_ctx->ccm_adata_sg), sg_virt(&areq_ctx->ccm_adata_sg), areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length); /* prepare for case of MLLI */ if (assoclen > 0) { cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg, (AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size), 0, false, NULL); } return 0; } static int cc_set_hash_buf(struct device *dev, struct ahash_req_ctx *areq_ctx, u8 *curr_buff, u32 curr_buff_cnt, struct buffer_array *sg_data) { dev_dbg(dev, " handle curr buff %x set to DLLI\n", curr_buff_cnt); /* create sg for the current buffer */ sg_init_one(areq_ctx->buff_sg, curr_buff, curr_buff_cnt); if (dma_map_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE) != 1) { dev_err(dev, "dma_map_sg() src buffer failed\n"); return -ENOMEM; } dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n", &sg_dma_address(areq_ctx->buff_sg), sg_page(areq_ctx->buff_sg), sg_virt(areq_ctx->buff_sg), areq_ctx->buff_sg->offset, areq_ctx->buff_sg->length); areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; areq_ctx->curr_sg = areq_ctx->buff_sg; areq_ctx->in_nents = 0; /* prepare for case of MLLI */ cc_add_sg_entry(dev, sg_data, 1, areq_ctx->buff_sg, curr_buff_cnt, 0, false, NULL); return 0; } void cc_unmap_cipher_request(struct device *dev, void *ctx, unsigned int ivsize, struct scatterlist *src, struct scatterlist *dst) { struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; if (req_ctx->gen_ctx.iv_dma_addr) { dev_dbg(dev, "Unmapped iv: iv_dma_addr=%pad iv_size=%u\n", &req_ctx->gen_ctx.iv_dma_addr, ivsize); dma_unmap_single(dev, req_ctx->gen_ctx.iv_dma_addr, ivsize, DMA_BIDIRECTIONAL); } /* Release pool */ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI && req_ctx->mlli_params.mlli_virt_addr) { dma_pool_free(req_ctx->mlli_params.curr_pool, req_ctx->mlli_params.mlli_virt_addr, req_ctx->mlli_params.mlli_dma_addr); } if (src != dst) { dma_unmap_sg(dev, src, req_ctx->in_nents, DMA_TO_DEVICE); dma_unmap_sg(dev, dst, req_ctx->out_nents, DMA_FROM_DEVICE); dev_dbg(dev, "Unmapped req->dst=%pK\n", sg_virt(dst)); dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); } else { dma_unmap_sg(dev, src, req_ctx->in_nents, 
DMA_BIDIRECTIONAL); dev_dbg(dev, "Unmapped req->src=%pK\n", sg_virt(src)); } } int cc_map_cipher_request(struct cc_drvdata *drvdata, void *ctx, unsigned int ivsize, unsigned int nbytes, void *info, struct scatterlist *src, struct scatterlist *dst, gfp_t flags) { struct cipher_req_ctx *req_ctx = (struct cipher_req_ctx *)ctx; struct mlli_params *mlli_params = &req_ctx->mlli_params; struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; u32 dummy = 0; int rc = 0; u32 mapped_nents = 0; int src_direction = (src != dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL); req_ctx->dma_buf_type = CC_DMA_BUF_DLLI; mlli_params->curr_pool = NULL; sg_data.num_of_buffers = 0; /* Map IV buffer */ if (ivsize) { dump_byte_array("iv", info, ivsize); req_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, info, ivsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, req_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", ivsize, info); return -ENOMEM; } dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", ivsize, info, &req_ctx->gen_ctx.iv_dma_addr); } else { req_ctx->gen_ctx.iv_dma_addr = 0; } /* Map the src SGL */ rc = cc_map_sg(dev, src, nbytes, src_direction, &req_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto cipher_exit; if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; if (src == dst) { /* Handle inplace operation */ if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { req_ctx->out_nents = 0; cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src, nbytes, 0, true, &req_ctx->in_mlli_nents); } } else { /* Map the dst sg */ rc = cc_map_sg(dev, dst, nbytes, DMA_FROM_DEVICE, &req_ctx->out_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto cipher_exit; if (mapped_nents > 1) req_ctx->dma_buf_type = CC_DMA_BUF_MLLI; if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { cc_add_sg_entry(dev, &sg_data, req_ctx->in_nents, src, nbytes, 0, true, &req_ctx->in_mlli_nents); cc_add_sg_entry(dev, &sg_data, req_ctx->out_nents, dst, nbytes, 0, true, &req_ctx->out_mlli_nents); } } if (req_ctx->dma_buf_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); if (rc) goto cipher_exit; } dev_dbg(dev, "areq_ctx->dma_buf_type = %s\n", cc_dma_buf_type(req_ctx->dma_buf_type)); return 0; cipher_exit: cc_unmap_cipher_request(dev, req_ctx, ivsize, src, dst); return rc; } void cc_unmap_aead_request(struct device *dev, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct cc_drvdata *drvdata = dev_get_drvdata(dev); int src_direction = (req->src != req->dst ? 
DMA_TO_DEVICE : DMA_BIDIRECTIONAL); if (areq_ctx->mac_buf_dma_addr) { dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr, MAX_MAC_SIZE, DMA_BIDIRECTIONAL); } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { if (areq_ctx->hkey_dma_addr) { dma_unmap_single(dev, areq_ctx->hkey_dma_addr, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); } if (areq_ctx->gcm_block_len_dma_addr) { dma_unmap_single(dev, areq_ctx->gcm_block_len_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } if (areq_ctx->gcm_iv_inc1_dma_addr) { dma_unmap_single(dev, areq_ctx->gcm_iv_inc1_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } if (areq_ctx->gcm_iv_inc2_dma_addr) { dma_unmap_single(dev, areq_ctx->gcm_iv_inc2_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } } if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if (areq_ctx->ccm_iv0_dma_addr) { dma_unmap_single(dev, areq_ctx->ccm_iv0_dma_addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); } dma_unmap_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE); } if (areq_ctx->gen_ctx.iv_dma_addr) { dma_unmap_single(dev, areq_ctx->gen_ctx.iv_dma_addr, hw_iv_size, DMA_BIDIRECTIONAL); kfree_sensitive(areq_ctx->gen_ctx.iv); } /* Release pool */ if ((areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) && (areq_ctx->mlli_params.mlli_virt_addr)) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); } dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n", sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents, areq_ctx->assoclen, req->cryptlen); dma_unmap_sg(dev, req->src, areq_ctx->src.mapped_nents, src_direction); if (req->src != req->dst) { dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n", sg_virt(req->dst)); dma_unmap_sg(dev, req->dst, areq_ctx->dst.mapped_nents, DMA_FROM_DEVICE); } if (drvdata->coherent && areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && req->src == req->dst) { /* copy back mac from temporary location to deal with possible * data memory overriding that caused by cache coherence * problem. 
*/ cc_copy_mac(dev, req, CC_SG_FROM_BUF); } } static bool cc_is_icv_frag(unsigned int sgl_nents, unsigned int authsize, u32 last_entry_data_size) { return ((sgl_nents > 1) && (last_entry_data_size < authsize)); } static int cc_aead_chain_iv(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, bool is_last, bool do_chain) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); unsigned int hw_iv_size = areq_ctx->hw_iv_size; struct device *dev = drvdata_to_dev(drvdata); gfp_t flags = cc_gfp_flags(&req->base); int rc = 0; if (!req->iv) { areq_ctx->gen_ctx.iv_dma_addr = 0; areq_ctx->gen_ctx.iv = NULL; goto chain_iv_exit; } areq_ctx->gen_ctx.iv = kmemdup(req->iv, hw_iv_size, flags); if (!areq_ctx->gen_ctx.iv) return -ENOMEM; areq_ctx->gen_ctx.iv_dma_addr = dma_map_single(dev, areq_ctx->gen_ctx.iv, hw_iv_size, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, areq_ctx->gen_ctx.iv_dma_addr)) { dev_err(dev, "Mapping iv %u B at va=%pK for DMA failed\n", hw_iv_size, req->iv); kfree_sensitive(areq_ctx->gen_ctx.iv); areq_ctx->gen_ctx.iv = NULL; rc = -ENOMEM; goto chain_iv_exit; } dev_dbg(dev, "Mapped iv %u B at va=%pK to dma=%pad\n", hw_iv_size, req->iv, &areq_ctx->gen_ctx.iv_dma_addr); chain_iv_exit: return rc; } static int cc_aead_chain_assoc(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, bool is_last, bool do_chain) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); int rc = 0; int mapped_nents = 0; struct device *dev = drvdata_to_dev(drvdata); if (!sg_data) { rc = -EINVAL; goto chain_assoc_exit; } if (areq_ctx->assoclen == 0) { areq_ctx->assoc_buff_type = CC_DMA_BUF_NULL; areq_ctx->assoc.nents = 0; areq_ctx->assoc.mlli_nents = 0; dev_dbg(dev, "Chain assoc of length 0: buff_type=%s nents=%u\n", cc_dma_buf_type(areq_ctx->assoc_buff_type), areq_ctx->assoc.nents); goto chain_assoc_exit; } mapped_nents = sg_nents_for_len(req->src, areq_ctx->assoclen); if (mapped_nents < 0) return mapped_nents; if (mapped_nents > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { dev_err(dev, "Too many fragments. current %d max %d\n", mapped_nents, LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); return -ENOMEM; } areq_ctx->assoc.nents = mapped_nents; /* in CCM case we have additional entry for * ccm header configurations */ if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { if ((mapped_nents + 1) > LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES) { dev_err(dev, "CCM case.Too many fragments. 
Current %d max %d\n", (areq_ctx->assoc.nents + 1), LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES); rc = -ENOMEM; goto chain_assoc_exit; } } if (mapped_nents == 1 && areq_ctx->ccm_hdr_size == ccm_header_size_null) areq_ctx->assoc_buff_type = CC_DMA_BUF_DLLI; else areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; if (do_chain || areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { dev_dbg(dev, "Chain assoc: buff_type=%s nents=%u\n", cc_dma_buf_type(areq_ctx->assoc_buff_type), areq_ctx->assoc.nents); cc_add_sg_entry(dev, sg_data, areq_ctx->assoc.nents, req->src, areq_ctx->assoclen, 0, is_last, &areq_ctx->assoc.mlli_nents); areq_ctx->assoc_buff_type = CC_DMA_BUF_MLLI; } chain_assoc_exit: return rc; } static void cc_prepare_aead_data_dlli(struct aead_request *req, u32 *src_last_bytes, u32 *dst_last_bytes) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; struct scatterlist *sg; ssize_t offset; areq_ctx->is_icv_fragmented = false; if ((req->src == req->dst) || direct == DRV_CRYPTO_DIRECTION_DECRYPT) { sg = areq_ctx->src_sgl; offset = *src_last_bytes - authsize; } else { sg = areq_ctx->dst_sgl; offset = *dst_last_bytes - authsize; } areq_ctx->icv_dma_addr = sg_dma_address(sg) + offset; areq_ctx->icv_virt_addr = sg_virt(sg) + offset; } static void cc_prepare_aead_data_mlli(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, u32 *src_last_bytes, u32 *dst_last_bytes, bool is_last_table) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; struct device *dev = drvdata_to_dev(drvdata); struct scatterlist *sg; if (req->src == req->dst) { /*INPLACE*/ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, areq_ctx->src_sgl, areq_ctx->cryptlen, areq_ctx->src_offset, is_last_table, &areq_ctx->src.mlli_nents); areq_ctx->is_icv_fragmented = cc_is_icv_frag(areq_ctx->src.nents, authsize, *src_last_bytes); if (areq_ctx->is_icv_fragmented) { /* Backup happens only when ICV is fragmented, ICV * verification is made by CPU compare in order to * simplify MAC verification upon request completion */ if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { /* In coherent platforms (e.g. ACP) * already copying ICV for any * INPLACE-DECRYPT operation, hence * we must neglect this code. */ if (!drvdata->coherent) cc_copy_mac(dev, req, CC_SG_TO_BUF); areq_ctx->icv_virt_addr = areq_ctx->backup_mac; } else { areq_ctx->icv_virt_addr = areq_ctx->mac_buf; areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; } } else { /* Contig. 
ICV */ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; /*Should hanlde if the sg is not contig.*/ areq_ctx->icv_dma_addr = sg_dma_address(sg) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt(sg) + (*src_last_bytes - authsize); } } else if (direct == DRV_CRYPTO_DIRECTION_DECRYPT) { /*NON-INPLACE and DECRYPT*/ cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, areq_ctx->src_sgl, areq_ctx->cryptlen, areq_ctx->src_offset, is_last_table, &areq_ctx->src.mlli_nents); cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, areq_ctx->dst_sgl, areq_ctx->cryptlen, areq_ctx->dst_offset, is_last_table, &areq_ctx->dst.mlli_nents); areq_ctx->is_icv_fragmented = cc_is_icv_frag(areq_ctx->src.nents, authsize, *src_last_bytes); /* Backup happens only when ICV is fragmented, ICV * verification is made by CPU compare in order to simplify * MAC verification upon request completion */ if (areq_ctx->is_icv_fragmented) { cc_copy_mac(dev, req, CC_SG_TO_BUF); areq_ctx->icv_virt_addr = areq_ctx->backup_mac; } else { /* Contig. ICV */ sg = &areq_ctx->src_sgl[areq_ctx->src.nents - 1]; /*Should hanlde if the sg is not contig.*/ areq_ctx->icv_dma_addr = sg_dma_address(sg) + (*src_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt(sg) + (*src_last_bytes - authsize); } } else { /*NON-INPLACE and ENCRYPT*/ cc_add_sg_entry(dev, sg_data, areq_ctx->dst.nents, areq_ctx->dst_sgl, areq_ctx->cryptlen, areq_ctx->dst_offset, is_last_table, &areq_ctx->dst.mlli_nents); cc_add_sg_entry(dev, sg_data, areq_ctx->src.nents, areq_ctx->src_sgl, areq_ctx->cryptlen, areq_ctx->src_offset, is_last_table, &areq_ctx->src.mlli_nents); areq_ctx->is_icv_fragmented = cc_is_icv_frag(areq_ctx->dst.nents, authsize, *dst_last_bytes); if (!areq_ctx->is_icv_fragmented) { sg = &areq_ctx->dst_sgl[areq_ctx->dst.nents - 1]; /* Contig. ICV */ areq_ctx->icv_dma_addr = sg_dma_address(sg) + (*dst_last_bytes - authsize); areq_ctx->icv_virt_addr = sg_virt(sg) + (*dst_last_bytes - authsize); } else { areq_ctx->icv_dma_addr = areq_ctx->mac_buf_dma_addr; areq_ctx->icv_virt_addr = areq_ctx->mac_buf; } } } static int cc_aead_chain_data(struct cc_drvdata *drvdata, struct aead_request *req, struct buffer_array *sg_data, bool is_last_table, bool do_chain) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct device *dev = drvdata_to_dev(drvdata); enum drv_crypto_direction direct = areq_ctx->gen_ctx.op_type; unsigned int authsize = areq_ctx->req_authsize; unsigned int src_last_bytes = 0, dst_last_bytes = 0; int rc = 0; u32 src_mapped_nents = 0, dst_mapped_nents = 0; u32 offset = 0; /* non-inplace mode */ unsigned int size_for_map = req->assoclen + req->cryptlen; u32 sg_index = 0; u32 size_to_skip = req->assoclen; struct scatterlist *sgl; offset = size_to_skip; if (!sg_data) return -EINVAL; areq_ctx->src_sgl = req->src; areq_ctx->dst_sgl = req->dst; size_for_map += (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) ? authsize : 0; src_mapped_nents = cc_get_sgl_nents(dev, req->src, size_for_map, &src_last_bytes); sg_index = areq_ctx->src_sgl->length; //check where the data starts while (src_mapped_nents && (sg_index <= size_to_skip)) { src_mapped_nents--; offset -= areq_ctx->src_sgl->length; sgl = sg_next(areq_ctx->src_sgl); if (!sgl) break; areq_ctx->src_sgl = sgl; sg_index += areq_ctx->src_sgl->length; } if (src_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { dev_err(dev, "Too many fragments. 
current %d max %d\n", src_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); return -ENOMEM; } areq_ctx->src.nents = src_mapped_nents; areq_ctx->src_offset = offset; if (req->src != req->dst) { size_for_map = req->assoclen + req->cryptlen; if (direct == DRV_CRYPTO_DIRECTION_ENCRYPT) size_for_map += authsize; else size_for_map -= authsize; rc = cc_map_sg(dev, req->dst, size_for_map, DMA_FROM_DEVICE, &areq_ctx->dst.mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dst_last_bytes, &dst_mapped_nents); if (rc) goto chain_data_exit; } dst_mapped_nents = cc_get_sgl_nents(dev, req->dst, size_for_map, &dst_last_bytes); sg_index = areq_ctx->dst_sgl->length; offset = size_to_skip; //check where the data starts while (dst_mapped_nents && sg_index <= size_to_skip) { dst_mapped_nents--; offset -= areq_ctx->dst_sgl->length; sgl = sg_next(areq_ctx->dst_sgl); if (!sgl) break; areq_ctx->dst_sgl = sgl; sg_index += areq_ctx->dst_sgl->length; } if (dst_mapped_nents > LLI_MAX_NUM_OF_DATA_ENTRIES) { dev_err(dev, "Too many fragments. current %d max %d\n", dst_mapped_nents, LLI_MAX_NUM_OF_DATA_ENTRIES); return -ENOMEM; } areq_ctx->dst.nents = dst_mapped_nents; areq_ctx->dst_offset = offset; if (src_mapped_nents > 1 || dst_mapped_nents > 1 || do_chain) { areq_ctx->data_buff_type = CC_DMA_BUF_MLLI; cc_prepare_aead_data_mlli(drvdata, req, sg_data, &src_last_bytes, &dst_last_bytes, is_last_table); } else { areq_ctx->data_buff_type = CC_DMA_BUF_DLLI; cc_prepare_aead_data_dlli(req, &src_last_bytes, &dst_last_bytes); } chain_data_exit: return rc; } static void cc_update_aead_mlli_nents(struct cc_drvdata *drvdata, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); u32 curr_mlli_size = 0; if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI) { areq_ctx->assoc.sram_addr = drvdata->mlli_sram_addr; curr_mlli_size = areq_ctx->assoc.mlli_nents * LLI_ENTRY_BYTE_SIZE; } if (areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { /*Inplace case dst nents equal to src nents*/ if (req->src == req->dst) { areq_ctx->dst.mlli_nents = areq_ctx->src.mlli_nents; areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + curr_mlli_size; areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr; if (!areq_ctx->is_single_pass) areq_ctx->assoc.mlli_nents += areq_ctx->src.mlli_nents; } else { if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT) { areq_ctx->src.sram_addr = drvdata->mlli_sram_addr + curr_mlli_size; areq_ctx->dst.sram_addr = areq_ctx->src.sram_addr + areq_ctx->src.mlli_nents * LLI_ENTRY_BYTE_SIZE; if (!areq_ctx->is_single_pass) areq_ctx->assoc.mlli_nents += areq_ctx->src.mlli_nents; } else { areq_ctx->dst.sram_addr = drvdata->mlli_sram_addr + curr_mlli_size; areq_ctx->src.sram_addr = areq_ctx->dst.sram_addr + areq_ctx->dst.mlli_nents * LLI_ENTRY_BYTE_SIZE; if (!areq_ctx->is_single_pass) areq_ctx->assoc.mlli_nents += areq_ctx->dst.mlli_nents; } } } } int cc_map_aead_request(struct cc_drvdata *drvdata, struct aead_request *req) { struct aead_req_ctx *areq_ctx = aead_request_ctx_dma(req); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct device *dev = drvdata_to_dev(drvdata); struct buffer_array sg_data; unsigned int authsize = areq_ctx->req_authsize; int rc = 0; dma_addr_t dma_addr; u32 mapped_nents = 0; u32 dummy = 0; /*used for the assoc data fragments */ u32 size_to_map; gfp_t flags = cc_gfp_flags(&req->base); mlli_params->curr_pool = NULL; sg_data.num_of_buffers = 0; /* copy mac to a temporary location to deal with possible * data memory overriding that caused by cache coherence problem. 
*/ if (drvdata->coherent && areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_DECRYPT && req->src == req->dst) cc_copy_mac(dev, req, CC_SG_TO_BUF); /* cacluate the size for cipher remove ICV in decrypt*/ areq_ctx->cryptlen = (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) ? req->cryptlen : (req->cryptlen - authsize); dma_addr = dma_map_single(dev, areq_ctx->mac_buf, MAX_MAC_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", MAX_MAC_SIZE, areq_ctx->mac_buf); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->mac_buf_dma_addr = dma_addr; if (areq_ctx->ccm_hdr_size != ccm_header_size_null) { void *addr = areq_ctx->ccm_config + CCM_CTR_COUNT_0_OFFSET; dma_addr = dma_map_single(dev, addr, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping mac_buf %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, addr); areq_ctx->ccm_iv0_dma_addr = 0; rc = -ENOMEM; goto aead_map_failure; } areq_ctx->ccm_iv0_dma_addr = dma_addr; rc = cc_set_aead_conf_buf(dev, areq_ctx, areq_ctx->ccm_config, &sg_data, areq_ctx->assoclen); if (rc) goto aead_map_failure; } if (areq_ctx->cipher_mode == DRV_CIPHER_GCTR) { dma_addr = dma_map_single(dev, areq_ctx->hkey, AES_BLOCK_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping hkey %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, areq_ctx->hkey); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->hkey_dma_addr = dma_addr; dma_addr = dma_map_single(dev, &areq_ctx->gcm_len_block, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping gcm_len_block %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, &areq_ctx->gcm_len_block); rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_block_len_dma_addr = dma_addr; dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc1, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping gcm_iv_inc1 %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc1)); areq_ctx->gcm_iv_inc1_dma_addr = 0; rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_iv_inc1_dma_addr = dma_addr; dma_addr = dma_map_single(dev, areq_ctx->gcm_iv_inc2, AES_BLOCK_SIZE, DMA_TO_DEVICE); if (dma_mapping_error(dev, dma_addr)) { dev_err(dev, "Mapping gcm_iv_inc2 %u B at va=%pK for DMA failed\n", AES_BLOCK_SIZE, (areq_ctx->gcm_iv_inc2)); areq_ctx->gcm_iv_inc2_dma_addr = 0; rc = -ENOMEM; goto aead_map_failure; } areq_ctx->gcm_iv_inc2_dma_addr = dma_addr; } size_to_map = req->cryptlen + req->assoclen; /* If we do in-place encryption, we also need the auth tag */ if ((areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT) && (req->src == req->dst)) { size_to_map += authsize; } rc = cc_map_sg(dev, req->src, size_to_map, (req->src != req->dst ? DMA_TO_DEVICE : DMA_BIDIRECTIONAL), &areq_ctx->src.mapped_nents, (LLI_MAX_NUM_OF_ASSOC_DATA_ENTRIES + LLI_MAX_NUM_OF_DATA_ENTRIES), &dummy, &mapped_nents); if (rc) goto aead_map_failure; if (areq_ctx->is_single_pass) { /* * Create MLLI table for: * (1) Assoc. data * (2) Src/Dst SGLs * Note: IV is contg. 
buffer (not an SGL) */ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, true, false); if (rc) goto aead_map_failure; rc = cc_aead_chain_iv(drvdata, req, &sg_data, true, false); if (rc) goto aead_map_failure; rc = cc_aead_chain_data(drvdata, req, &sg_data, true, false); if (rc) goto aead_map_failure; } else { /* DOUBLE-PASS flow */ /* * Prepare MLLI table(s) in this order: * * If ENCRYPT/DECRYPT (inplace): * (1) MLLI table for assoc * (2) IV entry (chained right after end of assoc) * (3) MLLI for src/dst (inplace operation) * * If ENCRYPT (non-inplace) * (1) MLLI table for assoc * (2) IV entry (chained right after end of assoc) * (3) MLLI for dst * (4) MLLI for src * * If DECRYPT (non-inplace) * (1) MLLI table for assoc * (2) IV entry (chained right after end of assoc) * (3) MLLI for src * (4) MLLI for dst */ rc = cc_aead_chain_assoc(drvdata, req, &sg_data, false, true); if (rc) goto aead_map_failure; rc = cc_aead_chain_iv(drvdata, req, &sg_data, false, true); if (rc) goto aead_map_failure; rc = cc_aead_chain_data(drvdata, req, &sg_data, true, true); if (rc) goto aead_map_failure; } /* Mlli support -start building the MLLI according to the above * results */ if (areq_ctx->assoc_buff_type == CC_DMA_BUF_MLLI || areq_ctx->data_buff_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; rc = cc_generate_mlli(dev, &sg_data, mlli_params, flags); if (rc) goto aead_map_failure; cc_update_aead_mlli_nents(drvdata, req); dev_dbg(dev, "assoc params mn %d\n", areq_ctx->assoc.mlli_nents); dev_dbg(dev, "src params mn %d\n", areq_ctx->src.mlli_nents); dev_dbg(dev, "dst params mn %d\n", areq_ctx->dst.mlli_nents); } return 0; aead_map_failure: cc_unmap_aead_request(dev, req); return rc; } int cc_map_hash_request_final(struct cc_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, bool do_update, gfp_t flags) { struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; struct device *dev = drvdata_to_dev(drvdata); u8 *curr_buff = cc_hash_buf(areq_ctx); u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); struct mlli_params *mlli_params = &areq_ctx->mlli_params; struct buffer_array sg_data; int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; dev_dbg(dev, "final params : curr_buff=%pK curr_buff_cnt=0x%X nbytes = 0x%X src=%pK curr_index=%u\n", curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); /* Init the type of the dma buffer */ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; mlli_params->curr_pool = NULL; sg_data.num_of_buffers = 0; areq_ctx->in_nents = 0; if (nbytes == 0 && *curr_buff_cnt == 0) { /* nothing to do */ return 0; } /* map the previous buffer */ if (*curr_buff_cnt) { rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, &sg_data); if (rc) return rc; } if (src && nbytes > 0 && do_update) { rc = cc_map_sg(dev, src, nbytes, DMA_TO_DEVICE, &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto unmap_curr_buff; if (src && mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { memcpy(areq_ctx->buff_sg, src, sizeof(struct scatterlist)); areq_ctx->buff_sg->length = nbytes; areq_ctx->curr_sg = areq_ctx->buff_sg; areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; } else { areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; } } /*build mlli */ if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, nbytes, 0, true, &areq_ctx->mlli_nents); rc = cc_generate_mlli(dev, 
&sg_data, mlli_params, flags); if (rc) goto fail_unmap_din; } /* change the buffer index for the unmap function */ areq_ctx->buff_index = (areq_ctx->buff_index ^ 1); dev_dbg(dev, "areq_ctx->data_dma_buf_type = %s\n", cc_dma_buf_type(areq_ctx->data_dma_buf_type)); return 0; fail_unmap_din: dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); return rc; } int cc_map_hash_request_update(struct cc_drvdata *drvdata, void *ctx, struct scatterlist *src, unsigned int nbytes, unsigned int block_size, gfp_t flags) { struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; struct device *dev = drvdata_to_dev(drvdata); u8 *curr_buff = cc_hash_buf(areq_ctx); u32 *curr_buff_cnt = cc_hash_buf_cnt(areq_ctx); u8 *next_buff = cc_next_buf(areq_ctx); u32 *next_buff_cnt = cc_next_buf_cnt(areq_ctx); struct mlli_params *mlli_params = &areq_ctx->mlli_params; unsigned int update_data_len; u32 total_in_len = nbytes + *curr_buff_cnt; struct buffer_array sg_data; unsigned int swap_index = 0; int rc = 0; u32 dummy = 0; u32 mapped_nents = 0; dev_dbg(dev, " update params : curr_buff=%pK curr_buff_cnt=0x%X nbytes=0x%X src=%pK curr_index=%u\n", curr_buff, *curr_buff_cnt, nbytes, src, areq_ctx->buff_index); /* Init the type of the dma buffer */ areq_ctx->data_dma_buf_type = CC_DMA_BUF_NULL; mlli_params->curr_pool = NULL; areq_ctx->curr_sg = NULL; sg_data.num_of_buffers = 0; areq_ctx->in_nents = 0; if (total_in_len < block_size) { dev_dbg(dev, " less than one block: curr_buff=%pK *curr_buff_cnt=0x%X copy_to=%pK\n", curr_buff, *curr_buff_cnt, &curr_buff[*curr_buff_cnt]); areq_ctx->in_nents = sg_nents_for_len(src, nbytes); sg_copy_to_buffer(src, areq_ctx->in_nents, &curr_buff[*curr_buff_cnt], nbytes); *curr_buff_cnt += nbytes; return 1; } /* Calculate the residue size*/ *next_buff_cnt = total_in_len & (block_size - 1); /* update data len */ update_data_len = total_in_len - *next_buff_cnt; dev_dbg(dev, " temp length : *next_buff_cnt=0x%X update_data_len=0x%X\n", *next_buff_cnt, update_data_len); /* Copy the new residue to next buffer */ if (*next_buff_cnt) { dev_dbg(dev, " handle residue: next buff %pK skip data %u residue %u\n", next_buff, (update_data_len - *curr_buff_cnt), *next_buff_cnt); cc_copy_sg_portion(dev, next_buff, src, (update_data_len - *curr_buff_cnt), nbytes, CC_SG_TO_BUF); /* change the buffer index for next operation */ swap_index = 1; } if (*curr_buff_cnt) { rc = cc_set_hash_buf(dev, areq_ctx, curr_buff, *curr_buff_cnt, &sg_data); if (rc) return rc; /* change the buffer index for next operation */ swap_index = 1; } if (update_data_len > *curr_buff_cnt) { rc = cc_map_sg(dev, src, (update_data_len - *curr_buff_cnt), DMA_TO_DEVICE, &areq_ctx->in_nents, LLI_MAX_NUM_OF_DATA_ENTRIES, &dummy, &mapped_nents); if (rc) goto unmap_curr_buff; if (mapped_nents == 1 && areq_ctx->data_dma_buf_type == CC_DMA_BUF_NULL) { /* only one entry in the SG and no previous data */ memcpy(areq_ctx->buff_sg, src, sizeof(struct scatterlist)); areq_ctx->buff_sg->length = update_data_len; areq_ctx->data_dma_buf_type = CC_DMA_BUF_DLLI; areq_ctx->curr_sg = areq_ctx->buff_sg; } else { areq_ctx->data_dma_buf_type = CC_DMA_BUF_MLLI; } } if (areq_ctx->data_dma_buf_type == CC_DMA_BUF_MLLI) { mlli_params->curr_pool = drvdata->mlli_buffs_pool; /* add the src data to the sg_data */ cc_add_sg_entry(dev, &sg_data, areq_ctx->in_nents, src, (update_data_len - *curr_buff_cnt), 0, true, &areq_ctx->mlli_nents); rc = cc_generate_mlli(dev, &sg_data, 
mlli_params, flags); if (rc) goto fail_unmap_din; } areq_ctx->buff_index = (areq_ctx->buff_index ^ swap_index); return 0; fail_unmap_din: dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); unmap_curr_buff: if (*curr_buff_cnt) dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); return rc; } void cc_unmap_hash_request(struct device *dev, void *ctx, struct scatterlist *src, bool do_revert) { struct ahash_req_ctx *areq_ctx = (struct ahash_req_ctx *)ctx; u32 *prev_len = cc_next_buf_cnt(areq_ctx); /*In case a pool was set, a table was *allocated and should be released */ if (areq_ctx->mlli_params.curr_pool) { dev_dbg(dev, "free MLLI buffer: dma=%pad virt=%pK\n", &areq_ctx->mlli_params.mlli_dma_addr, areq_ctx->mlli_params.mlli_virt_addr); dma_pool_free(areq_ctx->mlli_params.curr_pool, areq_ctx->mlli_params.mlli_virt_addr, areq_ctx->mlli_params.mlli_dma_addr); } if (src && areq_ctx->in_nents) { dev_dbg(dev, "Unmapped sg src: virt=%pK dma=%pad len=0x%X\n", sg_virt(src), &sg_dma_address(src), sg_dma_len(src)); dma_unmap_sg(dev, src, areq_ctx->in_nents, DMA_TO_DEVICE); } if (*prev_len) { dev_dbg(dev, "Unmapped buffer: areq_ctx->buff_sg=%pK dma=%pad len 0x%X\n", sg_virt(areq_ctx->buff_sg), &sg_dma_address(areq_ctx->buff_sg), sg_dma_len(areq_ctx->buff_sg)); dma_unmap_sg(dev, areq_ctx->buff_sg, 1, DMA_TO_DEVICE); if (!do_revert) { /* clean the previous data length for update * operation */ *prev_len = 0; } else { areq_ctx->buff_index ^= 1; } } } int cc_buffer_mgr_init(struct cc_drvdata *drvdata) { struct device *dev = drvdata_to_dev(drvdata); drvdata->mlli_buffs_pool = dma_pool_create("dx_single_mlli_tables", dev, MAX_NUM_OF_TOTAL_MLLI_ENTRIES * LLI_ENTRY_BYTE_SIZE, MLLI_TABLE_MIN_ALIGNMENT, 0); if (!drvdata->mlli_buffs_pool) return -ENOMEM; return 0; } int cc_buffer_mgr_fini(struct cc_drvdata *drvdata) { dma_pool_destroy(drvdata->mlli_buffs_pool); return 0; }
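/*
 * Illustrative arithmetic, not part of the original driver: how many MLLI
 * entries cc_render_buff_to_mlli() above emits for one contiguous DMA
 * buffer, and how large the resulting table becomes.  A buffer is split
 * into chunks of at most CC_MAX_MLLI_ENTRY_SIZE bytes plus exactly one
 * entry for the remainder, and each entry occupies LLI_ENTRY_BYTE_SIZE
 * bytes in the table (see cc_generate_mlli()).  The helper names are
 * hypothetical.
 */
static u32 cc_example_buff_mlli_entries(u32 buff_size)
{
	u32 nents = 0;

	/* Mirrors the while() loop: full-sized chunks first... */
	while (buff_size > CC_MAX_MLLI_ENTRY_SIZE) {
		buff_size -= CC_MAX_MLLI_ENTRY_SIZE;
		nents++;
	}

	/* ...then a single trailing entry for whatever is left. */
	return nents + 1;
}

static u32 cc_example_mlli_table_bytes(u32 total_nents)
{
	/* Matches the mlli_len computation in cc_generate_mlli(). */
	return total_nents * LLI_ENTRY_BYTE_SIZE;
}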
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef TARGET_CORE_BACKEND_H #define TARGET_CORE_BACKEND_H #include <linux/types.h> #include <linux/unaligned.h> #include <target/target_core_base.h> #define TRANSPORT_FLAG_PASSTHROUGH 0x1 /* * ALUA commands, state checks and setup operations are handled by the * backend module. */ #define TRANSPORT_FLAG_PASSTHROUGH_ALUA 0x2 #define TRANSPORT_FLAG_PASSTHROUGH_PGR 0x4 struct block_device; struct scatterlist; struct target_backend_ops { char name[16]; char inquiry_prod[16]; char inquiry_rev[4]; struct module *owner; u8 transport_flags_default; u8 transport_flags_changeable; int (*attach_hba)(struct se_hba *, u32); void (*detach_hba)(struct se_hba *); int (*pmode_enable_hba)(struct se_hba *, unsigned long); struct se_device *(*alloc_device)(struct se_hba *, const char *); int (*configure_device)(struct se_device *); void (*destroy_device)(struct se_device *); void (*free_device)(struct se_device *device); struct se_dev_plug *(*plug_device)(struct se_device *se_dev); void (*unplug_device)(struct se_dev_plug *se_plug); bool (*configure_unmap)(struct se_device *se_dev); ssize_t (*set_configfs_dev_params)(struct se_device *, const char *, ssize_t); ssize_t (*show_configfs_dev_params)(struct se_device *, char *); sense_reason_t (*parse_cdb)(struct se_cmd *cmd); void (*tmr_notify)(struct se_device *se_dev, enum tcm_tmreq_table, struct list_head *aborted_cmds); u32 (*get_device_type)(struct se_device *); sector_t (*get_blocks)(struct se_device *); sector_t (*get_alignment_offset_lbas)(struct se_device *); /* lbppbe = logical blocks per physical block exponent. see SBC-3 */ unsigned int (*get_lbppbe)(struct se_device *); unsigned int (*get_io_min)(struct se_device *); unsigned int (*get_io_opt)(struct se_device *); unsigned char *(*get_sense_buffer)(struct se_cmd *); bool (*get_write_cache)(struct se_device *); int (*init_prot)(struct se_device *); int (*format_prot)(struct se_device *); void (*free_prot)(struct se_device *); struct configfs_attribute **tb_dev_attrib_attrs; struct configfs_attribute **tb_dev_action_attrs; }; struct exec_cmd_ops { sense_reason_t (*execute_rw)(struct se_cmd *cmd, struct scatterlist *, u32, enum dma_data_direction); sense_reason_t (*execute_sync_cache)(struct se_cmd *cmd); sense_reason_t (*execute_write_same)(struct se_cmd *cmd); sense_reason_t (*execute_unmap)(struct se_cmd *cmd, sector_t lba, sector_t nolb); sense_reason_t (*execute_pr_out)(struct se_cmd *cmd, u8 sa, u64 key, u64 sa_key, u8 type, bool aptpl); sense_reason_t (*execute_pr_in)(struct se_cmd *cmd, u8 sa, unsigned char *param_data); }; int transport_backend_register(const struct target_backend_ops *); void target_backend_unregister(const struct target_backend_ops *); void target_complete_cmd(struct se_cmd *, u8); void target_set_cmd_data_length(struct se_cmd *, int); void target_complete_cmd_with_sense(struct se_cmd *, u8, sense_reason_t); void target_complete_cmd_with_length(struct se_cmd *, u8, int); void transport_copy_sense_to_cmd(struct se_cmd *, unsigned char *); sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size); sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd); sense_reason_t spc_emulate_inquiry_std(struct se_cmd *, unsigned char *); sense_reason_t spc_emulate_evpd_83(struct se_cmd *, unsigned char *); sense_reason_t sbc_parse_cdb(struct se_cmd *cmd, struct exec_cmd_ops *ops); u32 sbc_get_device_rev(struct se_device *dev); u32 sbc_get_device_type(struct se_device *dev); sector_t sbc_get_write_same_sectors(struct se_cmd *cmd); void 
sbc_dif_generate(struct se_cmd *); sense_reason_t sbc_dif_verify(struct se_cmd *, sector_t, unsigned int, unsigned int, struct scatterlist *, int); void sbc_dif_copy_prot(struct se_cmd *, unsigned int, bool, struct scatterlist *, int); void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *); int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *); int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *); int transport_set_vpd_ident(struct t10_vpd *, unsigned char *); extern struct configfs_attribute *sbc_attrib_attrs[]; extern struct configfs_attribute *passthrough_attrib_attrs[]; extern struct configfs_attribute *passthrough_pr_attrib_attrs[]; /* core helpers also used by command snooping in pscsi */ void *transport_kmap_data_sg(struct se_cmd *); void transport_kunmap_data_sg(struct se_cmd *); /* core helpers also used by xcopy during internal command setup */ sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *, struct scatterlist *, u32, struct scatterlist *, u32); bool target_lun_is_rdonly(struct se_cmd *); sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd, sense_reason_t (*exec_cmd)(struct se_cmd *cmd)); bool target_sense_desc_format(struct se_device *dev); sector_t target_to_linux_sector(struct se_device *dev, sector_t lb); bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib, struct block_device *bdev); static inline bool target_dev_configured(struct se_device *se_dev) { return !!(se_dev->dev_flags & DF_CONFIGURED); } #endif /* TARGET_CORE_BACKEND_H */
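/*
 * Hypothetical skeleton, not taken from any in-tree backend: the general
 * shape of a module that plugs into this interface.  The names prefixed
 * with "example_" are made up; only the registration entry points and the
 * struct/field names come from the declarations above.  A real backend
 * also needs the usual module headers (<linux/module.h> for THIS_MODULE)
 * and must implement every callback it relies on.
 */
static int example_attach_hba(struct se_hba *hba, u32 host_id)
{
	return 0;
}

static void example_detach_hba(struct se_hba *hba)
{
}

static const struct target_backend_ops example_backend_ops = {
	.name		= "example",
	.owner		= THIS_MODULE,
	.attach_hba	= example_attach_hba,
	.detach_hba	= example_detach_hba,
	/* .alloc_device, .configure_device, .parse_cdb, ... as required */
};

static int __init example_backend_module_init(void)
{
	return transport_backend_register(&example_backend_ops);
}

static void __exit example_backend_module_exit(void)
{
	target_backend_unregister(&example_backend_ops);
}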
/* SPDX-License-Identifier: GPL-2.0 */ /* * string.h: External definitions for optimized assembly string * routines for the Linux Kernel. * * Copyright (C) 1995,1996 David S. Miller ([email protected]) * Copyright (C) 1996,1997,1999 Jakub Jelinek ([email protected]) */ #ifndef __SPARC64_STRING_H__ #define __SPARC64_STRING_H__ #include <asm/asi.h> #endif /* !(__SPARC64_STRING_H__) */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * The On Chip Memory (OCMEM) allocator allows various clients to allocate * memory from OCMEM based on performance, latency and power requirements. * This is typically used by the GPU, camera/video, and audio components on * some Snapdragon SoCs. * * Copyright (C) 2019 Brian Masney <[email protected]> * Copyright (C) 2015 Red Hat. Author: Rob Clark <[email protected]> */ #include <linux/device.h> #include <linux/err.h> #ifndef __OCMEM_H__ #define __OCMEM_H__ enum ocmem_client { /* GMEM clients */ OCMEM_GRAPHICS = 0x0, /* * TODO add more once ocmem_allocate() is clever enough to * deal with multiple clients. */ OCMEM_CLIENT_MAX, }; struct ocmem; struct ocmem_buf { unsigned long offset; unsigned long addr; unsigned long len; }; #if IS_ENABLED(CONFIG_QCOM_OCMEM) struct ocmem *of_get_ocmem(struct device *dev); struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, unsigned long size); void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, struct ocmem_buf *buf); #else /* IS_ENABLED(CONFIG_QCOM_OCMEM) */ static inline struct ocmem *of_get_ocmem(struct device *dev) { return ERR_PTR(-ENODEV); } static inline struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client, unsigned long size) { return ERR_PTR(-ENODEV); } static inline void ocmem_free(struct ocmem *ocmem, enum ocmem_client client, struct ocmem_buf *buf) { } #endif /* IS_ENABLED(CONFIG_QCOM_OCMEM) */ #endif /* __OCMEM_H__ */
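/*
 * Illustrative sketch of typical OCMEM client usage, based only on the
 * API declared above; my_client_dev, my_len and my_program_gmem() are
 * placeholders, and the surrounding driver context is assumed rather
 * than shown.
 */
static int my_ocmem_example(struct device *my_client_dev, unsigned long my_len)
{
	struct ocmem *ocmem;
	struct ocmem_buf *buf;

	/* resolve the OCMEM instance referenced by this client's DT node */
	ocmem = of_get_ocmem(my_client_dev);
	if (IS_ERR(ocmem))
		return PTR_ERR(ocmem);	/* -ENODEV when CONFIG_QCOM_OCMEM is off */

	buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, my_len);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* buf->addr, buf->offset and buf->len describe the carved-out region */
	my_program_gmem(buf);	/* hypothetical: hand the region to the client HW */

	ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
	return 0;
}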
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */ /* QLogic qed NIC Driver * Copyright (c) 2015-2017 QLogic Corporation * Copyright (c) 2019-2020 Marvell International Ltd. */ #ifndef _QED_ISCSI_H #define _QED_ISCSI_H #include <linux/types.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/qed/tcp_common.h> #include <linux/qed/qed_iscsi_if.h> #include <linux/qed/qed_chain.h> #include "qed.h" #include "qed_hsi.h" #include "qed_mcp.h" #include "qed_sp.h" struct qed_iscsi_info { spinlock_t lock; /* Connection resources. */ struct list_head free_list; u16 max_num_outstanding_tasks; void *event_context; iscsi_event_cb_t event_cb; }; #if IS_ENABLED(CONFIG_QED_ISCSI) int qed_iscsi_alloc(struct qed_hwfn *p_hwfn); void qed_iscsi_setup(struct qed_hwfn *p_hwfn); void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** * qed_get_protocol_stats_iscsi(): Fills provided statistics * struct with statistics. * * @cdev: Qed dev pointer. * @stats: Points to struct that will be filled with statistics. * @is_atomic: Hint from the caller - if the func can sleep or not. * * Context: The function should not sleep in case is_atomic == true. * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats, bool is_atomic); #else /* IS_ENABLED(CONFIG_QED_ISCSI) */ static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { return -EINVAL; } static inline void qed_iscsi_setup(struct qed_hwfn *p_hwfn) {} static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats, bool is_atomic) {} #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ #endif
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 Freescale Semiconductor, Inc. */ #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/io.h> #include <linux/slab.h> #include "clk.h" /** * struct clk_ref - mxs reference clock * @hw: clk_hw for the reference clock * @reg: register address * @idx: the index of the reference clock within the same register * * The mxs reference clock sources from pll. Every 4 reference clocks share * one register space, and @idx is used to identify them. Each reference * clock has a gate control and a fractional * divider. The rate is calculated * as pll rate * (18 / FRAC), where FRAC = 18 ~ 35. */ struct clk_ref { struct clk_hw hw; void __iomem *reg; u8 idx; }; #define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw) static int clk_ref_enable(struct clk_hw *hw) { struct clk_ref *ref = to_clk_ref(hw); writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR); return 0; } static void clk_ref_disable(struct clk_hw *hw) { struct clk_ref *ref = to_clk_ref(hw); writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET); } static unsigned long clk_ref_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { struct clk_ref *ref = to_clk_ref(hw); u64 tmp = parent_rate; u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f; tmp *= 18; do_div(tmp, frac); return tmp; } static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { unsigned long parent_rate = *prate; u64 tmp = parent_rate; u8 frac; tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = clamp(tmp, 18, 35); tmp = parent_rate; tmp *= 18; do_div(tmp, frac); return tmp; } static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_ref *ref = to_clk_ref(hw); unsigned long flags; u64 tmp = parent_rate; u32 val; u8 frac, shift = ref->idx * 8; tmp = tmp * 18 + rate / 2; do_div(tmp, rate); frac = clamp(tmp, 18, 35); spin_lock_irqsave(&mxs_lock, flags); val = readl_relaxed(ref->reg); val &= ~(0x3f << shift); val |= frac << shift; writel_relaxed(val, ref->reg); spin_unlock_irqrestore(&mxs_lock, flags); return 0; } static const struct clk_ops clk_ref_ops = { .enable = clk_ref_enable, .disable = clk_ref_disable, .recalc_rate = clk_ref_recalc_rate, .round_rate = clk_ref_round_rate, .set_rate = clk_ref_set_rate, }; struct clk *mxs_clk_ref(const char *name, const char *parent_name, void __iomem *reg, u8 idx) { struct clk_ref *ref; struct clk *clk; struct clk_init_data init; ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!ref) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &clk_ref_ops; init.flags = 0; init.parent_names = (parent_name ? &parent_name: NULL); init.num_parents = (parent_name ? 1 : 0); ref->reg = reg; ref->idx = idx; ref->hw.init = &init; clk = clk_register(NULL, &ref->hw); if (IS_ERR(clk)) kfree(ref); return clk; }
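/*
 * Worked example (sketch) of the FRAC arithmetic used by
 * clk_ref_round_rate() and clk_ref_set_rate() above, assuming a
 * 480 MHz PLL parent:
 *
 *   requested rate = 320 MHz
 *   tmp  = 480000000 * 18 + 320000000 / 2 = 8800000000
 *   tmp /= 320000000                       -> 27   (round to nearest)
 *   frac = clamp(27, 18, 35)               -> 27
 *   rate = 480000000 * 18 / 27             -> exactly 320000000 Hz
 *
 * With FRAC limited to 18..35, a 480 MHz parent can therefore produce
 * rates from 480 MHz (FRAC = 18) down to about 246.86 MHz (FRAC = 35).
 */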
// SPDX-License-Identifier: MIT /* * Copyright © 2021 Intel Corporation */ #include <linux/kernel.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/string.h> #include "i915_driver.h" #include "i915_drv.h" #include "i915_mitigations.h" static unsigned long mitigations __read_mostly = ~0UL; enum { CLEAR_RESIDUALS = 0, }; static const char * const names[] = { [CLEAR_RESIDUALS] = "residuals", }; bool i915_mitigate_clear_residuals(void) { return READ_ONCE(mitigations) & BIT(CLEAR_RESIDUALS); } static int mitigations_set(const char *val, const struct kernel_param *kp) { unsigned long new = ~0UL; char *str, *sep, *tok; bool first = true; int err = 0; BUILD_BUG_ON(ARRAY_SIZE(names) >= BITS_PER_TYPE(mitigations)); str = kstrdup(val, GFP_KERNEL); if (!str) return -ENOMEM; for (sep = str; (tok = strsep(&sep, ","));) { bool enable = true; int i; /* Be tolerant of leading/trailing whitespace */ tok = strim(tok); if (first) { first = false; if (!strcmp(tok, "auto")) continue; new = 0; if (!strcmp(tok, "off")) continue; } if (*tok == '!') { enable = !enable; tok++; } if (!strncmp(tok, "no", 2)) { enable = !enable; tok += 2; } if (*tok == '\0') continue; for (i = 0; i < ARRAY_SIZE(names); i++) { if (!strcmp(tok, names[i])) { if (enable) new |= BIT(i); else new &= ~BIT(i); break; } } if (i == ARRAY_SIZE(names)) { pr_err("Bad \"%s.mitigations=%s\", '%s' is unknown\n", DRIVER_NAME, val, tok); err = -EINVAL; break; } } kfree(str); if (err) return err; WRITE_ONCE(mitigations, new); return 0; } static int mitigations_get(char *buffer, const struct kernel_param *kp) { unsigned long local = READ_ONCE(mitigations); int count, i; bool enable; if (!local) return scnprintf(buffer, PAGE_SIZE, "%s\n", "off"); if (local & BIT(BITS_PER_LONG - 1)) { count = scnprintf(buffer, PAGE_SIZE, "%s,", "auto"); enable = false; } else { enable = true; count = 0; } for (i = 0; i < ARRAY_SIZE(names); i++) { if ((local & BIT(i)) != enable) continue; count += scnprintf(buffer + count, PAGE_SIZE - count, "%s%s,", enable ? "" : "!", names[i]); } buffer[count - 1] = '\n'; return count; } static const struct kernel_param_ops ops = { .set = mitigations_set, .get = mitigations_get, }; module_param_cb_unsafe(mitigations, &ops, NULL, 0600); MODULE_PARM_DESC(mitigations, "Selectively enable security mitigations for all Intel® GPUs in the system.\n" "\n" " auto -- enables all mitigations required for the platform [default]\n" " off -- disables all mitigations\n" "\n" "Individual mitigations can be enabled by passing a comma-separated string,\n" "e.g. mitigations=residuals to enable only clearing residuals or\n" "mitigations=auto,noresiduals to disable only the clear residual mitigation.\n" "Either '!' or 'no' may be used to switch from enabling the mitigation to\n" "disabling it.\n" "\n" "Active mitigations for Ivybridge, Baytrail, Haswell:\n" " residuals -- clear all thread-local registers between contexts" );
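/*
 * Worked example (sketch) of how mitigations_set() above interprets the
 * module parameter:
 *
 *   i915.mitigations=auto,noresiduals
 *     "auto"        -> start from ~0UL (all mitigations enabled)
 *     "noresiduals" -> the leading "no" flips enable, "residuals" matches
 *                      names[CLEAR_RESIDUALS], so that bit is cleared
 *
 * The stored mask keeps the high "auto" bits set with BIT(CLEAR_RESIDUALS)
 * clear, and mitigations_get() reports it back as "auto,!residuals".
 */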
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2016-2020 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_REGS_H_ #define ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_REGS_H_ /* ***************************************** * DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START * (Prototype: MME_NON_TENSOR_DESCRIPTOR_START) ***************************************** */ #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_LOW 0x40CB028 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_BRAINS_HIGH 0x40CB02C #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_LOW 0x40CB030 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_HEADER_HIGH 0x40CB034 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_MASTER 0x40CB038 #define mmDCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_EUS_SLAVE 0x40CB03C #endif /* ASIC_REG_DCORE0_MME_CTRL_LO_ARCH_NON_TENSOR_START_REGS_H_ */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * TS3A227E Autonomous Audio Accessory Detection and Configuration Switch * * Copyright (C) 2014 Google, Inc. */ #ifndef _TS3A227E_H #define _TS3A227E_H int ts3a227e_enable_jack_detect(struct snd_soc_component *component, struct snd_soc_jack *jack); #endif
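/*
 * Illustrative sketch of how a machine driver hands a jack over to the
 * TS3A227E component for accessory detection. Jack creation and component
 * lookup are board specific, so my_create_headset_jack() and
 * my_find_ts3a227e() are hypothetical placeholders here.
 */
static struct snd_soc_jack my_headset_jack;

static int my_card_enable_jack_detect(struct snd_soc_card *card)
{
	struct snd_soc_component *component = my_find_ts3a227e(card);
	int ret;

	ret = my_create_headset_jack(card, &my_headset_jack);
	if (ret)
		return ret;

	return ts3a227e_enable_jack_detect(component, &my_headset_jack);
}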
/* * Wacom W8001 penabled serial touchscreen driver * * Copyright (c) 2008 Jaya Kumar * Copyright (c) 2010 Red Hat, Inc. * Copyright (c) 2010 - 2011 Ping Cheng, Wacom. <[email protected]> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Layout based on Elo serial touchscreen driver by Vojtech Pavlik */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input/mt.h> #include <linux/serio.h> #include <linux/ctype.h> #include <linux/delay.h> #define DRIVER_DESC "Wacom W8001 serial touchscreen driver" MODULE_AUTHOR("Jaya Kumar <[email protected]>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define W8001_MAX_PHYS 42 #define W8001_MAX_LENGTH 13 #define W8001_LEAD_MASK 0x80 #define W8001_LEAD_BYTE 0x80 #define W8001_TAB_MASK 0x40 #define W8001_TAB_BYTE 0x40 /* set in first byte of touch data packets */ #define W8001_TOUCH_MASK (0x10 | W8001_LEAD_MASK) #define W8001_TOUCH_BYTE (0x10 | W8001_LEAD_BYTE) #define W8001_QUERY_PACKET 0x20 #define W8001_CMD_STOP '0' #define W8001_CMD_START '1' #define W8001_CMD_QUERY '*' #define W8001_CMD_TOUCHQUERY '%' /* length of data packets in bytes, depends on device. */ #define W8001_PKTLEN_TOUCH93 5 #define W8001_PKTLEN_TOUCH9A 7 #define W8001_PKTLEN_TPCPEN 9 #define W8001_PKTLEN_TPCCTL 11 /* control packet */ #define W8001_PKTLEN_TOUCH2FG 13 /* resolution in points/mm */ #define W8001_PEN_RESOLUTION 100 #define W8001_TOUCH_RESOLUTION 10 struct w8001_coord { u8 rdy; u8 tsw; u8 f1; u8 f2; u16 x; u16 y; u16 pen_pressure; u8 tilt_x; u8 tilt_y; }; /* touch query reply packet */ struct w8001_touch_query { u16 x; u16 y; u8 panel_res; u8 capacity_res; u8 sensor_id; }; /* * Per-touchscreen data. 
*/ struct w8001 { struct input_dev *pen_dev; struct input_dev *touch_dev; struct serio *serio; struct completion cmd_done; int id; int idx; unsigned char response_type; unsigned char response[W8001_MAX_LENGTH]; unsigned char data[W8001_MAX_LENGTH]; char phys[W8001_MAX_PHYS]; int type; unsigned int pktlen; u16 max_touch_x; u16 max_touch_y; u16 max_pen_x; u16 max_pen_y; char pen_name[64]; char touch_name[64]; int open_count; struct mutex mutex; }; static void parse_pen_data(u8 *data, struct w8001_coord *coord) { memset(coord, 0, sizeof(*coord)); coord->rdy = data[0] & 0x20; coord->tsw = data[0] & 0x01; coord->f1 = data[0] & 0x02; coord->f2 = data[0] & 0x04; coord->x = (data[1] & 0x7F) << 9; coord->x |= (data[2] & 0x7F) << 2; coord->x |= (data[6] & 0x60) >> 5; coord->y = (data[3] & 0x7F) << 9; coord->y |= (data[4] & 0x7F) << 2; coord->y |= (data[6] & 0x18) >> 3; coord->pen_pressure = data[5] & 0x7F; coord->pen_pressure |= (data[6] & 0x07) << 7 ; coord->tilt_x = data[7] & 0x7F; coord->tilt_y = data[8] & 0x7F; } static void parse_single_touch(u8 *data, struct w8001_coord *coord) { coord->x = (data[1] << 7) | data[2]; coord->y = (data[3] << 7) | data[4]; coord->tsw = data[0] & 0x01; } static void scale_touch_coordinates(struct w8001 *w8001, unsigned int *x, unsigned int *y) { if (w8001->max_pen_x && w8001->max_touch_x) *x = *x * w8001->max_pen_x / w8001->max_touch_x; if (w8001->max_pen_y && w8001->max_touch_y) *y = *y * w8001->max_pen_y / w8001->max_touch_y; } static void parse_multi_touch(struct w8001 *w8001) { struct input_dev *dev = w8001->touch_dev; unsigned char *data = w8001->data; unsigned int x, y; int i; int count = 0; for (i = 0; i < 2; i++) { bool touch = data[0] & (1 << i); input_mt_slot(dev, i); input_mt_report_slot_state(dev, MT_TOOL_FINGER, touch); if (touch) { x = (data[6 * i + 1] << 7) | data[6 * i + 2]; y = (data[6 * i + 3] << 7) | data[6 * i + 4]; /* data[5,6] and [11,12] is finger capacity */ /* scale to pen maximum */ scale_touch_coordinates(w8001, &x, &y); input_report_abs(dev, ABS_MT_POSITION_X, x); input_report_abs(dev, ABS_MT_POSITION_Y, y); count++; } } /* emulate single touch events when stylus is out of proximity. * This is to make single touch backward support consistent * across all Wacom single touch devices. */ if (w8001->type != BTN_TOOL_PEN && w8001->type != BTN_TOOL_RUBBER) { w8001->type = count == 1 ? BTN_TOOL_FINGER : KEY_RESERVED; input_mt_report_pointer_emulation(dev, true); } input_sync(dev); } static void parse_touchquery(u8 *data, struct w8001_touch_query *query) { memset(query, 0, sizeof(*query)); query->panel_res = data[1]; query->sensor_id = data[2] & 0x7; query->capacity_res = data[7]; query->x = data[3] << 9; query->x |= data[4] << 2; query->x |= (data[2] >> 5) & 0x3; query->y = data[5] << 9; query->y |= data[6] << 2; query->y |= (data[2] >> 3) & 0x3; /* Early days' single-finger touch models need the following defaults */ if (!query->x && !query->y) { query->x = 1024; query->y = 1024; if (query->panel_res) query->x = query->y = (1 << query->panel_res); query->panel_res = W8001_TOUCH_RESOLUTION; } } static void report_pen_events(struct w8001 *w8001, struct w8001_coord *coord) { struct input_dev *dev = w8001->pen_dev; /* * We have 1 bit for proximity (rdy) and 3 bits for tip, side, * side2/eraser. If rdy && f2 are set, this can be either pen + side2, * or eraser. 
Assume: * - if dev is already in proximity and f2 is toggled → pen + side2 * - if dev comes into proximity with f2 set → eraser * If f2 disappears after assuming eraser, fake proximity out for * eraser and in for pen. */ switch (w8001->type) { case BTN_TOOL_RUBBER: if (!coord->f2) { input_report_abs(dev, ABS_PRESSURE, 0); input_report_key(dev, BTN_TOUCH, 0); input_report_key(dev, BTN_STYLUS, 0); input_report_key(dev, BTN_STYLUS2, 0); input_report_key(dev, BTN_TOOL_RUBBER, 0); input_sync(dev); w8001->type = BTN_TOOL_PEN; } break; case BTN_TOOL_FINGER: case KEY_RESERVED: w8001->type = coord->f2 ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; break; default: input_report_key(dev, BTN_STYLUS2, coord->f2); break; } input_report_abs(dev, ABS_X, coord->x); input_report_abs(dev, ABS_Y, coord->y); input_report_abs(dev, ABS_PRESSURE, coord->pen_pressure); input_report_key(dev, BTN_TOUCH, coord->tsw); input_report_key(dev, BTN_STYLUS, coord->f1); input_report_key(dev, w8001->type, coord->rdy); input_sync(dev); if (!coord->rdy) w8001->type = KEY_RESERVED; } static void report_single_touch(struct w8001 *w8001, struct w8001_coord *coord) { struct input_dev *dev = w8001->touch_dev; unsigned int x = coord->x; unsigned int y = coord->y; /* scale to pen maximum */ scale_touch_coordinates(w8001, &x, &y); input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); input_report_key(dev, BTN_TOUCH, coord->tsw); input_sync(dev); w8001->type = coord->tsw ? BTN_TOOL_FINGER : KEY_RESERVED; } static irqreturn_t w8001_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct w8001 *w8001 = serio_get_drvdata(serio); struct w8001_coord coord; unsigned char tmp; w8001->data[w8001->idx] = data; switch (w8001->idx++) { case 0: if ((data & W8001_LEAD_MASK) != W8001_LEAD_BYTE) { pr_debug("w8001: unsynchronized data: 0x%02x\n", data); w8001->idx = 0; } break; case W8001_PKTLEN_TOUCH93 - 1: case W8001_PKTLEN_TOUCH9A - 1: tmp = w8001->data[0] & W8001_TOUCH_BYTE; if (tmp != W8001_TOUCH_BYTE) break; if (w8001->pktlen == w8001->idx) { w8001->idx = 0; if (w8001->type != BTN_TOOL_PEN && w8001->type != BTN_TOOL_RUBBER) { parse_single_touch(w8001->data, &coord); report_single_touch(w8001, &coord); } } break; /* Pen coordinates packet */ case W8001_PKTLEN_TPCPEN - 1: tmp = w8001->data[0] & W8001_TAB_MASK; if (unlikely(tmp == W8001_TAB_BYTE)) break; tmp = w8001->data[0] & W8001_TOUCH_BYTE; if (tmp == W8001_TOUCH_BYTE) break; w8001->idx = 0; parse_pen_data(w8001->data, &coord); report_pen_events(w8001, &coord); break; /* control packet */ case W8001_PKTLEN_TPCCTL - 1: tmp = w8001->data[0] & W8001_TOUCH_MASK; if (tmp == W8001_TOUCH_BYTE) break; w8001->idx = 0; memcpy(w8001->response, w8001->data, W8001_MAX_LENGTH); w8001->response_type = W8001_QUERY_PACKET; complete(&w8001->cmd_done); break; /* 2 finger touch packet */ case W8001_PKTLEN_TOUCH2FG - 1: w8001->idx = 0; parse_multi_touch(w8001); break; default: /* * ThinkPad X60 Tablet PC (pen only device) sometimes * sends invalid data packets that are larger than * W8001_PKTLEN_TPCPEN. Let's start over again. 
*/ if (!w8001->touch_dev && w8001->idx > W8001_PKTLEN_TPCPEN - 1) w8001->idx = 0; } return IRQ_HANDLED; } static int w8001_command(struct w8001 *w8001, unsigned char command, bool wait_response) { int rc; w8001->response_type = 0; init_completion(&w8001->cmd_done); rc = serio_write(w8001->serio, command); if (rc == 0 && wait_response) { wait_for_completion_timeout(&w8001->cmd_done, HZ); if (w8001->response_type != W8001_QUERY_PACKET) rc = -EIO; } return rc; } static int w8001_open(struct input_dev *dev) { struct w8001 *w8001 = input_get_drvdata(dev); int err; scoped_guard(mutex_intr, &w8001->mutex) { if (w8001->open_count == 0) { err = w8001_command(w8001, W8001_CMD_START, false); if (err) return err; } w8001->open_count++; return 0; } return -EINTR; } static void w8001_close(struct input_dev *dev) { struct w8001 *w8001 = input_get_drvdata(dev); guard(mutex)(&w8001->mutex); if (--w8001->open_count == 0) w8001_command(w8001, W8001_CMD_STOP, false); } static int w8001_detect(struct w8001 *w8001) { int error; error = w8001_command(w8001, W8001_CMD_STOP, false); if (error) return error; msleep(250); /* wait 250ms before querying the device */ return 0; } static int w8001_setup_pen(struct w8001 *w8001, char *basename, size_t basename_sz) { struct input_dev *dev = w8001->pen_dev; struct w8001_coord coord; int error; /* penabled? */ error = w8001_command(w8001, W8001_CMD_QUERY, true); if (error) return error; __set_bit(EV_KEY, dev->evbit); __set_bit(EV_ABS, dev->evbit); __set_bit(BTN_TOUCH, dev->keybit); __set_bit(BTN_TOOL_PEN, dev->keybit); __set_bit(BTN_TOOL_RUBBER, dev->keybit); __set_bit(BTN_STYLUS, dev->keybit); __set_bit(BTN_STYLUS2, dev->keybit); __set_bit(INPUT_PROP_DIRECT, dev->propbit); parse_pen_data(w8001->response, &coord); w8001->max_pen_x = coord.x; w8001->max_pen_y = coord.y; input_set_abs_params(dev, ABS_X, 0, coord.x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, coord.y, 0, 0); input_abs_set_res(dev, ABS_X, W8001_PEN_RESOLUTION); input_abs_set_res(dev, ABS_Y, W8001_PEN_RESOLUTION); input_set_abs_params(dev, ABS_PRESSURE, 0, coord.pen_pressure, 0, 0); if (coord.tilt_x && coord.tilt_y) { input_set_abs_params(dev, ABS_TILT_X, 0, coord.tilt_x, 0, 0); input_set_abs_params(dev, ABS_TILT_Y, 0, coord.tilt_y, 0, 0); } w8001->id = 0x90; strlcat(basename, " Penabled", basename_sz); return 0; } static int w8001_setup_touch(struct w8001 *w8001, char *basename, size_t basename_sz) { struct input_dev *dev = w8001->touch_dev; struct w8001_touch_query touch; int error; /* Touch enabled? */ error = w8001_command(w8001, W8001_CMD_TOUCHQUERY, true); if (error) return error; /* * Some non-touch devices may reply to the touch query. But their * second byte is empty, which indicates touch is not supported. 
*/ if (!w8001->response[1]) return -ENXIO; __set_bit(EV_KEY, dev->evbit); __set_bit(EV_ABS, dev->evbit); __set_bit(BTN_TOUCH, dev->keybit); __set_bit(INPUT_PROP_DIRECT, dev->propbit); parse_touchquery(w8001->response, &touch); w8001->max_touch_x = touch.x; w8001->max_touch_y = touch.y; if (w8001->max_pen_x && w8001->max_pen_y) { /* if pen is supported scale to pen maximum */ touch.x = w8001->max_pen_x; touch.y = w8001->max_pen_y; touch.panel_res = W8001_PEN_RESOLUTION; } input_set_abs_params(dev, ABS_X, 0, touch.x, 0, 0); input_set_abs_params(dev, ABS_Y, 0, touch.y, 0, 0); input_abs_set_res(dev, ABS_X, touch.panel_res); input_abs_set_res(dev, ABS_Y, touch.panel_res); switch (touch.sensor_id) { case 0: case 2: w8001->pktlen = W8001_PKTLEN_TOUCH93; w8001->id = 0x93; strlcat(basename, " 1FG", basename_sz); break; case 1: case 3: case 4: w8001->pktlen = W8001_PKTLEN_TOUCH9A; strlcat(basename, " 1FG", basename_sz); w8001->id = 0x9a; break; case 5: w8001->pktlen = W8001_PKTLEN_TOUCH2FG; __set_bit(BTN_TOOL_DOUBLETAP, dev->keybit); error = input_mt_init_slots(dev, 2, 0); if (error) { dev_err(&w8001->serio->dev, "failed to initialize MT slots: %d\n", error); return error; } input_set_abs_params(dev, ABS_MT_POSITION_X, 0, touch.x, 0, 0); input_set_abs_params(dev, ABS_MT_POSITION_Y, 0, touch.y, 0, 0); input_set_abs_params(dev, ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX, 0, 0); input_abs_set_res(dev, ABS_MT_POSITION_X, touch.panel_res); input_abs_set_res(dev, ABS_MT_POSITION_Y, touch.panel_res); strlcat(basename, " 2FG", basename_sz); if (w8001->max_pen_x && w8001->max_pen_y) w8001->id = 0xE3; else w8001->id = 0xE2; break; } strlcat(basename, " Touchscreen", basename_sz); return 0; } static void w8001_set_devdata(struct input_dev *dev, struct w8001 *w8001, struct serio *serio) { dev->phys = w8001->phys; dev->id.bustype = BUS_RS232; dev->id.product = w8001->id; dev->id.vendor = 0x056a; dev->id.version = 0x0100; dev->open = w8001_open; dev->close = w8001_close; dev->dev.parent = &serio->dev; input_set_drvdata(dev, w8001); } /* * w8001_disconnect() is the opposite of w8001_connect() */ static void w8001_disconnect(struct serio *serio) { struct w8001 *w8001 = serio_get_drvdata(serio); serio_close(serio); if (w8001->pen_dev) input_unregister_device(w8001->pen_dev); if (w8001->touch_dev) input_unregister_device(w8001->touch_dev); kfree(w8001); serio_set_drvdata(serio, NULL); } /* * w8001_connect() is the routine that is called when someone adds a * new serio device that supports the w8001 protocol and registers it as * an input device. 
*/ static int w8001_connect(struct serio *serio, struct serio_driver *drv) { struct w8001 *w8001; struct input_dev *input_dev_pen; struct input_dev *input_dev_touch; char basename[64] = "Wacom Serial"; int err, err_pen, err_touch; w8001 = kzalloc(sizeof(*w8001), GFP_KERNEL); input_dev_pen = input_allocate_device(); input_dev_touch = input_allocate_device(); if (!w8001 || !input_dev_pen || !input_dev_touch) { err = -ENOMEM; goto fail1; } w8001->serio = serio; w8001->pen_dev = input_dev_pen; w8001->touch_dev = input_dev_touch; mutex_init(&w8001->mutex); init_completion(&w8001->cmd_done); snprintf(w8001->phys, sizeof(w8001->phys), "%s/input0", serio->phys); serio_set_drvdata(serio, w8001); err = serio_open(serio, drv); if (err) goto fail2; err = w8001_detect(w8001); if (err) goto fail3; /* For backwards-compatibility we compose the basename based on * capabilities and then just append the tool type */ err_pen = w8001_setup_pen(w8001, basename, sizeof(basename)); err_touch = w8001_setup_touch(w8001, basename, sizeof(basename)); if (err_pen && err_touch) { err = -ENXIO; goto fail3; } if (!err_pen) { snprintf(w8001->pen_name, sizeof(w8001->pen_name), "%s Pen", basename); input_dev_pen->name = w8001->pen_name; w8001_set_devdata(input_dev_pen, w8001, serio); err = input_register_device(w8001->pen_dev); if (err) goto fail3; } else { input_free_device(input_dev_pen); input_dev_pen = NULL; w8001->pen_dev = NULL; } if (!err_touch) { snprintf(w8001->touch_name, sizeof(w8001->touch_name), "%s Finger", basename); input_dev_touch->name = w8001->touch_name; w8001_set_devdata(input_dev_touch, w8001, serio); err = input_register_device(w8001->touch_dev); if (err) goto fail4; } else { input_free_device(input_dev_touch); input_dev_touch = NULL; w8001->touch_dev = NULL; } return 0; fail4: if (w8001->pen_dev) input_unregister_device(w8001->pen_dev); fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev_pen); input_free_device(input_dev_touch); kfree(w8001); return err; } static const struct serio_device_id w8001_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_W8001, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, w8001_serio_ids); static struct serio_driver w8001_drv = { .driver = { .name = "w8001", }, .description = DRIVER_DESC, .id_table = w8001_serio_ids, .interrupt = w8001_interrupt, .connect = w8001_connect, .disconnect = w8001_disconnect, }; module_serio_driver(w8001_drv);
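/*
 * Worked example (sketch) of the 9-byte pen packet decoding performed by
 * parse_pen_data() above. X is assembled from data[1] (bits 15..9),
 * data[2] (bits 8..2) and data[6] bits 6..5 (bits 1..0); with the
 * illustrative bytes data[1] = 0x12, data[2] = 0x34, data[6] = 0x40:
 *
 *   x = (0x12 << 9) | (0x34 << 2) | ((0x40 & 0x60) >> 5)
 *     = 0x2400      | 0xD0        | 0x2
 *     = 0x24D2
 *
 * Y uses data[3], data[4] and data[6] bits 4..3 the same way, and the
 * pressure value combines data[5] (low 7 bits) with data[6] bits 2..0 as
 * its upper bits.
 */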
/* bnx2x_mfw_req.h: Qlogic Everest network driver. * * Copyright (c) 2012-2013 Broadcom Corporation * Copyright (c) 2014 QLogic Corporation * All rights reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. */ #ifndef BNX2X_MFW_REQ_H #define BNX2X_MFW_REQ_H #define PORT_0 0 #define PORT_1 1 #define PORT_MAX 2 #define NVM_PATH_MAX 2 /* FCoE capabilities required from the driver */ struct fcoe_capabilities { u32 capability1; /* Maximum number of I/Os per connection */ #define FCOE_IOS_PER_CONNECTION_MASK 0x0000ffff #define FCOE_IOS_PER_CONNECTION_SHIFT 0 /* Maximum number of Logins per port */ #define FCOE_LOGINS_PER_PORT_MASK 0xffff0000 #define FCOE_LOGINS_PER_PORT_SHIFT 16 u32 capability2; /* Maximum number of exchanges */ #define FCOE_NUMBER_OF_EXCHANGES_MASK 0x0000ffff #define FCOE_NUMBER_OF_EXCHANGES_SHIFT 0 /* Maximum NPIV WWN per port */ #define FCOE_NPIV_WWN_PER_PORT_MASK 0xffff0000 #define FCOE_NPIV_WWN_PER_PORT_SHIFT 16 u32 capability3; /* Maximum number of targets supported */ #define FCOE_TARGETS_SUPPORTED_MASK 0x0000ffff #define FCOE_TARGETS_SUPPORTED_SHIFT 0 /* Maximum number of outstanding commands across all connections */ #define FCOE_OUTSTANDING_COMMANDS_MASK 0xffff0000 #define FCOE_OUTSTANDING_COMMANDS_SHIFT 16 u32 capability4; #define FCOE_CAPABILITY4_STATEFUL 0x00000001 #define FCOE_CAPABILITY4_STATELESS 0x00000002 #define FCOE_CAPABILITY4_CAPABILITIES_REPORTED_VALID 0x00000004 }; struct glob_ncsi_oem_data { u32 driver_version; u32 unused[3]; struct fcoe_capabilities fcoe_features[NVM_PATH_MAX][PORT_MAX]; }; /* current drv_info version */ #define DRV_INFO_CUR_VER 2 /* drv_info op codes supported */ enum drv_info_opcode { ETH_STATS_OPCODE, FCOE_STATS_OPCODE, ISCSI_STATS_OPCODE }; #define ETH_STAT_INFO_VERSION_LEN 12 /* Per PCI Function Ethernet Statistics required from the driver */ struct eth_stats_info { /* Function's Driver Version. padded to 12 */ u8 version[ETH_STAT_INFO_VERSION_LEN]; /* Locally Admin Addr. BigEndian EIU48. Actual size is 6 bytes */ u8 mac_local[8]; u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ u32 mtu_size; /* MTU Size. Note : Negotiated MTU */ u32 feature_flags; /* Feature_Flags. */ #define FEATURE_ETH_CHKSUM_OFFLOAD_MASK 0x01 #define FEATURE_ETH_LSO_MASK 0x02 #define FEATURE_ETH_BOOTMODE_MASK 0x1C #define FEATURE_ETH_BOOTMODE_SHIFT 2 #define FEATURE_ETH_BOOTMODE_NONE (0x0 << 2) #define FEATURE_ETH_BOOTMODE_PXE (0x1 << 2) #define FEATURE_ETH_BOOTMODE_ISCSI (0x2 << 2) #define FEATURE_ETH_BOOTMODE_FCOE (0x3 << 2) #define FEATURE_ETH_TOE_MASK 0x20 u32 lso_max_size; /* LSO MaxOffloadSize. */ u32 lso_min_seg_cnt; /* LSO MinSegmentCount. */ /* Num Offloaded Connections TCP_IPv4. */ u32 ipv4_ofld_cnt; /* Num Offloaded Connections TCP_IPv6. */ u32 ipv6_ofld_cnt; u32 promiscuous_mode; /* Promiscuous Mode. non-zero true */ u32 txq_size; /* TX Descriptors Queue Size */ u32 rxq_size; /* RX Descriptors Queue Size */ /* TX Descriptor Queue Avg Depth. % Avg Queue Depth since last poll */ u32 txq_avg_depth; /* RX Descriptors Queue Avg Depth. % Avg Queue Depth since last poll */ u32 rxq_avg_depth; /* IOV_Offload. 0=none; 1=MultiQueue, 2=VEB 3= VEPA*/ u32 iov_offload; /* Number of NetQueue/VMQ Config'd. */ u32 netq_cnt; u32 vf_cnt; /* Num VF assigned to this PF. 
*/ }; /* Per PCI Function FCOE Statistics required from the driver */ struct fcoe_stats_info { u8 version[12]; /* Function's Driver Version. */ u8 mac_local[8]; /* Locally Admin Addr. */ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ u8 mac_add2[8]; /* Additional Programmed MAC Addr 2. */ /* QoS Priority (per 802.1p). 0-7255 */ u32 qos_priority; u32 txq_size; /* FCoE TX Descriptors Queue Size. */ u32 rxq_size; /* FCoE RX Descriptors Queue Size. */ /* FCoE TX Descriptor Queue Avg Depth. */ u32 txq_avg_depth; /* FCoE RX Descriptors Queue Avg Depth. */ u32 rxq_avg_depth; u32 rx_frames_lo; /* FCoE RX Frames received. */ u32 rx_frames_hi; /* FCoE RX Frames received. */ u32 rx_bytes_lo; /* FCoE RX Bytes received. */ u32 rx_bytes_hi; /* FCoE RX Bytes received. */ u32 tx_frames_lo; /* FCoE TX Frames sent. */ u32 tx_frames_hi; /* FCoE TX Frames sent. */ u32 tx_bytes_lo; /* FCoE TX Bytes sent. */ u32 tx_bytes_hi; /* FCoE TX Bytes sent. */ }; /* Per PCI Function iSCSI Statistics required from the driver*/ struct iscsi_stats_info { u8 version[12]; /* Function's Driver Version. */ u8 mac_local[8]; /* Locally Admin iSCSI MAC Addr. */ u8 mac_add1[8]; /* Additional Programmed MAC Addr 1. */ /* QoS Priority (per 802.1p). 0-7255 */ u32 qos_priority; u8 initiator_name[64]; /* iSCSI Boot Initiator Node name. */ u8 ww_port_name[64]; /* iSCSI World wide port name */ u8 boot_target_name[64];/* iSCSI Boot Target Name. */ u8 boot_target_ip[16]; /* iSCSI Boot Target IP. */ u32 boot_target_portal; /* iSCSI Boot Target Portal. */ u8 boot_init_ip[16]; /* iSCSI Boot Initiator IP Address. */ u32 max_frame_size; /* Max Frame Size. bytes */ u32 txq_size; /* PDU TX Descriptors Queue Size. */ u32 rxq_size; /* PDU RX Descriptors Queue Size. */ u32 txq_avg_depth; /* PDU TX Descriptor Queue Avg Depth. */ u32 rxq_avg_depth; /* PDU RX Descriptors Queue Avg Depth. */ u32 rx_pdus_lo; /* iSCSI PDUs received. */ u32 rx_pdus_hi; /* iSCSI PDUs received. */ u32 rx_bytes_lo; /* iSCSI RX Bytes received. */ u32 rx_bytes_hi; /* iSCSI RX Bytes received. */ u32 tx_pdus_lo; /* iSCSI PDUs sent. */ u32 tx_pdus_hi; /* iSCSI PDUs sent. */ u32 tx_bytes_lo; /* iSCSI PDU TX Bytes sent. */ u32 tx_bytes_hi; /* iSCSI PDU TX Bytes sent. */ u32 pcp_prior_map_tbl; /* C-PCP to S-PCP Priority MapTable. * 9 nibbles, the position of each nibble * represents the C-PCP value, the value * of the nibble = S-PCP value. */ }; union drv_info_to_mcp { struct eth_stats_info ether_stat; struct fcoe_stats_info fcoe_stat; struct iscsi_stats_info iscsi_stat; }; #endif /* BNX2X_MFW_REQ_H */
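/*
 * Illustrative sketch of how the structures above are used: the management
 * FW asks for one statistics block at a time via an opcode, and the driver
 * fills the matching member of union drv_info_to_mcp. The my_fill_*
 * helpers are hypothetical placeholders for the real collection code.
 */
static void my_handle_drv_info_req(union drv_info_to_mcp *buf,
				   enum drv_info_opcode op)
{
	memset(buf, 0, sizeof(*buf));

	switch (op) {
	case ETH_STATS_OPCODE:
		my_fill_eth_stats(&buf->ether_stat);	/* hypothetical */
		break;
	case FCOE_STATS_OPCODE:
		my_fill_fcoe_stats(&buf->fcoe_stat);	/* hypothetical */
		break;
	case ISCSI_STATS_OPCODE:
		my_fill_iscsi_stats(&buf->iscsi_stat);	/* hypothetical */
		break;
	}
}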
// SPDX-License-Identifier: GPL-2.0-only /* Atlantic Network Driver * Copyright (C) 2020 Marvell International Ltd. */ #include "aq_hw.h" #include "aq_hw_utils.h" #include "aq_ring.h" #include "aq_nic.h" #include "hw_atl/hw_atl_b0.h" #include "hw_atl/hw_atl_utils.h" #include "hw_atl/hw_atl_llh.h" #include "hw_atl/hw_atl_llh_internal.h" #include "hw_atl2_utils.h" #include "hw_atl2_llh.h" #include "hw_atl2_internal.h" #include "hw_atl2_llh_internal.h" static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, u32 tag, u32 mask, u32 action); #define DEFAULT_BOARD_BASIC_CAPABILITIES \ .is_64_dma = true, \ .op64bit = true, \ .msix_irqs = 8U, \ .irq_mask = ~0U, \ .vecs = HW_ATL2_RSS_MAX, \ .tcs_max = HW_ATL2_TC_MAX, \ .rxd_alignment = 1U, \ .rxd_size = HW_ATL2_RXD_SIZE, \ .rxds_max = HW_ATL2_MAX_RXD, \ .rxds_min = HW_ATL2_MIN_RXD, \ .txd_alignment = 1U, \ .txd_size = HW_ATL2_TXD_SIZE, \ .txds_max = HW_ATL2_MAX_TXD, \ .txds_min = HW_ATL2_MIN_TXD, \ .txhwb_alignment = 4096U, \ .tx_rings = HW_ATL2_TX_RINGS, \ .rx_rings = HW_ATL2_RX_RINGS, \ .hw_features = NETIF_F_HW_CSUM | \ NETIF_F_RXCSUM | \ NETIF_F_RXHASH | \ NETIF_F_SG | \ NETIF_F_TSO | \ NETIF_F_TSO6 | \ NETIF_F_LRO | \ NETIF_F_NTUPLE | \ NETIF_F_HW_VLAN_CTAG_FILTER | \ NETIF_F_HW_VLAN_CTAG_RX | \ NETIF_F_HW_VLAN_CTAG_TX | \ NETIF_F_GSO_UDP_L4 | \ NETIF_F_GSO_PARTIAL | \ NETIF_F_HW_TC, \ .hw_priv_flags = IFF_UNICAST_FLT, \ .flow_control = true, \ .mtu = HW_ATL2_MTU_JUMBO, \ .mac_regs_count = 72, \ .hw_alive_check_addr = 0x10U, \ .priv_data_len = sizeof(struct hw_atl2_priv) const struct aq_hw_caps_s hw_atl2_caps_aqc113 = { DEFAULT_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, .link_speed_msk = AQ_NIC_RATE_10G | AQ_NIC_RATE_5G | AQ_NIC_RATE_2G5 | AQ_NIC_RATE_1G | AQ_NIC_RATE_100M | AQ_NIC_RATE_10M, }; const struct aq_hw_caps_s hw_atl2_caps_aqc115c = { DEFAULT_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, .link_speed_msk = AQ_NIC_RATE_2G5 | AQ_NIC_RATE_1G | AQ_NIC_RATE_100M | AQ_NIC_RATE_10M, }; const struct aq_hw_caps_s hw_atl2_caps_aqc116c = { DEFAULT_BOARD_BASIC_CAPABILITIES, .media_type = AQ_HW_MEDIA_TYPE_TP, .link_speed_msk = AQ_NIC_RATE_1G | AQ_NIC_RATE_100M | AQ_NIC_RATE_10M, }; static u32 hw_atl2_sem_act_rslvr_get(struct aq_hw_s *self) { return hw_atl_reg_glb_cpu_sem_get(self, HW_ATL2_FW_SM_ACT_RSLVR); } static int hw_atl2_hw_reset(struct aq_hw_s *self) { struct hw_atl2_priv *priv = self->priv; int err; err = hw_atl2_utils_soft_reset(self); if (err) return err; memset(priv, 0, sizeof(*priv)); self->aq_fw_ops->set_state(self, MPI_RESET); err = aq_hw_err_from_flags(self); return err; } static int hw_atl2_hw_queue_to_tc_map_set(struct aq_hw_s *self) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; unsigned int tcs, q_per_tc; unsigned int tc, q; u32 rx_map = 0; u32 tx_map = 0; hw_atl2_tpb_tx_tc_q_rand_map_en_set(self, 1U); switch (cfg->tc_mode) { case AQ_TC_MODE_8TCS: tcs = 8; q_per_tc = 4; break; case AQ_TC_MODE_4TCS: tcs = 4; q_per_tc = 8; break; default: return -EINVAL; } for (tc = 0; tc != tcs; tc++) { unsigned int tc_q_offset = tc * q_per_tc; for (q = tc_q_offset; q != tc_q_offset + q_per_tc; q++) { rx_map |= tc << HW_ATL2_RX_Q_TC_MAP_SHIFT(q); if (HW_ATL2_RX_Q_TC_MAP_ADR(q) != HW_ATL2_RX_Q_TC_MAP_ADR(q + 1)) { aq_hw_write_reg(self, HW_ATL2_RX_Q_TC_MAP_ADR(q), rx_map); rx_map = 0; } tx_map |= tc << HW_ATL2_TX_Q_TC_MAP_SHIFT(q); if (HW_ATL2_TX_Q_TC_MAP_ADR(q) != HW_ATL2_TX_Q_TC_MAP_ADR(q + 1)) { aq_hw_write_reg(self, HW_ATL2_TX_Q_TC_MAP_ADR(q), tx_map); tx_map = 0; } } } return 
aq_hw_err_from_flags(self); } static int hw_atl2_hw_qos_set(struct aq_hw_s *self) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; u32 tx_buff_size = HW_ATL2_TXBUF_MAX; u32 rx_buff_size = HW_ATL2_RXBUF_MAX; unsigned int prio = 0U; u32 tc = 0U; /* TPS Descriptor rate init */ hw_atl_tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U); hw_atl_tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA); /* TPS VM init */ hw_atl_tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U); tx_buff_size /= cfg->tcs; rx_buff_size /= cfg->tcs; for (tc = 0; tc < cfg->tcs; tc++) { u32 threshold = 0U; /* Tx buf size TC0 */ hw_atl_tpb_tx_pkt_buff_size_per_tc_set(self, tx_buff_size, tc); threshold = (tx_buff_size * (1024 / 32U) * 66U) / 100U; hw_atl_tpb_tx_buff_hi_threshold_per_tc_set(self, threshold, tc); threshold = (tx_buff_size * (1024 / 32U) * 50U) / 100U; hw_atl_tpb_tx_buff_lo_threshold_per_tc_set(self, threshold, tc); /* QoS Rx buf size per TC */ hw_atl_rpb_rx_pkt_buff_size_per_tc_set(self, rx_buff_size, tc); threshold = (rx_buff_size * (1024U / 32U) * 66U) / 100U; hw_atl_rpb_rx_buff_hi_threshold_per_tc_set(self, threshold, tc); threshold = (rx_buff_size * (1024U / 32U) * 50U) / 100U; hw_atl_rpb_rx_buff_lo_threshold_per_tc_set(self, threshold, tc); hw_atl_b0_set_fc(self, self->aq_nic_cfg->fc.req, tc); } /* QoS 802.1p priority -> TC mapping */ for (prio = 0; prio < 8; ++prio) hw_atl_rpf_rpb_user_priority_tc_map_set(self, prio, cfg->prio_tc_map[prio]); /* ATL2 Apply ring to TC mapping */ hw_atl2_hw_queue_to_tc_map_set(self); return aq_hw_err_from_flags(self); } static int hw_atl2_hw_rss_set(struct aq_hw_s *self, struct aq_rss_parameters *rss_params) { u8 *indirection_table = rss_params->indirection_table; const u32 num_tcs = aq_hw_num_tcs(self); u32 rpf_redir2_enable; int tc; int i; rpf_redir2_enable = num_tcs > 4 ? 1 : 0; hw_atl2_rpf_redirection_table2_select_set(self, rpf_redir2_enable); for (i = HW_ATL2_RSS_REDIRECTION_MAX; i--;) { for (tc = 0; tc != num_tcs; tc++) { hw_atl2_new_rpf_rss_redir_set(self, tc, i, tc * aq_hw_q_per_tc(self) + indirection_table[i]); } } return aq_hw_err_from_flags(self); } static int hw_atl2_hw_init_tx_tc_rate_limit(struct aq_hw_s *self) { static const u32 max_weight = BIT(HW_ATL2_TPS_DATA_TCTWEIGHT_WIDTH) - 1; /* Scale factor is based on the number of bits in fractional portion */ static const u32 scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH); static const u32 frac_msk = HW_ATL_TPS_DESC_RATE_Y_MSK >> HW_ATL_TPS_DESC_RATE_Y_SHIFT; const u32 link_speed = self->aq_link_status.mbps; struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; unsigned long num_min_rated_tcs = 0; u32 tc_weight[AQ_CFG_TCS_MAX]; u32 fixed_max_credit_4b; u32 fixed_max_credit; u8 min_rate_msk = 0; u32 sum_weight = 0; int tc; /* By default max_credit is based upon MTU (in unit of 64b) */ fixed_max_credit = nic_cfg->aq_hw_caps->mtu / 64; /* in unit of 4b */ fixed_max_credit_4b = nic_cfg->aq_hw_caps->mtu / 4; if (link_speed) { min_rate_msk = nic_cfg->tc_min_rate_msk & (BIT(nic_cfg->tcs) - 1); num_min_rated_tcs = hweight8(min_rate_msk); } /* First, calculate weights where min_rate is specified */ if (num_min_rated_tcs) { for (tc = 0; tc != nic_cfg->tcs; tc++) { if (!nic_cfg->tc_min_rate[tc]) { tc_weight[tc] = 0; continue; } tc_weight[tc] = (-1L + link_speed + nic_cfg->tc_min_rate[tc] * max_weight) / link_speed; tc_weight[tc] = min(tc_weight[tc], max_weight); sum_weight += tc_weight[tc]; } } /* WSP, if min_rate is set for at least one TC. * RR otherwise. */ hw_atl2_tps_tx_pkt_shed_data_arb_mode_set(self, min_rate_msk ? 
1U : 0U); /* Data TC Arbiter takes precedence over Descriptor TC Arbiter, * leave Descriptor TC Arbiter as RR. */ hw_atl_tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U); hw_atl_tps_tx_desc_rate_mode_set(self, nic_cfg->is_qos ? 1U : 0U); for (tc = 0; tc != nic_cfg->tcs; tc++) { const u32 en = (nic_cfg->tc_max_rate[tc] != 0) ? 1U : 0U; const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); u32 weight, max_credit; hw_atl_tps_tx_pkt_shed_desc_tc_max_credit_set(self, tc, fixed_max_credit); hw_atl_tps_tx_pkt_shed_desc_tc_weight_set(self, tc, 0x1E); if (num_min_rated_tcs) { weight = tc_weight[tc]; if (!weight && sum_weight < max_weight) weight = (max_weight - sum_weight) / (nic_cfg->tcs - num_min_rated_tcs); else if (!weight) weight = 0x640; max_credit = max(2 * weight, fixed_max_credit_4b); } else { weight = 0x640; max_credit = 0xFFF0; } hw_atl2_tps_tx_pkt_shed_tc_data_weight_set(self, tc, weight); hw_atl2_tps_tx_pkt_shed_tc_data_max_credit_set(self, tc, max_credit); hw_atl_tps_tx_desc_rate_en_set(self, desc, en); if (en) { /* Nominal rate is always 10G */ const u32 rate = 10000U * scale / nic_cfg->tc_max_rate[tc]; const u32 rate_int = rate >> HW_ATL_TPS_DESC_RATE_Y_WIDTH; const u32 rate_frac = rate & frac_msk; hw_atl_tps_tx_desc_rate_x_set(self, desc, rate_int); hw_atl_tps_tx_desc_rate_y_set(self, desc, rate_frac); } else { /* A value of 1 indicates the queue is not * rate controlled. */ hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); } } for (tc = nic_cfg->tcs; tc != AQ_CFG_TCS_MAX; tc++) { const u32 desc = AQ_NIC_CFG_TCVEC2RING(nic_cfg, tc, 0); hw_atl_tps_tx_desc_rate_en_set(self, desc, 0U); hw_atl_tps_tx_desc_rate_x_set(self, desc, 1U); hw_atl_tps_tx_desc_rate_y_set(self, desc, 0U); } return aq_hw_err_from_flags(self); } static int hw_atl2_hw_init_tx_path(struct aq_hw_s *self) { struct aq_nic_cfg_s *nic_cfg = self->aq_nic_cfg; /* Tx TC/RSS number config */ hw_atl_tpb_tps_tx_tc_mode_set(self, nic_cfg->tc_mode); hw_atl_thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U); hw_atl_thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU); /* Tx interrupts */ hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); /* misc */ hw_atl_tdm_tx_dca_en_set(self, 0U); hw_atl_tdm_tx_dca_mode_set(self, 0U); hw_atl_tpb_tx_path_scp_ins_en_set(self, 1U); hw_atl2_tpb_tx_buf_clk_gate_en_set(self, 0U); return aq_hw_err_from_flags(self); } static void hw_atl2_hw_init_new_rx_filters(struct aq_hw_s *self) { u8 *prio_tc_map = self->aq_nic_cfg->prio_tc_map; struct hw_atl2_priv *priv = self->priv; u16 action; u8 index; int i; /* Action Resolver Table (ART) is used by RPF to decide which action * to take with a packet based upon input tag and tag mask, where: * - input tag is a combination of 3-bit VLan Prio (PTP) and * 29-bit concatenation of all tags from filter block; * - tag mask is a mask used for matching against input tag. * The input_tag is compared with the all the Requested_tags in the * Record table to find a match. Action field of the selected matched * REC entry is used for further processing. If multiple entries match, * the lowest REC entry, Action field will be selected. */ hw_atl2_rpf_act_rslvr_section_en_set(self, 0xFFFF); hw_atl2_rpfl2_uc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC, HW_ATL2_MAC_UC); hw_atl2_rpfl2_bc_flr_tag_set(self, HW_ATL2_RPF_TAG_BASE_UC); /* FW reserves the beginning of ART, thus all driver entries must * start from the offset specified in FW caps. 
*/ index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX; hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK | HW_ATL2_RPF_TAG_ALLMC_MASK, HW_ATL2_ACTION_DROP); index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX; hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_VLAN_MASK | HW_ATL2_RPF_TAG_UNTAG_MASK, HW_ATL2_ACTION_DROP); /* Configure ART to map given VLan Prio (PCP) to the TC index for * RSS redirection table. */ for (i = 0; i < 8; i++) { action = HW_ATL2_ACTION_ASSIGN_TC(prio_tc_map[i]); index = priv->art_base_index + HW_ATL2_RPF_PCP_TO_TC_INDEX + i; hw_atl2_act_rslvr_table_set(self, index, i << HW_ATL2_RPF_TAG_PCP_OFFSET, HW_ATL2_RPF_TAG_PCP_MASK, action); } } static void hw_atl2_hw_new_rx_filter_vlan_promisc(struct aq_hw_s *self, bool promisc) { u16 off_action = (!promisc && !hw_atl_rpfl2promiscuous_mode_en_get(self)) ? HW_ATL2_ACTION_DROP : HW_ATL2_ACTION_DISABLE; struct hw_atl2_priv *priv = self->priv; u8 index; index = priv->art_base_index + HW_ATL2_RPF_VLAN_PROMISC_OFF_INDEX; hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_VLAN_MASK | HW_ATL2_RPF_TAG_UNTAG_MASK, off_action); } static void hw_atl2_hw_new_rx_filter_promisc(struct aq_hw_s *self, bool promisc) { u16 off_action = promisc ? HW_ATL2_ACTION_DISABLE : HW_ATL2_ACTION_DROP; struct hw_atl2_priv *priv = self->priv; bool vlan_promisc_enable; u8 index; index = priv->art_base_index + HW_ATL2_RPF_L2_PROMISC_OFF_INDEX; hw_atl2_act_rslvr_table_set(self, index, 0, HW_ATL2_RPF_TAG_UC_MASK | HW_ATL2_RPF_TAG_ALLMC_MASK, off_action); /* turn VLAN promisc mode too */ vlan_promisc_enable = hw_atl_rpf_vlan_prom_mode_en_get(self); hw_atl2_hw_new_rx_filter_vlan_promisc(self, promisc | vlan_promisc_enable); } static int hw_atl2_act_rslvr_table_set(struct aq_hw_s *self, u8 location, u32 tag, u32 mask, u32 action) { u32 val; int err; err = readx_poll_timeout_atomic(hw_atl2_sem_act_rslvr_get, self, val, val == 1, 1, 10000U); if (err) return err; hw_atl2_rpf_act_rslvr_record_set(self, location, tag, mask, action); hw_atl_reg_glb_cpu_sem_set(self, 1, HW_ATL2_FW_SM_ACT_RSLVR); return err; } static int hw_atl2_hw_init_rx_path(struct aq_hw_s *self) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; int i; /* Rx TC/RSS number config */ hw_atl_rpb_rpf_rx_traf_class_mode_set(self, cfg->tc_mode); /* Rx flow control */ hw_atl_rpb_rx_flow_ctl_mode_set(self, 1U); hw_atl2_rpf_rss_hash_type_set(self, HW_ATL2_RPF_RSS_HASH_TYPE_ALL); /* RSS Ring selection */ hw_atl_b0_hw_init_rx_rss_ctrl1(self); /* Multicast filters */ for (i = HW_ATL2_MAC_MAX; i--;) { hw_atl_rpfl2_uc_flr_en_set(self, (i == 0U) ? 
1U : 0U, i); hw_atl_rpfl2unicast_flr_act_set(self, 1U, i); } hw_atl_reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U); hw_atl_reg_rx_flr_mcst_flr_set(self, HW_ATL_MCAST_FLT_ANY_TO_HOST, 0U); /* Vlan filters */ hw_atl_rpf_vlan_outer_etht_set(self, ETH_P_8021AD); hw_atl_rpf_vlan_inner_etht_set(self, ETH_P_8021Q); hw_atl_rpf_vlan_prom_mode_en_set(self, 1); /* Always accept untagged packets */ hw_atl_rpf_vlan_accept_untagged_packets_set(self, 1U); hw_atl_rpf_vlan_untagged_act_set(self, 1U); hw_atl2_hw_init_new_rx_filters(self); /* Rx Interrupts */ hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); hw_atl_rpfl2broadcast_flr_act_set(self, 1U); hw_atl_rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U)); hw_atl_rdm_rx_dca_en_set(self, 0U); hw_atl_rdm_rx_dca_mode_set(self, 0U); return aq_hw_err_from_flags(self); } static int hw_atl2_hw_init(struct aq_hw_s *self, const u8 *mac_addr) { static u32 aq_hw_atl2_igcr_table_[4][2] = { [AQ_HW_IRQ_INVALID] = { 0x20000000U, 0x20000000U }, [AQ_HW_IRQ_INTX] = { 0x20000080U, 0x20000080U }, [AQ_HW_IRQ_MSI] = { 0x20000021U, 0x20000025U }, [AQ_HW_IRQ_MSIX] = { 0x20000022U, 0x20000026U }, }; struct aq_nic_cfg_s *aq_nic_cfg = self->aq_nic_cfg; struct hw_atl2_priv *priv = self->priv; u8 base_index, count; int err; err = hw_atl2_utils_get_action_resolve_table_caps(self, &base_index, &count); if (err) return err; priv->art_base_index = 8 * base_index; hw_atl2_init_launchtime(self); hw_atl2_hw_init_tx_path(self); hw_atl2_hw_init_rx_path(self); hw_atl_b0_hw_mac_addr_set(self, mac_addr); self->aq_fw_ops->set_link_speed(self, aq_nic_cfg->link_speed_msk); self->aq_fw_ops->set_state(self, MPI_INIT); hw_atl2_hw_qos_set(self); hw_atl2_hw_rss_set(self, &aq_nic_cfg->aq_rss); hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss); hw_atl2_rpf_new_enable_set(self, 1); /* Reset link status and read out initial hardware counters */ self->aq_link_status.mbps = 0; self->aq_fw_ops->update_stats(self); err = aq_hw_err_from_flags(self); if (err < 0) goto err_exit; /* Interrupts */ hw_atl_reg_irq_glb_ctl_set(self, aq_hw_atl2_igcr_table_[aq_nic_cfg->irq_type] [(aq_nic_cfg->vecs > 1U) ? 1 : 0]); hw_atl_itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask); /* Interrupts */ hw_atl_reg_gen_irq_map_set(self, ((HW_ATL2_ERR_INT << 0x18) | (1U << 0x1F)) | ((HW_ATL2_ERR_INT << 0x10) | (1U << 0x17)), 0U); hw_atl_b0_hw_offload_set(self, aq_nic_cfg); err_exit: return err; } static int hw_atl2_hw_ring_rx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, struct aq_ring_param_s *aq_ring_param) { return hw_atl_b0_hw_ring_rx_init(self, aq_ring, aq_ring_param); } static int hw_atl2_hw_ring_tx_init(struct aq_hw_s *self, struct aq_ring_s *aq_ring, struct aq_ring_param_s *aq_ring_param) { return hw_atl_b0_hw_ring_tx_init(self, aq_ring, aq_ring_param); } #define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 
1U : 0U) static int hw_atl2_hw_packet_filter_set(struct aq_hw_s *self, unsigned int packet_filter) { hw_atl2_hw_new_rx_filter_promisc(self, IS_FILTER_ENABLED(IFF_PROMISC)); return hw_atl_b0_hw_packet_filter_set(self, packet_filter); } #undef IS_FILTER_ENABLED static int hw_atl2_hw_multicast_list_set(struct aq_hw_s *self, u8 ar_mac [AQ_HW_MULTICAST_ADDRESS_MAX] [ETH_ALEN], u32 count) { struct aq_nic_cfg_s *cfg = self->aq_nic_cfg; int err = 0; if (count > (HW_ATL2_MAC_MAX - HW_ATL2_MAC_MIN)) { err = -EBADRQC; goto err_exit; } for (cfg->mc_list_count = 0U; cfg->mc_list_count < count; ++cfg->mc_list_count) { u32 i = cfg->mc_list_count; u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]); u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) | (ar_mac[i][4] << 8) | ar_mac[i][5]; hw_atl_rpfl2_uc_flr_en_set(self, 0U, HW_ATL2_MAC_MIN + i); hw_atl_rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL2_MAC_MIN + i); hw_atl_rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL2_MAC_MIN + i); hw_atl2_rpfl2_uc_flr_tag_set(self, 1, HW_ATL2_MAC_MIN + i); hw_atl_rpfl2_uc_flr_en_set(self, (cfg->is_mc_list_enabled), HW_ATL2_MAC_MIN + i); } err = aq_hw_err_from_flags(self); err_exit: return err; } static int hw_atl2_hw_interrupt_moderation_set(struct aq_hw_s *self) { unsigned int i = 0U; u32 itr_tx = 2U; u32 itr_rx = 2U; switch (self->aq_nic_cfg->itr) { case AQ_CFG_INTERRUPT_MODERATION_ON: case AQ_CFG_INTERRUPT_MODERATION_AUTO: hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 0U); hw_atl_tdm_tdm_intr_moder_en_set(self, 1U); hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 0U); hw_atl_rdm_rdm_intr_moder_en_set(self, 1U); if (self->aq_nic_cfg->itr == AQ_CFG_INTERRUPT_MODERATION_ON) { /* HW timers are in 2us units */ int tx_max_timer = self->aq_nic_cfg->tx_itr / 2; int tx_min_timer = tx_max_timer / 2; int rx_max_timer = self->aq_nic_cfg->rx_itr / 2; int rx_min_timer = rx_max_timer / 2; tx_max_timer = min(HW_ATL2_INTR_MODER_MAX, tx_max_timer); tx_min_timer = min(HW_ATL2_INTR_MODER_MIN, tx_min_timer); rx_max_timer = min(HW_ATL2_INTR_MODER_MAX, rx_max_timer); rx_min_timer = min(HW_ATL2_INTR_MODER_MIN, rx_min_timer); itr_tx |= tx_min_timer << 0x8U; itr_tx |= tx_max_timer << 0x10U; itr_rx |= rx_min_timer << 0x8U; itr_rx |= rx_max_timer << 0x10U; } else { static unsigned int hw_atl2_timers_table_tx_[][2] = { {0xfU, 0xffU}, /* 10Gbit */ {0xfU, 0x1ffU}, /* 5Gbit */ {0xfU, 0x1ffU}, /* 5Gbit 5GS */ {0xfU, 0x1ffU}, /* 2.5Gbit */ {0xfU, 0x1ffU}, /* 1Gbit */ {0xfU, 0x1ffU}, /* 100Mbit */ }; static unsigned int hw_atl2_timers_table_rx_[][2] = { {0x6U, 0x38U},/* 10Gbit */ {0xCU, 0x70U},/* 5Gbit */ {0xCU, 0x70U},/* 5Gbit 5GS */ {0x18U, 0xE0U},/* 2.5Gbit */ {0x30U, 0x80U},/* 1Gbit */ {0x4U, 0x50U},/* 100Mbit */ }; unsigned int mbps = self->aq_link_status.mbps; unsigned int speed_index; speed_index = hw_atl_utils_mbps_2_speed_index(mbps); /* Update user visible ITR settings */ self->aq_nic_cfg->tx_itr = hw_atl2_timers_table_tx_ [speed_index][1] * 2; self->aq_nic_cfg->rx_itr = hw_atl2_timers_table_rx_ [speed_index][1] * 2; itr_tx |= hw_atl2_timers_table_tx_ [speed_index][0] << 0x8U; itr_tx |= hw_atl2_timers_table_tx_ [speed_index][1] << 0x10U; itr_rx |= hw_atl2_timers_table_rx_ [speed_index][0] << 0x8U; itr_rx |= hw_atl2_timers_table_rx_ [speed_index][1] << 0x10U; } break; case AQ_CFG_INTERRUPT_MODERATION_OFF: hw_atl_tdm_tx_desc_wr_wb_irq_en_set(self, 1U); hw_atl_tdm_tdm_intr_moder_en_set(self, 0U); hw_atl_rdm_rx_desc_wr_wb_irq_en_set(self, 1U); hw_atl_rdm_rdm_intr_moder_en_set(self, 0U); itr_tx = 0U; itr_rx = 0U; break; } for (i = HW_ATL2_RINGS_MAX; i--;) { 
hw_atl2_reg_tx_intr_moder_ctrl_set(self, itr_tx, i); hw_atl_reg_rx_intr_moder_ctrl_set(self, itr_rx, i); } return aq_hw_err_from_flags(self); } static int hw_atl2_hw_stop(struct aq_hw_s *self) { hw_atl_b0_hw_irq_disable(self, HW_ATL2_INT_MASK); return 0; } static struct aq_stats_s *hw_atl2_utils_get_hw_stats(struct aq_hw_s *self) { return &self->curr_stats; } static int hw_atl2_hw_vlan_set(struct aq_hw_s *self, struct aq_rx_filter_vlan *aq_vlans) { struct hw_atl2_priv *priv = self->priv; u32 queue; u8 index; int i; hw_atl_rpf_vlan_prom_mode_en_set(self, 1U); for (i = 0; i < HW_ATL_VLAN_MAX_FILTERS; i++) { queue = HW_ATL2_ACTION_ASSIGN_QUEUE(aq_vlans[i].queue); hw_atl_rpf_vlan_flr_en_set(self, 0U, i); hw_atl_rpf_vlan_rxq_en_flr_set(self, 0U, i); index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i; hw_atl2_act_rslvr_table_set(self, index, 0, 0, HW_ATL2_ACTION_DISABLE); if (aq_vlans[i].enable) { hw_atl_rpf_vlan_id_flr_set(self, aq_vlans[i].vlan_id, i); hw_atl_rpf_vlan_flr_act_set(self, 1U, i); hw_atl_rpf_vlan_flr_en_set(self, 1U, i); if (aq_vlans[i].queue != 0xFF) { hw_atl_rpf_vlan_rxq_flr_set(self, aq_vlans[i].queue, i); hw_atl_rpf_vlan_rxq_en_flr_set(self, 1U, i); hw_atl2_rpf_vlan_flr_tag_set(self, i + 2, i); index = priv->art_base_index + HW_ATL2_RPF_VLAN_USER_INDEX + i; hw_atl2_act_rslvr_table_set(self, index, (i + 2) << HW_ATL2_RPF_TAG_VLAN_OFFSET, HW_ATL2_RPF_TAG_VLAN_MASK, queue); } else { hw_atl2_rpf_vlan_flr_tag_set(self, 1, i); } } } return aq_hw_err_from_flags(self); } static int hw_atl2_hw_vlan_ctrl(struct aq_hw_s *self, bool enable) { /* set promisc in case of disabing the vlan filter */ hw_atl_rpf_vlan_prom_mode_en_set(self, !enable); hw_atl2_hw_new_rx_filter_vlan_promisc(self, !enable); return aq_hw_err_from_flags(self); } const struct aq_hw_ops hw_atl2_ops = { .hw_soft_reset = hw_atl2_utils_soft_reset, .hw_prepare = hw_atl2_utils_initfw, .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set, .hw_init = hw_atl2_hw_init, .hw_reset = hw_atl2_hw_reset, .hw_start = hw_atl_b0_hw_start, .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start, .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop, .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start, .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop, .hw_stop = hw_atl2_hw_stop, .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit, .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update, .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive, .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill, .hw_irq_enable = hw_atl_b0_hw_irq_enable, .hw_irq_disable = hw_atl_b0_hw_irq_disable, .hw_irq_read = hw_atl_b0_hw_irq_read, .hw_ring_rx_init = hw_atl2_hw_ring_rx_init, .hw_ring_tx_init = hw_atl2_hw_ring_tx_init, .hw_packet_filter_set = hw_atl2_hw_packet_filter_set, .hw_filter_vlan_set = hw_atl2_hw_vlan_set, .hw_filter_vlan_ctrl = hw_atl2_hw_vlan_ctrl, .hw_multicast_list_set = hw_atl2_hw_multicast_list_set, .hw_interrupt_moderation_set = hw_atl2_hw_interrupt_moderation_set, .hw_rss_set = hw_atl2_hw_rss_set, .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set, .hw_tc_rate_limit_set = hw_atl2_hw_init_tx_tc_rate_limit, .hw_get_hw_stats = hw_atl2_utils_get_hw_stats, .hw_get_fw_version = hw_atl2_utils_get_fw_version, .hw_set_offload = hw_atl_b0_hw_offload_set, .hw_set_loopback = hw_atl_b0_set_loopback, .hw_set_fc = hw_atl_b0_set_fc, };
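/*
 * Worked example (sketch) of the per-TC shaper arithmetic in
 * hw_atl2_hw_init_tx_tc_rate_limit() above. The nominal rate is always
 * 10G (10000 Mbit/s) and scale = BIT(HW_ATL_TPS_DESC_RATE_Y_WIDTH), so
 * for a TC capped at tc_max_rate = 2500 Mbit/s:
 *
 *   rate      = 10000 * scale / 2500 = 4 * scale
 *   rate_int  = rate >> Y_WIDTH      = 4
 *   rate_frac = rate & frac_msk      = 0
 *
 * i.e. the descriptor rate divider is 4.0, one quarter of line rate,
 * while unshaped queues are written with a divider of exactly 1
 * (rate_int = 1, rate_frac = 0), which the hardware treats as
 * "not rate controlled".
 */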
// SPDX-License-Identifier: GPL-2.0 /****************************************************************************** * * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved. * ******************************************************************************/ #include <drv_types.h> #include <hal_btcoex.h> #include <linux/jiffies.h> static struct _cmd_callback rtw_cmd_callback[] = { {GEN_CMD_CODE(_Read_MACREG), NULL}, /*0*/ {GEN_CMD_CODE(_Write_MACREG), NULL}, {GEN_CMD_CODE(_Read_BBREG), &rtw_getbbrfreg_cmdrsp_callback}, {GEN_CMD_CODE(_Write_BBREG), NULL}, {GEN_CMD_CODE(_Read_RFREG), &rtw_getbbrfreg_cmdrsp_callback}, {GEN_CMD_CODE(_Write_RFREG), NULL}, /*5*/ {GEN_CMD_CODE(_Read_EEPROM), NULL}, {GEN_CMD_CODE(_Write_EEPROM), NULL}, {GEN_CMD_CODE(_Read_EFUSE), NULL}, {GEN_CMD_CODE(_Write_EFUSE), NULL}, {GEN_CMD_CODE(_Read_CAM), NULL}, /*10*/ {GEN_CMD_CODE(_Write_CAM), NULL}, {GEN_CMD_CODE(_setBCNITV), NULL}, {GEN_CMD_CODE(_setMBIDCFG), NULL}, {GEN_CMD_CODE(_JoinBss), &rtw_joinbss_cmd_callback}, /*14*/ {GEN_CMD_CODE(_DisConnect), &rtw_disassoc_cmd_callback}, /*15*/ {GEN_CMD_CODE(_CreateBss), &rtw_createbss_cmd_callback}, {GEN_CMD_CODE(_SetOpMode), NULL}, {GEN_CMD_CODE(_SiteSurvey), &rtw_survey_cmd_callback}, /*18*/ {GEN_CMD_CODE(_SetAuth), NULL}, {GEN_CMD_CODE(_SetKey), NULL}, /*20*/ {GEN_CMD_CODE(_SetStaKey), &rtw_setstaKey_cmdrsp_callback}, {GEN_CMD_CODE(_SetAssocSta), &rtw_setassocsta_cmdrsp_callback}, {GEN_CMD_CODE(_DelAssocSta), NULL}, {GEN_CMD_CODE(_SetStaPwrState), NULL}, {GEN_CMD_CODE(_SetBasicRate), NULL}, /*25*/ {GEN_CMD_CODE(_GetBasicRate), NULL}, {GEN_CMD_CODE(_SetDataRate), NULL}, {GEN_CMD_CODE(_GetDataRate), NULL}, {GEN_CMD_CODE(_SetPhyInfo), NULL}, {GEN_CMD_CODE(_GetPhyInfo), NULL}, /*30*/ {GEN_CMD_CODE(_SetPhy), NULL}, {GEN_CMD_CODE(_GetPhy), NULL}, {GEN_CMD_CODE(_readRssi), NULL}, {GEN_CMD_CODE(_readGain), NULL}, {GEN_CMD_CODE(_SetAtim), NULL}, /*35*/ {GEN_CMD_CODE(_SetPwrMode), NULL}, {GEN_CMD_CODE(_JoinbssRpt), NULL}, {GEN_CMD_CODE(_SetRaTable), NULL}, {GEN_CMD_CODE(_GetRaTable), NULL}, {GEN_CMD_CODE(_GetCCXReport), NULL}, /*40*/ {GEN_CMD_CODE(_GetDTMReport), NULL}, {GEN_CMD_CODE(_GetTXRateStatistics), NULL}, {GEN_CMD_CODE(_SetUsbSuspend), NULL}, {GEN_CMD_CODE(_SetH2cLbk), NULL}, {GEN_CMD_CODE(_AddBAReq), NULL}, /*45*/ {GEN_CMD_CODE(_SetChannel), NULL}, /*46*/ {GEN_CMD_CODE(_SetTxPower), NULL}, {GEN_CMD_CODE(_SwitchAntenna), NULL}, {GEN_CMD_CODE(_SetCrystalCap), NULL}, {GEN_CMD_CODE(_SetSingleCarrierTx), NULL}, /*50*/ {GEN_CMD_CODE(_SetSingleToneTx), NULL}, /*51*/ {GEN_CMD_CODE(_SetCarrierSuppressionTx), NULL}, {GEN_CMD_CODE(_SetContinuousTx), NULL}, {GEN_CMD_CODE(_SwitchBandwidth), NULL}, /*54*/ {GEN_CMD_CODE(_TX_Beacon), NULL},/*55*/ {GEN_CMD_CODE(_Set_MLME_EVT), NULL},/*56*/ {GEN_CMD_CODE(_Set_Drv_Extra), NULL},/*57*/ {GEN_CMD_CODE(_Set_H2C_MSG), NULL},/*58*/ {GEN_CMD_CODE(_SetChannelPlan), NULL},/*59*/ {GEN_CMD_CODE(_SetChannelSwitch), NULL},/*60*/ {GEN_CMD_CODE(_TDLS), NULL},/*61*/ {GEN_CMD_CODE(_ChkBMCSleepq), NULL}, /*62*/ {GEN_CMD_CODE(_RunInThreadCMD), NULL},/*63*/ }; static struct cmd_hdl wlancmds[] = { GEN_DRV_CMD_HANDLER(0, NULL) /*0*/ GEN_DRV_CMD_HANDLER(0, NULL) GEN_DRV_CMD_HANDLER(0, NULL) GEN_DRV_CMD_HANDLER(0, NULL) GEN_DRV_CMD_HANDLER(0, NULL) GEN_DRV_CMD_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) /*10*/ GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(sizeof(struct 
joinbss_parm), join_cmd_hdl) /*14*/ GEN_MLME_EXT_HANDLER(sizeof(struct disconnect_parm), disconnect_hdl) GEN_MLME_EXT_HANDLER(sizeof(struct createbss_parm), createbss_hdl) GEN_MLME_EXT_HANDLER(sizeof(struct setopmode_parm), setopmode_hdl) GEN_MLME_EXT_HANDLER(sizeof(struct sitesurvey_parm), sitesurvey_cmd_hdl) /*18*/ GEN_MLME_EXT_HANDLER(sizeof(struct setauth_parm), setauth_hdl) GEN_MLME_EXT_HANDLER(sizeof(struct setkey_parm), setkey_hdl) /*20*/ GEN_MLME_EXT_HANDLER(sizeof(struct set_stakey_parm), set_stakey_hdl) GEN_MLME_EXT_HANDLER(sizeof(struct set_assocsta_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct del_assocsta_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct setstapwrstate_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct setbasicrate_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct getbasicrate_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct setdatarate_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct getdatarate_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct setphyinfo_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct getphyinfo_parm), NULL) /*30*/ GEN_MLME_EXT_HANDLER(sizeof(struct setphy_parm), NULL) GEN_MLME_EXT_HANDLER(sizeof(struct getphy_parm), NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) /*40*/ GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(sizeof(struct addBaReq_parm), add_ba_hdl) GEN_MLME_EXT_HANDLER(sizeof(struct set_ch_parm), set_ch_hdl) /* 46 */ GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) /*50*/ GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(0, NULL) GEN_MLME_EXT_HANDLER(sizeof(struct Tx_Beacon_param), tx_beacon_hdl) /*55*/ GEN_MLME_EXT_HANDLER(0, mlme_evt_hdl) /*56*/ GEN_MLME_EXT_HANDLER(0, rtw_drvextra_cmd_hdl) /*57*/ GEN_MLME_EXT_HANDLER(0, h2c_msg_hdl) /*58*/ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelPlan_param), set_chplan_hdl) /*59*/ GEN_MLME_EXT_HANDLER(sizeof(struct SetChannelSwitch_param), set_csa_hdl) /*60*/ GEN_MLME_EXT_HANDLER(sizeof(struct TDLSoption_param), tdls_hdl) /*61*/ GEN_MLME_EXT_HANDLER(0, chk_bmc_sleepq_hdl) /*62*/ GEN_MLME_EXT_HANDLER(sizeof(struct RunInThread_param), run_in_thread_hdl) /*63*/ }; /* * Caller and the rtw_cmd_thread can protect cmd_q by spin_lock. * No irqsave is necessary. 
*/ int rtw_init_cmd_priv(struct cmd_priv *pcmdpriv) { init_completion(&pcmdpriv->cmd_queue_comp); init_completion(&pcmdpriv->terminate_cmdthread_comp); INIT_LIST_HEAD(&pcmdpriv->cmd_queue.queue); spin_lock_init(&pcmdpriv->cmd_queue.lock); /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ pcmdpriv->cmd_seq = 1; pcmdpriv->cmd_allocated_buf = rtw_zmalloc(MAX_CMDSZ + CMDBUFF_ALIGN_SZ); if (!pcmdpriv->cmd_allocated_buf) return -ENOMEM; pcmdpriv->cmd_buf = pcmdpriv->cmd_allocated_buf + CMDBUFF_ALIGN_SZ - ((SIZE_PTR)(pcmdpriv->cmd_allocated_buf) & (CMDBUFF_ALIGN_SZ-1)); pcmdpriv->rsp_allocated_buf = rtw_zmalloc(MAX_RSPSZ + 4); if (!pcmdpriv->rsp_allocated_buf) { kfree(pcmdpriv->cmd_allocated_buf); return -ENOMEM; } pcmdpriv->rsp_buf = pcmdpriv->rsp_allocated_buf + 4 - ((SIZE_PTR)(pcmdpriv->rsp_allocated_buf) & 3); pcmdpriv->cmd_issued_cnt = 0; pcmdpriv->cmd_done_cnt = 0; pcmdpriv->rsp_cnt = 0; mutex_init(&pcmdpriv->sctx_mutex); return 0; } static void c2h_wk_callback(struct work_struct *work); int rtw_init_evt_priv(struct evt_priv *pevtpriv) { /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ atomic_set(&pevtpriv->event_seq, 0); pevtpriv->evt_done_cnt = 0; _init_workitem(&pevtpriv->c2h_wk, c2h_wk_callback, NULL); pevtpriv->c2h_wk_alive = false; pevtpriv->c2h_queue = rtw_cbuf_alloc(C2H_QUEUE_MAX_LEN+1); if (!pevtpriv->c2h_queue) return -ENOMEM; return 0; } void _rtw_free_evt_priv(struct evt_priv *pevtpriv) { _cancel_workitem_sync(&pevtpriv->c2h_wk); while (pevtpriv->c2h_wk_alive) msleep(10); while (!rtw_cbuf_empty(pevtpriv->c2h_queue)) { void *c2h = rtw_cbuf_pop(pevtpriv->c2h_queue); if (c2h && c2h != (void *)pevtpriv) kfree(c2h); } kfree(pevtpriv->c2h_queue); } void _rtw_free_cmd_priv(struct cmd_priv *pcmdpriv) { if (pcmdpriv) { kfree(pcmdpriv->cmd_allocated_buf); kfree(pcmdpriv->rsp_allocated_buf); mutex_destroy(&pcmdpriv->sctx_mutex); } } /* * Calling Context: * * rtw_enqueue_cmd can only be called between kernel thread, * since only spin_lock is used. * * ISR/Call-Back functions can't call this sub-function. 
* */ int _rtw_enqueue_cmd(struct __queue *queue, struct cmd_obj *obj) { unsigned long irqL; if (!obj) goto exit; /* spin_lock_bh(&queue->lock); */ spin_lock_irqsave(&queue->lock, irqL); list_add_tail(&obj->list, &queue->queue); /* spin_unlock_bh(&queue->lock); */ spin_unlock_irqrestore(&queue->lock, irqL); exit: return _SUCCESS; } struct cmd_obj *_rtw_dequeue_cmd(struct __queue *queue) { unsigned long irqL; struct cmd_obj *obj; /* spin_lock_bh(&(queue->lock)); */ spin_lock_irqsave(&queue->lock, irqL); if (list_empty(&queue->queue)) obj = NULL; else { obj = container_of(get_next(&queue->queue), struct cmd_obj, list); list_del_init(&obj->list); } /* spin_unlock_bh(&(queue->lock)); */ spin_unlock_irqrestore(&queue->lock, irqL); return obj; } void rtw_free_evt_priv(struct evt_priv *pevtpriv) { _rtw_free_evt_priv(pevtpriv); } void rtw_free_cmd_priv(struct cmd_priv *pcmdpriv) { _rtw_free_cmd_priv(pcmdpriv); } int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj); int rtw_cmd_filter(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj) { u8 bAllow = false; /* set to true to allow enqueuing cmd when hw_init_completed is false */ if (cmd_obj->cmdcode == GEN_CMD_CODE(_SetChannelPlan)) bAllow = true; if ((!pcmdpriv->padapter->hw_init_completed && !bAllow) || !atomic_read(&pcmdpriv->cmdthd_running)) /* com_thread not running */ return _FAIL; return _SUCCESS; } int rtw_enqueue_cmd(struct cmd_priv *pcmdpriv, struct cmd_obj *cmd_obj) { int res = _FAIL; struct adapter *padapter = pcmdpriv->padapter; if (!cmd_obj) goto exit; cmd_obj->padapter = padapter; res = rtw_cmd_filter(pcmdpriv, cmd_obj); if (res == _FAIL) { rtw_free_cmd_obj(cmd_obj); goto exit; } res = _rtw_enqueue_cmd(&pcmdpriv->cmd_queue, cmd_obj); if (res == _SUCCESS) complete(&pcmdpriv->cmd_queue_comp); exit: return res; } struct cmd_obj *rtw_dequeue_cmd(struct cmd_priv *pcmdpriv) { return _rtw_dequeue_cmd(&pcmdpriv->cmd_queue); } void rtw_free_cmd_obj(struct cmd_obj *pcmd) { if ((pcmd->cmdcode != _JoinBss_CMD_) && (pcmd->cmdcode != _CreateBss_CMD_)) { /* free parmbuf in cmd_obj */ kfree(pcmd->parmbuf); } if (pcmd->rsp) { if (pcmd->rspsz != 0) { /* free rsp in cmd_obj */ kfree(pcmd->rsp); } } /* free cmd_obj */ kfree(pcmd); } void rtw_stop_cmd_thread(struct adapter *adapter) { if (adapter->cmdThread && atomic_read(&adapter->cmdpriv.cmdthd_running) && adapter->cmdpriv.stop_req == 0) { adapter->cmdpriv.stop_req = 1; complete(&adapter->cmdpriv.cmd_queue_comp); wait_for_completion(&adapter->cmdpriv.terminate_cmdthread_comp); } } int rtw_cmd_thread(void *context) { u8 ret; struct cmd_obj *pcmd; u8 *pcmdbuf; u8 (*cmd_hdl)(struct adapter *padapter, u8 *pbuf); void (*pcmd_callback)(struct adapter *dev, struct cmd_obj *pcmd); struct adapter *padapter = context; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct drvextra_cmd_parm *extra_parm = NULL; allow_signal(SIGTERM); pcmdbuf = pcmdpriv->cmd_buf; pcmdpriv->stop_req = 0; atomic_set(&pcmdpriv->cmdthd_running, true); complete(&pcmdpriv->terminate_cmdthread_comp); while (1) { if (wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp)) { netdev_dbg(padapter->pnetdev, FUNC_ADPT_FMT " wait_for_completion_interruptible(&pcmdpriv->cmd_queue_comp) return != 0, break\n", FUNC_ADPT_ARG(padapter)); break; } if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { netdev_dbg(padapter->pnetdev, "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n", __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__); break; } if (pcmdpriv->stop_req) { 
netdev_dbg(padapter->pnetdev, FUNC_ADPT_FMT " stop_req:%u, break\n", FUNC_ADPT_ARG(padapter), pcmdpriv->stop_req); break; } if (list_empty(&pcmdpriv->cmd_queue.queue)) continue; if (rtw_register_cmd_alive(padapter) != _SUCCESS) continue; _next: if (padapter->bDriverStopped || padapter->bSurpriseRemoved) { netdev_dbg(padapter->pnetdev, "%s: DriverStopped(%d) SurpriseRemoved(%d) break at line %d\n", __func__, padapter->bDriverStopped, padapter->bSurpriseRemoved, __LINE__); break; } pcmd = rtw_dequeue_cmd(pcmdpriv); if (!pcmd) { rtw_unregister_cmd_alive(padapter); continue; } if (rtw_cmd_filter(pcmdpriv, pcmd) == _FAIL) { pcmd->res = H2C_DROPPED; goto post_process; } pcmdpriv->cmd_issued_cnt++; pcmd->cmdsz = round_up((pcmd->cmdsz), 4); memcpy(pcmdbuf, pcmd->parmbuf, pcmd->cmdsz); if (pcmd->cmdcode < ARRAY_SIZE(wlancmds)) { cmd_hdl = wlancmds[pcmd->cmdcode].h2cfuns; if (cmd_hdl) { ret = cmd_hdl(pcmd->padapter, pcmdbuf); pcmd->res = ret; } pcmdpriv->cmd_seq++; } else { pcmd->res = H2C_PARAMETERS_ERROR; } cmd_hdl = NULL; post_process: if (mutex_lock_interruptible(&pcmd->padapter->cmdpriv.sctx_mutex) == 0) { if (pcmd->sctx) { netdev_dbg(padapter->pnetdev, FUNC_ADPT_FMT " pcmd->sctx\n", FUNC_ADPT_ARG(pcmd->padapter)); if (pcmd->res == H2C_SUCCESS) rtw_sctx_done(&pcmd->sctx); else rtw_sctx_done_err(&pcmd->sctx, RTW_SCTX_DONE_CMD_ERROR); } mutex_unlock(&pcmd->padapter->cmdpriv.sctx_mutex); } /* call callback function for post-processed */ if (pcmd->cmdcode < ARRAY_SIZE(rtw_cmd_callback)) { pcmd_callback = rtw_cmd_callback[pcmd->cmdcode].callback; if (!pcmd_callback) { rtw_free_cmd_obj(pcmd); } else { /* todo: !!! fill rsp_buf to pcmd->rsp if (pcmd->rsp!= NULL) */ pcmd_callback(pcmd->padapter, pcmd);/* need consider that free cmd_obj in rtw_cmd_callback */ } } else { rtw_free_cmd_obj(pcmd); } flush_signals_thread(); goto _next; } /* free all cmd_obj resources */ do { pcmd = rtw_dequeue_cmd(pcmdpriv); if (!pcmd) { rtw_unregister_cmd_alive(padapter); break; } if (pcmd->cmdcode == GEN_CMD_CODE(_Set_Drv_Extra)) { extra_parm = (struct drvextra_cmd_parm *)pcmd->parmbuf; if (extra_parm->pbuf && extra_parm->size > 0) kfree(extra_parm->pbuf); } rtw_free_cmd_obj(pcmd); } while (1); complete(&pcmdpriv->terminate_cmdthread_comp); atomic_set(&pcmdpriv->cmdthd_running, false); return 0; } /* * rtw_sitesurvey_cmd(~) * ### NOTE:#### (!!!!) 
* MUST TAKE CARE THAT BEFORE CALLING THIS FUNC, YOU SHOULD HAVE LOCKED pmlmepriv->lock */ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid, int ssid_num, struct rtw_ieee80211_channel *ch, int ch_num) { u8 res = _FAIL; struct cmd_obj *ph2c; struct sitesurvey_parm *psurveyPara; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (check_fwstate(pmlmepriv, _FW_LINKED)) rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) return _FAIL; psurveyPara = rtw_zmalloc(sizeof(struct sitesurvey_parm)); if (!psurveyPara) { kfree(ph2c); return _FAIL; } rtw_free_network_queue(padapter, false); init_h2fwcmd_w_parm_no_rsp(ph2c, psurveyPara, GEN_CMD_CODE(_SiteSurvey)); /* psurveyPara->bsslimit = 48; */ psurveyPara->scan_mode = pmlmepriv->scan_mode; /* prepare ssid list */ if (ssid) { int i; for (i = 0; i < ssid_num && i < RTW_SSID_SCAN_AMOUNT; i++) { if (ssid[i].ssid_length) { memcpy(&psurveyPara->ssid[i], &ssid[i], sizeof(struct ndis_802_11_ssid)); psurveyPara->ssid_num++; } } } /* prepare channel list */ if (ch) { int i; for (i = 0; i < ch_num && i < RTW_CHANNEL_SCAN_AMOUNT; i++) { if (ch[i].hw_value && !(ch[i].flags & RTW_IEEE80211_CHAN_DISABLED)) { memcpy(&psurveyPara->ch[i], &ch[i], sizeof(struct rtw_ieee80211_channel)); psurveyPara->ch_num++; } } } set_fwstate(pmlmepriv, _FW_UNDER_SURVEY); res = rtw_enqueue_cmd(pcmdpriv, ph2c); if (res == _SUCCESS) { pmlmepriv->scan_start_time = jiffies; _set_timer(&pmlmepriv->scan_to_timer, SCANNING_TIMEOUT); } else { _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } return res; } void rtw_getbbrfreg_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) { /* rtw_free_cmd_obj(pcmd); */ kfree(pcmd->parmbuf); kfree(pcmd); } u8 rtw_createbss_cmd(struct adapter *padapter) { struct cmd_obj *pcmd; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct wlan_bssid_ex *pdev_network = &padapter->registrypriv.dev_network; u8 res = _SUCCESS; pcmd = rtw_zmalloc(sizeof(struct cmd_obj)); if (!pcmd) { res = _FAIL; goto exit; } INIT_LIST_HEAD(&pcmd->list); pcmd->cmdcode = _CreateBss_CMD_; pcmd->parmbuf = (unsigned char *)pdev_network; pcmd->cmdsz = get_wlan_bssid_ex_sz((struct wlan_bssid_ex *)pdev_network); pcmd->rsp = NULL; pcmd->rspsz = 0; pdev_network->length = pcmd->cmdsz; res = rtw_enqueue_cmd(pcmdpriv, pcmd); exit: return res; } int rtw_startbss_cmd(struct adapter *padapter, int flags) { struct cmd_obj *pcmd; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct submit_ctx sctx; int res = _SUCCESS; if (flags & RTW_CMDF_DIRECTLY) { /* no need to enqueue, do the cmd hdl directly and free cmd parameter */ start_bss_network(padapter); } else { /* need enqueue, prepare cmd_obj and enqueue */ pcmd = rtw_zmalloc(sizeof(struct cmd_obj)); if (!pcmd) { res = _FAIL; goto exit; } INIT_LIST_HEAD(&pcmd->list); pcmd->cmdcode = GEN_CMD_CODE(_CreateBss); pcmd->parmbuf = NULL; pcmd->cmdsz = 0; pcmd->rsp = NULL; pcmd->rspsz = 0; if (flags & RTW_CMDF_WAIT_ACK) { pcmd->sctx = &sctx; rtw_sctx_init(&sctx, 2000); } res = rtw_enqueue_cmd(pcmdpriv, pcmd); if (res == _SUCCESS && (flags & RTW_CMDF_WAIT_ACK)) { rtw_sctx_wait(&sctx); if (mutex_lock_interruptible(&pcmdpriv->sctx_mutex) == 0) { if (sctx.status == RTW_SCTX_SUBMITTED) pcmd->sctx = NULL; mutex_unlock(&pcmdpriv->sctx_mutex); } } } exit: return res; } u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork) { u8 res = _SUCCESS; uint t_len = 0; struct wlan_bssid_ex *psecnetwork; struct cmd_obj 
*pcmd; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; struct security_priv *psecuritypriv = &padapter->securitypriv; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; enum ndis_802_11_network_infrastructure ndis_network_mode = pnetwork->network.infrastructure_mode; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info; u32 tmp_len; u8 *ptmp = NULL; pcmd = rtw_zmalloc(sizeof(struct cmd_obj)); if (!pcmd) { res = _FAIL; goto exit; } /* for ies is fix buf size */ t_len = sizeof(struct wlan_bssid_ex); /* for hidden ap to set fw_state here */ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) != true) { switch (ndis_network_mode) { case Ndis802_11IBSS: set_fwstate(pmlmepriv, WIFI_ADHOC_STATE); break; case Ndis802_11Infrastructure: set_fwstate(pmlmepriv, WIFI_STATION_STATE); break; case Ndis802_11APMode: case Ndis802_11AutoUnknown: case Ndis802_11InfrastructureMax: break; } } psecnetwork = (struct wlan_bssid_ex *)&psecuritypriv->sec_bss; memset(psecnetwork, 0, t_len); memcpy(psecnetwork, &pnetwork->network, get_wlan_bssid_ex_sz(&pnetwork->network)); psecuritypriv->authenticator_ie[0] = (unsigned char)psecnetwork->ie_length; if ((psecnetwork->ie_length-12) < (256-1)) memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], psecnetwork->ie_length-12); else memcpy(&psecuritypriv->authenticator_ie[1], &psecnetwork->ies[12], (256-1)); psecnetwork->ie_length = 0; /* Added by Albert 2009/02/18 */ /* If the driver wants to use the bssid to create the connection. */ /* If not, we have to copy the connecting AP's MAC address to it so that */ /* the driver just has the bssid information for PMKIDList searching. */ if (!pmlmepriv->assoc_by_bssid) memcpy(&pmlmepriv->assoc_bssid[0], &pnetwork->network.mac_address[0], ETH_ALEN); psecnetwork->ie_length = rtw_restruct_sec_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length); pqospriv->qos_option = 0; if (pregistrypriv->wmm_enable) { tmp_len = rtw_restruct_wmm_ie(padapter, &pnetwork->network.ies[0], &psecnetwork->ies[0], pnetwork->network.ie_length, psecnetwork->ie_length); if (psecnetwork->ie_length != tmp_len) { psecnetwork->ie_length = tmp_len; pqospriv->qos_option = 1; /* There is WMM IE in this corresp. beacon */ } else { pqospriv->qos_option = 0;/* There is no WMM IE in this corresp. beacon */ } } phtpriv->ht_option = false; ptmp = rtw_get_ie(&pnetwork->network.ies[12], WLAN_EID_HT_CAPABILITY, &tmp_len, pnetwork->network.ie_length-12); if (pregistrypriv->ht_enable && ptmp && tmp_len > 0) { /* Added by Albert 2010/06/23 */ /* For the WEP mode, we will use the bg mode to do the connection to avoid some IOT issue. */ /* Especially for Realtek 8192u SoftAP. 
*/ if ((padapter->securitypriv.dot11PrivacyAlgrthm != _WEP40_) && (padapter->securitypriv.dot11PrivacyAlgrthm != _WEP104_) && (padapter->securitypriv.dot11PrivacyAlgrthm != _TKIP_)) { rtw_ht_use_default_setting(padapter); rtw_build_wmm_ie_ht(padapter, &psecnetwork->ies[12], &psecnetwork->ie_length); /* rtw_restructure_ht_ie */ rtw_restructure_ht_ie(padapter, &pnetwork->network.ies[12], &psecnetwork->ies[0], pnetwork->network.ie_length-12, &psecnetwork->ie_length, pnetwork->network.configuration.ds_config); } } rtw_append_exented_cap(padapter, &psecnetwork->ies[0], &psecnetwork->ie_length); pmlmeinfo->assoc_AP_vendor = check_assoc_AP(pnetwork->network.ies, pnetwork->network.ie_length); pcmd->cmdsz = get_wlan_bssid_ex_sz(psecnetwork);/* get cmdsz before endian conversion */ INIT_LIST_HEAD(&pcmd->list); pcmd->cmdcode = _JoinBss_CMD_;/* GEN_CMD_CODE(_JoinBss) */ pcmd->parmbuf = (unsigned char *)psecnetwork; pcmd->rsp = NULL; pcmd->rspsz = 0; res = rtw_enqueue_cmd(pcmdpriv, pcmd); exit: return res; } u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueue) /* for sta_mode */ { struct cmd_obj *cmdobj = NULL; struct disconnect_parm *param = NULL; struct cmd_priv *cmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; /* prepare cmd parameter */ param = rtw_zmalloc(sizeof(*param)); if (!param) { res = _FAIL; goto exit; } param->deauth_timeout_ms = deauth_timeout_ms; if (enqueue) { /* need enqueue, prepare cmd_obj and enqueue */ cmdobj = rtw_zmalloc(sizeof(*cmdobj)); if (!cmdobj) { res = _FAIL; kfree(param); goto exit; } init_h2fwcmd_w_parm_no_rsp(cmdobj, param, _DisConnect_CMD_); res = rtw_enqueue_cmd(cmdpriv, cmdobj); } else { /* no need to enqueue, do the cmd hdl directly and free cmd parameter */ if (disconnect_hdl(padapter, (u8 *)param) != H2C_SUCCESS) res = _FAIL; kfree(param); } exit: return res; } u8 rtw_setopmode_cmd(struct adapter *padapter, enum ndis_802_11_network_infrastructure networktype, bool enqueue) { struct cmd_obj *ph2c; struct setopmode_parm *psetop; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; psetop = rtw_zmalloc(sizeof(struct setopmode_parm)); if (!psetop) { res = _FAIL; goto exit; } psetop->mode = (u8)networktype; if (enqueue) { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { kfree(psetop); res = _FAIL; goto exit; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetop, _SetOpMode_CMD_); res = rtw_enqueue_cmd(pcmdpriv, ph2c); } else { setopmode_hdl(padapter, (u8 *)psetop); kfree(psetop); } exit: return res; } u8 rtw_setstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 unicast_key, bool enqueue) { struct cmd_obj *ph2c; struct set_stakey_parm *psetstakey_para; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct set_stakey_rsp *psetstakey_rsp = NULL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct security_priv *psecuritypriv = &padapter->securitypriv; u8 res = _SUCCESS; psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm)); if (!psetstakey_para) { res = _FAIL; goto exit; } memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) psetstakey_para->algorithm = (unsigned char)psecuritypriv->dot11PrivacyAlgrthm; else GET_ENCRY_ALGO(psecuritypriv, sta, psetstakey_para->algorithm, false); if (unicast_key) memcpy(&psetstakey_para->key, &sta->dot118021x_UncstKey, 16); else memcpy(&psetstakey_para->key, &psecuritypriv->dot118021XGrpKey[psecuritypriv->dot118021XGrpKeyid].skey, 16); /* jeff: set this because at least sw key is ready */ padapter->securitypriv.busetkipkey = 
true; if (enqueue) { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { kfree(psetstakey_para); res = _FAIL; goto exit; } psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp)); if (!psetstakey_rsp) { kfree(ph2c); kfree(psetstakey_para); res = _FAIL; goto exit; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_); ph2c->rsp = (u8 *)psetstakey_rsp; ph2c->rspsz = sizeof(struct set_stakey_rsp); res = rtw_enqueue_cmd(pcmdpriv, ph2c); } else { set_stakey_hdl(padapter, (u8 *)psetstakey_para); kfree(psetstakey_para); } exit: return res; } u8 rtw_clearstakey_cmd(struct adapter *padapter, struct sta_info *sta, u8 enqueue) { struct cmd_obj *ph2c; struct set_stakey_parm *psetstakey_para; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct set_stakey_rsp *psetstakey_rsp = NULL; s16 cam_id = 0; u8 res = _SUCCESS; if (!enqueue) { while ((cam_id = rtw_camid_search(padapter, sta->hwaddr, -1)) >= 0) { netdev_dbg(padapter->pnetdev, "clear key for addr:%pM, camid:%d\n", MAC_ARG(sta->hwaddr), cam_id); clear_cam_entry(padapter, cam_id); rtw_camid_free(padapter, cam_id); } } else { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } psetstakey_para = rtw_zmalloc(sizeof(struct set_stakey_parm)); if (!psetstakey_para) { kfree(ph2c); res = _FAIL; goto exit; } psetstakey_rsp = rtw_zmalloc(sizeof(struct set_stakey_rsp)); if (!psetstakey_rsp) { kfree(ph2c); kfree(psetstakey_para); res = _FAIL; goto exit; } init_h2fwcmd_w_parm_no_rsp(ph2c, psetstakey_para, _SetStaKey_CMD_); ph2c->rsp = (u8 *)psetstakey_rsp; ph2c->rspsz = sizeof(struct set_stakey_rsp); memcpy(psetstakey_para->addr, sta->hwaddr, ETH_ALEN); psetstakey_para->algorithm = _NO_PRIVACY_; res = rtw_enqueue_cmd(pcmdpriv, ph2c); } exit: return res; } u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr) { struct cmd_priv *pcmdpriv = &padapter->cmdpriv; struct cmd_obj *ph2c; struct addBaReq_parm *paddbareq_parm; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } paddbareq_parm = rtw_zmalloc(sizeof(struct addBaReq_parm)); if (!paddbareq_parm) { kfree(ph2c); res = _FAIL; goto exit; } paddbareq_parm->tid = tid; memcpy(paddbareq_parm->addr, addr, ETH_ALEN); init_h2fwcmd_w_parm_no_rsp(ph2c, paddbareq_parm, GEN_CMD_CODE(_AddBAReq)); /* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } /* add for CONFIG_IEEE80211W, none 11w can use it */ u8 rtw_reset_securitypriv_cmd(struct adapter *padapter) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = RESET_SECURITYPRIV; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); /* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } u8 rtw_free_assoc_resources_cmd(struct adapter *padapter) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if 
(!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = FREE_ASSOC_RESOURCES; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); /* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; /* only primary padapter does this cmd */ ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = DYNAMIC_CHK_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); /* rtw_enqueue_cmd(pcmdpriv, ph2c); */ res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } static void collect_traffic_statistics(struct adapter *padapter) { struct dvobj_priv *pdvobjpriv = adapter_to_dvobj(padapter); /* Tx */ pdvobjpriv->traffic_stat.tx_bytes = padapter->xmitpriv.tx_bytes; pdvobjpriv->traffic_stat.tx_pkts = padapter->xmitpriv.tx_pkts; pdvobjpriv->traffic_stat.tx_drop = padapter->xmitpriv.tx_drop; /* Rx */ pdvobjpriv->traffic_stat.rx_bytes = padapter->recvpriv.rx_bytes; pdvobjpriv->traffic_stat.rx_pkts = padapter->recvpriv.rx_pkts; pdvobjpriv->traffic_stat.rx_drop = padapter->recvpriv.rx_drop; /* Calculate throughput in last interval */ pdvobjpriv->traffic_stat.cur_tx_bytes = pdvobjpriv->traffic_stat.tx_bytes - pdvobjpriv->traffic_stat.last_tx_bytes; pdvobjpriv->traffic_stat.cur_rx_bytes = pdvobjpriv->traffic_stat.rx_bytes - pdvobjpriv->traffic_stat.last_rx_bytes; pdvobjpriv->traffic_stat.last_tx_bytes = pdvobjpriv->traffic_stat.tx_bytes; pdvobjpriv->traffic_stat.last_rx_bytes = pdvobjpriv->traffic_stat.rx_bytes; pdvobjpriv->traffic_stat.cur_tx_tp = (u32)(pdvobjpriv->traffic_stat.cur_tx_bytes * 8/2/1024/1024); pdvobjpriv->traffic_stat.cur_rx_tp = (u32)(pdvobjpriv->traffic_stat.cur_rx_bytes * 8/2/1024/1024); } u8 traffic_status_watchdog(struct adapter *padapter, u8 from_timer) { u8 bEnterPS = false; u16 BusyThresholdHigh = 25; u16 BusyThresholdLow = 10; u16 BusyThreshold = BusyThresholdHigh; u8 bBusyTraffic = false, bTxBusyTraffic = false, bRxBusyTraffic = false; u8 bHigherBusyTraffic = false, bHigherBusyRxTraffic = false, bHigherBusyTxTraffic = false; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; collect_traffic_statistics(padapter); /* */ /* Determine if our traffic is busy now */ /* */ if ((check_fwstate(pmlmepriv, _FW_LINKED)) /*&& !MgntInitAdapterInProgress(pMgntInfo)*/) { /* if we raise bBusyTraffic in last watchdog, using lower threshold. */ if (pmlmepriv->LinkDetectInfo.bBusyTraffic) BusyThreshold = BusyThresholdLow; if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > BusyThreshold || pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > BusyThreshold) { bBusyTraffic = true; if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) bRxBusyTraffic = true; else bTxBusyTraffic = true; } /* Higher Tx/Rx data. 
*/ if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > 4000 || pmlmepriv->LinkDetectInfo.NumTxOkInPeriod > 4000) { bHigherBusyTraffic = true; if (pmlmepriv->LinkDetectInfo.NumRxOkInPeriod > pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) bHigherBusyRxTraffic = true; else bHigherBusyTxTraffic = true; } /* check traffic for powersaving. */ if (((pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod + pmlmepriv->LinkDetectInfo.NumTxOkInPeriod) > 8) || (pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) { bEnterPS = false; if (bBusyTraffic) { if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount <= 4) pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 4; pmlmepriv->LinkDetectInfo.TrafficTransitionCount++; if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount > 30/*TrafficTransitionLevel*/) pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 30; } } else { if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount >= 2) pmlmepriv->LinkDetectInfo.TrafficTransitionCount -= 2; else pmlmepriv->LinkDetectInfo.TrafficTransitionCount = 0; if (pmlmepriv->LinkDetectInfo.TrafficTransitionCount == 0) bEnterPS = true; } /* LeisurePS only work in infra mode. */ if (bEnterPS) { if (!from_timer) LPS_Enter(padapter, "TRAFFIC_IDLE"); } else { if (!from_timer) LPS_Leave(padapter, "TRAFFIC_BUSY"); else rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_TRAFFIC_BUSY, 1); } } else { struct dvobj_priv *dvobj = adapter_to_dvobj(padapter); int n_assoc_iface = 0; if (check_fwstate(&dvobj->padapters->mlmepriv, WIFI_ASOC_STATE)) n_assoc_iface++; if (!from_timer && n_assoc_iface == 0) LPS_Leave(padapter, "NON_LINKED"); } pmlmepriv->LinkDetectInfo.NumRxOkInPeriod = 0; pmlmepriv->LinkDetectInfo.NumTxOkInPeriod = 0; pmlmepriv->LinkDetectInfo.NumRxUnicastOkInPeriod = 0; pmlmepriv->LinkDetectInfo.bBusyTraffic = bBusyTraffic; pmlmepriv->LinkDetectInfo.bTxBusyTraffic = bTxBusyTraffic; pmlmepriv->LinkDetectInfo.bRxBusyTraffic = bRxBusyTraffic; pmlmepriv->LinkDetectInfo.bHigherBusyTraffic = bHigherBusyTraffic; pmlmepriv->LinkDetectInfo.bHigherBusyRxTraffic = bHigherBusyRxTraffic; pmlmepriv->LinkDetectInfo.bHigherBusyTxTraffic = bHigherBusyTxTraffic; return bEnterPS; } static void dynamic_chk_wk_hdl(struct adapter *padapter) { struct mlme_priv *pmlmepriv; pmlmepriv = &padapter->mlmepriv; if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) expire_timeout_chk(padapter); /* for debug purpose */ _linked_info_dump(padapter); /* if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING|_FW_UNDER_SURVEY) ==false) */ { linked_status_chk(padapter); traffic_status_watchdog(padapter, 0); } rtw_hal_dm_watchdog(padapter); /* check_hw_pbc(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->type); */ /* */ /* BT-Coexist */ /* */ hal_btcoex_Handler(padapter); /* always call rtw_ps_processor() at last one. 
*/ rtw_ps_processor(padapter); } void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type); void lps_ctrl_wk_hdl(struct adapter *padapter, u8 lps_ctrl_type) { struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter); struct mlme_priv *pmlmepriv = &padapter->mlmepriv; u8 mstatus; if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) || check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) { return; } switch (lps_ctrl_type) { case LPS_CTRL_SCAN: hal_btcoex_ScanNotify(padapter, true); if (check_fwstate(pmlmepriv, _FW_LINKED)) { /* connect */ LPS_Leave(padapter, "LPS_CTRL_SCAN"); } break; case LPS_CTRL_JOINBSS: LPS_Leave(padapter, "LPS_CTRL_JOINBSS"); break; case LPS_CTRL_CONNECT: mstatus = 1;/* connect */ /* Reset LPS Setting */ pwrpriv->LpsIdleCount = 0; rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); rtw_btcoex_MediaStatusNotify(padapter, mstatus); break; case LPS_CTRL_DISCONNECT: mstatus = 0;/* disconnect */ rtw_btcoex_MediaStatusNotify(padapter, mstatus); LPS_Leave(padapter, "LPS_CTRL_DISCONNECT"); rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_JOINBSSRPT, (u8 *)(&mstatus)); break; case LPS_CTRL_SPECIAL_PACKET: pwrpriv->DelayLPSLastTimeStamp = jiffies; hal_btcoex_SpecialPacketNotify(padapter, PACKET_DHCP); LPS_Leave(padapter, "LPS_CTRL_SPECIAL_PACKET"); break; case LPS_CTRL_LEAVE: LPS_Leave(padapter, "LPS_CTRL_LEAVE"); break; case LPS_CTRL_TRAFFIC_BUSY: LPS_Leave(padapter, "LPS_CTRL_TRAFFIC_BUSY"); break; default: break; } } u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; /* struct pwrctrl_priv *pwrctrlpriv = adapter_to_pwrctl(padapter); */ u8 res = _SUCCESS; /* if (!pwrctrlpriv->bLeisurePs) */ /* return res; */ if (enqueue) { ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = LPS_CTRL_WK_CID; pdrvextra_cmd_parm->type = lps_ctrl_type; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ph2c); } else { lps_ctrl_wk_hdl(padapter, lps_ctrl_type); } exit: return res; } static void rtw_dm_in_lps_hdl(struct adapter *padapter) { rtw_hal_set_hwreg(padapter, HW_VAR_DM_IN_LPS, NULL); } u8 rtw_dm_in_lps_wk_cmd(struct adapter *padapter) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = DM_IN_LPS_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } static void rtw_lps_change_dtim_hdl(struct adapter *padapter, u8 dtim) { struct pwrctrl_priv *pwrpriv = adapter_to_pwrctl(padapter); if (dtim <= 0 || dtim > 16) return; if (hal_btcoex_IsBtControlLps(padapter)) return; mutex_lock(&pwrpriv->lock); pwrpriv->dtim = dtim; if (pwrpriv->fw_current_in_ps_mode && (pwrpriv->pwr_mode > PS_MODE_ACTIVE)) { u8 ps_mode = 
pwrpriv->pwr_mode; rtw_hal_set_hwreg(padapter, HW_VAR_H2C_FW_PWRMODE, (u8 *)(&ps_mode)); } mutex_unlock(&pwrpriv->lock); } static void rtw_dm_ra_mask_hdl(struct adapter *padapter, struct sta_info *psta) { if (psta) set_sta_rate(padapter, psta); } u8 rtw_dm_ra_mask_wk_cmd(struct adapter *padapter, u8 *psta) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = DM_RA_MSK_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = psta; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } u8 rtw_ps_cmd(struct adapter *padapter) { struct cmd_obj *ppscmd; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ppscmd = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ppscmd) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ppscmd); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = POWER_SAVING_CTRL_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ppscmd, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ppscmd); exit: return res; } u32 g_wait_hiq_empty; static void rtw_chk_hi_queue_hdl(struct adapter *padapter) { struct sta_info *psta_bmc; struct sta_priv *pstapriv = &padapter->stapriv; unsigned long start = jiffies; u8 empty = false; psta_bmc = rtw_get_bcmc_stainfo(padapter); if (!psta_bmc) return; rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty); while (!empty && jiffies_to_msecs(jiffies - start) < g_wait_hiq_empty) { msleep(100); rtw_hal_get_hwreg(padapter, HW_VAR_CHK_HI_QUEUE_EMPTY, &empty); } if (psta_bmc->sleepq_len == 0) { if (empty == _SUCCESS) { bool update_tim = false; if (pstapriv->tim_bitmap & BIT(0)) update_tim = true; pstapriv->tim_bitmap &= ~BIT(0); pstapriv->sta_dz_bitmap &= ~BIT(0); if (update_tim) update_beacon(padapter, WLAN_EID_TIM, NULL, true); } else {/* re check again */ rtw_chk_hi_queue_cmd(padapter); } } } u8 rtw_chk_hi_queue_cmd(struct adapter *padapter) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = CHECK_HIQ_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = 0; pdrvextra_cmd_parm->pbuf = NULL; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } struct btinfo { u8 cid; u8 len; u8 bConnection:1; u8 bSCOeSCO:1; u8 bInQPage:1; u8 bACLBusy:1; u8 bSCOBusy:1; u8 bHID:1; u8 bA2DP:1; u8 bFTP:1; u8 retry_cnt:4; u8 rsvd_34:1; u8 rsvd_35:1; u8 rsvd_36:1; u8 rsvd_37:1; u8 rssi; u8 rsvd_50:1; u8 rsvd_51:1; u8 rsvd_52:1; u8 rsvd_53:1; u8 rsvd_54:1; u8 rsvd_55:1; u8 eSCO_SCO:1; u8 Master_Slave:1; u8 rsvd_6; u8 rsvd_7; }; static void 
rtw_btinfo_hdl(struct adapter *adapter, u8 *buf, u16 buf_len) { #define BTINFO_WIFI_FETCH 0x23 #define BTINFO_BT_AUTO_RPT 0x27 struct btinfo *info = (struct btinfo *)buf; u8 cmd_idx; u8 len; cmd_idx = info->cid; if (info->len > buf_len-2) { rtw_warn_on(1); len = buf_len-2; } else { len = info->len; } /* transform BT-FW btinfo to WiFI-FW C2H format and notify */ if (cmd_idx == BTINFO_WIFI_FETCH) buf[1] = 0; else if (cmd_idx == BTINFO_BT_AUTO_RPT) buf[1] = 2; hal_btcoex_BtInfoNotify(adapter, len+1, &buf[1]); } u8 rtw_c2h_packet_wk_cmd(struct adapter *padapter, u8 *pbuf, u16 length) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = C2H_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = length; pdrvextra_cmd_parm->pbuf = pbuf; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } /* dont call R/W in this function, beucase SDIO interrupt have claim host */ /* or deadlock will happen and cause special-systemserver-died in android */ u8 rtw_c2h_wk_cmd(struct adapter *padapter, u8 *c2h_evt) { struct cmd_obj *ph2c; struct drvextra_cmd_parm *pdrvextra_cmd_parm; struct cmd_priv *pcmdpriv = &padapter->cmdpriv; u8 res = _SUCCESS; ph2c = rtw_zmalloc(sizeof(struct cmd_obj)); if (!ph2c) { res = _FAIL; goto exit; } pdrvextra_cmd_parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm)); if (!pdrvextra_cmd_parm) { kfree(ph2c); res = _FAIL; goto exit; } pdrvextra_cmd_parm->ec_id = C2H_WK_CID; pdrvextra_cmd_parm->type = 0; pdrvextra_cmd_parm->size = c2h_evt?16:0; pdrvextra_cmd_parm->pbuf = c2h_evt; init_h2fwcmd_w_parm_no_rsp(ph2c, pdrvextra_cmd_parm, GEN_CMD_CODE(_Set_Drv_Extra)); res = rtw_enqueue_cmd(pcmdpriv, ph2c); exit: return res; } static void c2h_wk_callback(struct work_struct *work) { struct evt_priv *evtpriv = container_of(work, struct evt_priv, c2h_wk); struct adapter *adapter = container_of(evtpriv, struct adapter, evtpriv); u8 *c2h_evt; c2h_id_filter ccx_id_filter = rtw_hal_c2h_id_filter_ccx(adapter); evtpriv->c2h_wk_alive = true; while (!rtw_cbuf_empty(evtpriv->c2h_queue)) { c2h_evt = (u8 *)rtw_cbuf_pop(evtpriv->c2h_queue); if (c2h_evt) { /* This C2H event is read, clear it */ c2h_evt_clear(adapter); } else { c2h_evt = rtw_malloc(16); if (c2h_evt) { /* This C2H event is not read, read & clear now */ if (c2h_evt_read_88xx(adapter, c2h_evt) != _SUCCESS) { kfree(c2h_evt); continue; } } } /* Special pointer to trigger c2h_evt_clear only */ if ((void *)c2h_evt == (void *)evtpriv) continue; if (!rtw_hal_c2h_valid(adapter, c2h_evt)) { kfree(c2h_evt); continue; } if (ccx_id_filter(c2h_evt)) { /* Handle CCX report here */ rtw_hal_c2h_handler(adapter, c2h_evt); kfree(c2h_evt); } else { /* Enqueue into cmd_thread for others */ rtw_c2h_wk_cmd(adapter, c2h_evt); } } evtpriv->c2h_wk_alive = false; } u8 rtw_drvextra_cmd_hdl(struct adapter *padapter, unsigned char *pbuf) { struct drvextra_cmd_parm *pdrvextra_cmd; if (!pbuf) return H2C_PARAMETERS_ERROR; pdrvextra_cmd = (struct drvextra_cmd_parm *)pbuf; switch (pdrvextra_cmd->ec_id) { case DYNAMIC_CHK_WK_CID:/* only primary padapter go to this cmd, but execute dynamic_chk_wk_hdl() for two interfaces */ dynamic_chk_wk_hdl(padapter); break; 
case POWER_SAVING_CTRL_WK_CID: rtw_ps_processor(padapter); break; case LPS_CTRL_WK_CID: lps_ctrl_wk_hdl(padapter, (u8)pdrvextra_cmd->type); break; case DM_IN_LPS_WK_CID: rtw_dm_in_lps_hdl(padapter); break; case LPS_CHANGE_DTIM_CID: rtw_lps_change_dtim_hdl(padapter, (u8)pdrvextra_cmd->type); break; case CHECK_HIQ_WK_CID: rtw_chk_hi_queue_hdl(padapter); break; /* add for CONFIG_IEEE80211W, none 11w can use it */ case RESET_SECURITYPRIV: rtw_reset_securitypriv(padapter); break; case FREE_ASSOC_RESOURCES: rtw_free_assoc_resources(padapter, 1); break; case C2H_WK_CID: rtw_hal_set_hwreg_with_buf(padapter, HW_VAR_C2H_HANDLE, pdrvextra_cmd->pbuf, pdrvextra_cmd->size); break; case DM_RA_MSK_WK_CID: rtw_dm_ra_mask_hdl(padapter, (struct sta_info *)pdrvextra_cmd->pbuf); break; case BTINFO_WK_CID: rtw_btinfo_hdl(padapter, pdrvextra_cmd->pbuf, pdrvextra_cmd->size); break; default: break; } if (pdrvextra_cmd->pbuf && pdrvextra_cmd->size > 0) kfree(pdrvextra_cmd->pbuf); return H2C_SUCCESS; } void rtw_survey_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (pcmd->res != H2C_SUCCESS) { /* TODO: cancel timer and do timeout handler directly... */ _set_timer(&pmlmepriv->scan_to_timer, 1); } /* free cmd */ rtw_free_cmd_obj(pcmd); } void rtw_disassoc_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (pcmd->res != H2C_SUCCESS) { spin_lock_bh(&pmlmepriv->lock); set_fwstate(pmlmepriv, _FW_LINKED); spin_unlock_bh(&pmlmepriv->lock); return; } /* free cmd */ rtw_free_cmd_obj(pcmd); } void rtw_joinbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (pcmd->res != H2C_SUCCESS) { /* TODO: cancel timer and do timeout handler directly... 
*/ _set_timer(&pmlmepriv->assoc_timer, 1); } rtw_free_cmd_obj(pcmd); } void rtw_createbss_cmd_callback(struct adapter *padapter, struct cmd_obj *pcmd) { struct sta_info *psta = NULL; struct wlan_network *pwlan = NULL; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct wlan_bssid_ex *pnetwork = (struct wlan_bssid_ex *)pcmd->parmbuf; struct wlan_network *tgt_network = &pmlmepriv->cur_network; if (!pcmd->parmbuf) goto exit; if (pcmd->res != H2C_SUCCESS) _set_timer(&pmlmepriv->assoc_timer, 1); del_timer_sync(&pmlmepriv->assoc_timer); spin_lock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) { psta = rtw_get_stainfo(&padapter->stapriv, pnetwork->mac_address); if (!psta) { psta = rtw_alloc_stainfo(&padapter->stapriv, pnetwork->mac_address); if (!psta) goto createbss_cmd_fail; } rtw_indicate_connect(padapter); } else { pwlan = rtw_alloc_network(pmlmepriv); spin_lock_bh(&pmlmepriv->scanned_queue.lock); if (!pwlan) { pwlan = rtw_get_oldest_wlan_network(&pmlmepriv->scanned_queue); if (!pwlan) { spin_unlock_bh(&pmlmepriv->scanned_queue.lock); goto createbss_cmd_fail; } pwlan->last_scanned = jiffies; } else { list_add_tail(&pwlan->list, &pmlmepriv->scanned_queue.queue); } pnetwork->length = get_wlan_bssid_ex_sz(pnetwork); memcpy(&pwlan->network, pnetwork, pnetwork->length); /* pwlan->fixed = true; */ /* list_add_tail(&(pwlan->list), &pmlmepriv->scanned_queue.queue); */ /* copy pdev_network information to pmlmepriv->cur_network */ memcpy(&tgt_network->network, pnetwork, (get_wlan_bssid_ex_sz(pnetwork))); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); spin_unlock_bh(&pmlmepriv->scanned_queue.lock); /* we will set _FW_LINKED when there is one more sat to join us (rtw_stassoc_event_callback) */ } createbss_cmd_fail: spin_unlock_bh(&pmlmepriv->lock); exit: rtw_free_cmd_obj(pcmd); } void rtw_setstaKey_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) { struct sta_priv *pstapriv = &padapter->stapriv; struct set_stakey_rsp *psetstakey_rsp = (struct set_stakey_rsp *)(pcmd->rsp); struct sta_info *psta = rtw_get_stainfo(pstapriv, psetstakey_rsp->addr); if (!psta) goto exit; exit: rtw_free_cmd_obj(pcmd); } void rtw_setassocsta_cmdrsp_callback(struct adapter *padapter, struct cmd_obj *pcmd) { struct sta_priv *pstapriv = &padapter->stapriv; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct set_assocsta_parm *passocsta_parm = (struct set_assocsta_parm *)(pcmd->parmbuf); struct set_assocsta_rsp *passocsta_rsp = (struct set_assocsta_rsp *)(pcmd->rsp); struct sta_info *psta = rtw_get_stainfo(pstapriv, passocsta_parm->addr); if (!psta) goto exit; psta->aid = passocsta_rsp->cam_id; psta->mac_id = passocsta_rsp->cam_id; spin_lock_bh(&pmlmepriv->lock); if (check_fwstate(pmlmepriv, WIFI_MP_STATE) && check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); set_fwstate(pmlmepriv, _FW_LINKED); spin_unlock_bh(&pmlmepriv->lock); exit: rtw_free_cmd_obj(pcmd); }
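/*
 * Illustrative sketch only (not part of the original driver): nearly every
 * rtw_*_wk_cmd() above repeats the same boilerplate -- allocate a cmd_obj,
 * allocate a drvextra_cmd_parm, fill ec_id/type/size/pbuf, wrap it with
 * init_h2fwcmd_w_parm_no_rsp() and hand it to rtw_enqueue_cmd() so the
 * command thread runs it through rtw_drvextra_cmd_hdl().  A hypothetical
 * common helper distilling that pattern could look like this; the function
 * name is made up for illustration and does not exist in the driver.
 */
static u8 rtw_enqueue_drv_extra_cmd_sketch(struct adapter *padapter,
					   int ec_id, int type,
					   u8 *pbuf, u32 size)
{
	struct cmd_obj *ph2c;
	struct drvextra_cmd_parm *parm;
	struct cmd_priv *pcmdpriv = &padapter->cmdpriv;

	ph2c = rtw_zmalloc(sizeof(struct cmd_obj));
	if (!ph2c)
		return _FAIL;

	parm = rtw_zmalloc(sizeof(struct drvextra_cmd_parm));
	if (!parm) {
		kfree(ph2c);
		return _FAIL;
	}

	parm->ec_id = ec_id;
	parm->type = type;
	parm->size = size;
	parm->pbuf = pbuf;	/* freed by rtw_drvextra_cmd_hdl() when size > 0 */

	init_h2fwcmd_w_parm_no_rsp(ph2c, parm, GEN_CMD_CODE(_Set_Drv_Extra));

	return rtw_enqueue_cmd(pcmdpriv, ph2c);
}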
// SPDX-License-Identifier: GPL-2.0 // // MediaTek ALSA SoC Audio DAI TDM Control // // Copyright (c) 2022 MediaTek Inc. // Author: Jiaxin Yu <[email protected]> #include <linux/regmap.h> #include <sound/pcm_params.h> #include "mt8186-afe-clk.h" #include "mt8186-afe-common.h" #include "mt8186-afe-gpio.h" #include "mt8186-interconnection.h" #define TDM_HD_EN_W_NAME "TDM_HD_EN" #define TDM_MCLK_EN_W_NAME "TDM_MCLK_EN" #define MTK_AFE_TDM_KCONTROL_NAME "TDM_HD_Mux" struct mtk_afe_tdm_priv { unsigned int id; unsigned int rate; /* for determine which apll to use */ unsigned int bck_invert; unsigned int lck_invert; unsigned int lrck_width; unsigned int mclk_id; unsigned int mclk_multiple; /* according to sample rate */ unsigned int mclk_rate; unsigned int mclk_apll; unsigned int tdm_mode; unsigned int data_mode; unsigned int slave_mode; unsigned int low_jitter_en; }; enum { TDM_IN_I2S = 0, TDM_IN_LJ = 1, TDM_IN_RJ = 2, TDM_IN_DSP_A = 4, TDM_IN_DSP_B = 5, }; enum { TDM_DATA_ONE_PIN = 0, TDM_DATA_MULTI_PIN, }; enum { TDM_BCK_NON_INV = 0, TDM_BCK_INV = 1, }; enum { TDM_LCK_NON_INV = 0, TDM_LCK_INV = 1, }; static unsigned int get_tdm_lrck_width(snd_pcm_format_t format, unsigned int mode) { if (mode == TDM_IN_DSP_A || mode == TDM_IN_DSP_B) return 0; return snd_pcm_format_physical_width(format) - 1; } static unsigned int get_tdm_ch_fixup(unsigned int channels) { if (channels > 4) return 8; else if (channels > 2) return 4; return 2; } static unsigned int get_tdm_ch_per_sdata(unsigned int mode, unsigned int channels) { if (mode == TDM_IN_DSP_A || mode == TDM_IN_DSP_B) return get_tdm_ch_fixup(channels); return 2; } enum { SUPPLY_SEQ_APLL, SUPPLY_SEQ_TDM_MCK_EN, SUPPLY_SEQ_TDM_HD_EN, SUPPLY_SEQ_TDM_EN, }; static int get_tdm_id_by_name(const char *name) { return MT8186_DAI_TDM_IN; } static int mtk_tdm_en_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(w->name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x\n", __func__, w->name, event); switch (event) { case SND_SOC_DAPM_PRE_PMU: mt8186_afe_gpio_request(afe->dev, true, tdm_priv->id, 0); break; case SND_SOC_DAPM_POST_PMD: mt8186_afe_gpio_request(afe->dev, false, tdm_priv->id, 0); break; default: break; } return 0; } static int mtk_tdm_mck_en_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(w->name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; dev_dbg(cmpnt->dev, "%s(), name %s, event 0x%x, dai_id %d\n", __func__, w->name, event, dai_id); switch (event) { case SND_SOC_DAPM_PRE_PMU: mt8186_mck_enable(afe, tdm_priv->mclk_id, tdm_priv->mclk_rate); break; case SND_SOC_DAPM_POST_PMD: tdm_priv->mclk_rate = 0; mt8186_mck_disable(afe, tdm_priv->mclk_id); break; default: break; } return 0; } /* dai component */ /* tdm virtual mux to output widget */ static const char * const tdm_mux_map[] = { "Normal", "Dummy_Widget", }; static int tdm_mux_map_value[] = { 0, 1, }; static SOC_VALUE_ENUM_SINGLE_AUTODISABLE_DECL(tdm_mux_map_enum, SND_SOC_NOPM, 0, 1, tdm_mux_map, tdm_mux_map_value); 
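/*
 * Worked examples for the get_tdm_* helpers above (illustrative only):
 *
 *   get_tdm_ch_fixup(3)                            -> 4
 *   get_tdm_ch_fixup(6)                            -> 8
 *   get_tdm_ch_per_sdata(TDM_IN_DSP_A, 6)          -> 8  (all slots on one data pin)
 *   get_tdm_ch_per_sdata(TDM_IN_I2S, 6)            -> 2  (two channels per data pin)
 *   get_tdm_lrck_width(SNDRV_PCM_FORMAT_S24_LE,
 *                      TDM_IN_I2S)                 -> 31 (32-bit physical width - 1)
 *   get_tdm_lrck_width(<any format>, TDM_IN_DSP_A) -> 0  (LRCK is a pulse in DSP modes)
 */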
static const struct snd_kcontrol_new tdm_in_mux_control = SOC_DAPM_ENUM("TDM In Select", tdm_mux_map_enum); static const struct snd_soc_dapm_widget mtk_dai_tdm_widgets[] = { SND_SOC_DAPM_CLOCK_SUPPLY("aud_tdm_clk"), SND_SOC_DAPM_SUPPLY_S("TDM_EN", SUPPLY_SEQ_TDM_EN, ETDM_IN1_CON0, ETDM_IN1_CON0_REG_ETDM_IN_EN_SFT, 0, mtk_tdm_en_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), /* tdm hd en */ SND_SOC_DAPM_SUPPLY_S(TDM_HD_EN_W_NAME, SUPPLY_SEQ_TDM_HD_EN, ETDM_IN1_CON2, ETDM_IN1_CON2_REG_CLOCK_SOURCE_SEL_SFT, 0, NULL, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SUPPLY_S(TDM_MCLK_EN_W_NAME, SUPPLY_SEQ_TDM_MCK_EN, SND_SOC_NOPM, 0, 0, mtk_tdm_mck_en_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_INPUT("TDM_DUMMY_IN"), SND_SOC_DAPM_MUX("TDM_In_Mux", SND_SOC_NOPM, 0, 0, &tdm_in_mux_control), }; static int mtk_afe_tdm_mclk_connect(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct snd_soc_dapm_widget *w = sink; struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(w->name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; return (tdm_priv->mclk_rate > 0) ? 1 : 0; } static int mtk_afe_tdm_mclk_apll_connect(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct snd_soc_dapm_widget *w = sink; struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(w->name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; int cur_apll; /* which apll */ cur_apll = mt8186_get_apll_by_name(afe, source->name); return (tdm_priv->mclk_apll == cur_apll) ? 1 : 0; } static int mtk_afe_tdm_hd_connect(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct snd_soc_dapm_widget *w = sink; struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(w->name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; return tdm_priv->low_jitter_en; } static int mtk_afe_tdm_apll_connect(struct snd_soc_dapm_widget *source, struct snd_soc_dapm_widget *sink) { struct snd_soc_dapm_widget *w = sink; struct snd_soc_component *cmpnt = snd_soc_dapm_to_component(w->dapm); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(w->name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; int cur_apll; int tdm_need_apll; /* which apll */ cur_apll = mt8186_get_apll_by_name(afe, source->name); /* choose APLL from tdm rate */ tdm_need_apll = mt8186_get_apll_by_rate(afe, tdm_priv->rate); return (tdm_need_apll == cur_apll) ? 
1 : 0; } /* low jitter control */ static const char * const mt8186_tdm_hd_str[] = { "Normal", "Low_Jitter" }; static const struct soc_enum mt8186_tdm_enum[] = { SOC_ENUM_SINGLE_EXT(ARRAY_SIZE(mt8186_tdm_hd_str), mt8186_tdm_hd_str), }; static int mt8186_tdm_hd_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(kcontrol->id.name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; ucontrol->value.integer.value[0] = tdm_priv->low_jitter_en; return 0; } static int mt8186_tdm_hd_set(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *cmpnt = snd_soc_kcontrol_component(kcontrol); struct mtk_base_afe *afe = snd_soc_component_get_drvdata(cmpnt); struct mt8186_afe_private *afe_priv = afe->platform_priv; int dai_id = get_tdm_id_by_name(kcontrol->id.name); struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai_id]; struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; int hd_en; if (ucontrol->value.enumerated.item[0] >= e->items) return -EINVAL; hd_en = ucontrol->value.integer.value[0]; dev_dbg(afe->dev, "%s(), kcontrol name %s, hd_en %d\n", __func__, kcontrol->id.name, hd_en); if (tdm_priv->low_jitter_en == hd_en) return 0; tdm_priv->low_jitter_en = hd_en; return 1; } static const struct snd_kcontrol_new mtk_dai_tdm_controls[] = { SOC_ENUM_EXT(MTK_AFE_TDM_KCONTROL_NAME, mt8186_tdm_enum[0], mt8186_tdm_hd_get, mt8186_tdm_hd_set), }; static const struct snd_soc_dapm_route mtk_dai_tdm_routes[] = { {"TDM IN", NULL, "aud_tdm_clk"}, {"TDM IN", NULL, "TDM_EN"}, {"TDM IN", NULL, TDM_HD_EN_W_NAME, mtk_afe_tdm_hd_connect}, {TDM_HD_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_tdm_apll_connect}, {TDM_HD_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_tdm_apll_connect}, {"TDM IN", NULL, TDM_MCLK_EN_W_NAME, mtk_afe_tdm_mclk_connect}, {TDM_MCLK_EN_W_NAME, NULL, APLL1_W_NAME, mtk_afe_tdm_mclk_apll_connect}, {TDM_MCLK_EN_W_NAME, NULL, APLL2_W_NAME, mtk_afe_tdm_mclk_apll_connect}, /* allow tdm on without codec on */ {"TDM IN", NULL, "TDM_In_Mux"}, {"TDM_In_Mux", "Dummy_Widget", "TDM_DUMMY_IN"}, }; /* dai ops */ static int mtk_dai_tdm_cal_mclk(struct mtk_base_afe *afe, struct mtk_afe_tdm_priv *tdm_priv, int freq) { int apll; int apll_rate; apll = mt8186_get_apll_by_rate(afe, freq); apll_rate = mt8186_get_apll_rate(afe, apll); if (!freq || freq > apll_rate) { dev_err(afe->dev, "%s(), freq(%d Hz) invalid\n", __func__, freq); return -EINVAL; } if (apll_rate % freq != 0) { dev_err(afe->dev, "%s(), APLL cannot generate %d Hz", __func__, freq); return -EINVAL; } tdm_priv->mclk_rate = freq; tdm_priv->mclk_apll = apll; return 0; } static int mtk_dai_tdm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct mtk_base_afe *afe = snd_soc_dai_get_drvdata(dai); struct mt8186_afe_private *afe_priv = afe->platform_priv; int tdm_id = dai->id; struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[tdm_id]; unsigned int tdm_mode = tdm_priv->tdm_mode; unsigned int data_mode = tdm_priv->data_mode; unsigned int rate = params_rate(params); unsigned int channels = params_channels(params); snd_pcm_format_t format = params_format(params); unsigned int bit_width = snd_pcm_format_physical_width(format); unsigned int tdm_channels = (data_mode == TDM_DATA_ONE_PIN) ? 
get_tdm_ch_per_sdata(tdm_mode, channels) : 2; unsigned int lrck_width = get_tdm_lrck_width(format, tdm_mode); unsigned int tdm_con = 0; bool slave_mode = tdm_priv->slave_mode; bool lrck_inv = tdm_priv->lck_invert; bool bck_inv = tdm_priv->bck_invert; unsigned int tran_rate; unsigned int tran_relatch_rate; tdm_priv->rate = rate; tran_rate = mt8186_rate_transform(afe->dev, rate, dai->id); tran_relatch_rate = mt8186_tdm_relatch_rate_transform(afe->dev, rate); /* calculate mclk_rate, if not set explicitly */ if (!tdm_priv->mclk_rate) { tdm_priv->mclk_rate = rate * tdm_priv->mclk_multiple; mtk_dai_tdm_cal_mclk(afe, tdm_priv, tdm_priv->mclk_rate); } /* ETDM_IN1_CON0 */ tdm_con |= slave_mode << ETDM_IN1_CON0_REG_SLAVE_MODE_SFT; tdm_con |= tdm_mode << ETDM_IN1_CON0_REG_FMT_SFT; tdm_con |= (bit_width - 1) << ETDM_IN1_CON0_REG_BIT_LENGTH_SFT; tdm_con |= (bit_width - 1) << ETDM_IN1_CON0_REG_WORD_LENGTH_SFT; tdm_con |= (tdm_channels - 1) << ETDM_IN1_CON0_REG_CH_NUM_SFT; /* need to disable sync mode otherwise this may cause latch data error */ tdm_con |= 0 << ETDM_IN1_CON0_REG_SYNC_MODE_SFT; /* relatch 1x en clock fix to h26m */ tdm_con |= 0 << ETDM_IN1_CON0_REG_RELATCH_1X_EN_SEL_DOMAIN_SFT; regmap_update_bits(afe->regmap, ETDM_IN1_CON0, ETDM_IN_CON0_CTRL_MASK, tdm_con); /* ETDM_IN1_CON1 */ tdm_con = 0; tdm_con |= 0 << ETDM_IN1_CON1_REG_LRCK_AUTO_MODE_SFT; tdm_con |= 1 << ETDM_IN1_CON1_PINMUX_MCLK_CTRL_OE_SFT; tdm_con |= (lrck_width - 1) << ETDM_IN1_CON1_REG_LRCK_WIDTH_SFT; regmap_update_bits(afe->regmap, ETDM_IN1_CON1, ETDM_IN_CON1_CTRL_MASK, tdm_con); /* ETDM_IN1_CON3 */ tdm_con = ETDM_IN_CON3_FS(tran_rate); regmap_update_bits(afe->regmap, ETDM_IN1_CON3, ETDM_IN_CON3_CTRL_MASK, tdm_con); /* ETDM_IN1_CON4 */ tdm_con = ETDM_IN_CON4_FS(tran_relatch_rate); if (slave_mode) { if (lrck_inv) tdm_con |= ETDM_IN_CON4_CON0_SLAVE_LRCK_INV; if (bck_inv) tdm_con |= ETDM_IN_CON4_CON0_SLAVE_BCK_INV; } else { if (lrck_inv) tdm_con |= ETDM_IN_CON4_CON0_MASTER_LRCK_INV; if (bck_inv) tdm_con |= ETDM_IN_CON4_CON0_MASTER_BCK_INV; } regmap_update_bits(afe->regmap, ETDM_IN1_CON4, ETDM_IN_CON4_CTRL_MASK, tdm_con); /* ETDM_IN1_CON2 */ tdm_con = 0; if (data_mode == TDM_DATA_MULTI_PIN) { tdm_con |= ETDM_IN_CON2_MULTI_IP_2CH_MODE; tdm_con |= ETDM_IN_CON2_MULTI_IP_CH(channels); } regmap_update_bits(afe->regmap, ETDM_IN1_CON2, ETDM_IN_CON2_CTRL_MASK, tdm_con); /* ETDM_IN1_CON8 */ tdm_con = 0; if (slave_mode) { tdm_con |= 1 << ETDM_IN1_CON8_REG_ETDM_USE_AFIFO_SFT; tdm_con |= 0 << ETDM_IN1_CON8_REG_AFIFO_CLOCK_DOMAIN_SEL_SFT; tdm_con |= ETDM_IN_CON8_FS(tran_relatch_rate); } else { tdm_con |= 0 << ETDM_IN1_CON8_REG_ETDM_USE_AFIFO_SFT; } regmap_update_bits(afe->regmap, ETDM_IN1_CON8, ETDM_IN_CON8_CTRL_MASK, tdm_con); return 0; } static int mtk_dai_tdm_set_sysclk(struct snd_soc_dai *dai, int clk_id, unsigned int freq, int dir) { struct mtk_base_afe *afe = dev_get_drvdata(dai->dev); struct mt8186_afe_private *afe_priv = afe->platform_priv; struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id]; if (dir != SND_SOC_CLOCK_IN) { dev_err(afe->dev, "%s(), dir != SND_SOC_CLOCK_OUT", __func__); return -EINVAL; } dev_dbg(afe->dev, "%s(), freq %d\n", __func__, freq); return mtk_dai_tdm_cal_mclk(afe, tdm_priv, freq); } static int mtk_dai_tdm_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct mtk_base_afe *afe = dev_get_drvdata(dai->dev); struct mt8186_afe_private *afe_priv = afe->platform_priv; struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id]; /* DAI mode*/ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case 
SND_SOC_DAIFMT_I2S: tdm_priv->tdm_mode = TDM_IN_I2S; tdm_priv->data_mode = TDM_DATA_MULTI_PIN; break; case SND_SOC_DAIFMT_LEFT_J: tdm_priv->tdm_mode = TDM_IN_LJ; tdm_priv->data_mode = TDM_DATA_MULTI_PIN; break; case SND_SOC_DAIFMT_RIGHT_J: tdm_priv->tdm_mode = TDM_IN_RJ; tdm_priv->data_mode = TDM_DATA_MULTI_PIN; break; case SND_SOC_DAIFMT_DSP_A: tdm_priv->tdm_mode = TDM_IN_DSP_A; tdm_priv->data_mode = TDM_DATA_ONE_PIN; break; case SND_SOC_DAIFMT_DSP_B: tdm_priv->tdm_mode = TDM_IN_DSP_B; tdm_priv->data_mode = TDM_DATA_ONE_PIN; break; default: dev_err(afe->dev, "%s(), invalid DAIFMT_FORMAT_MASK", __func__); return -EINVAL; } /* DAI clock inversion*/ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: tdm_priv->bck_invert = TDM_BCK_NON_INV; tdm_priv->lck_invert = TDM_LCK_NON_INV; break; case SND_SOC_DAIFMT_NB_IF: tdm_priv->bck_invert = TDM_BCK_NON_INV; tdm_priv->lck_invert = TDM_LCK_INV; break; case SND_SOC_DAIFMT_IB_NF: tdm_priv->bck_invert = TDM_BCK_INV; tdm_priv->lck_invert = TDM_LCK_NON_INV; break; case SND_SOC_DAIFMT_IB_IF: tdm_priv->bck_invert = TDM_BCK_INV; tdm_priv->lck_invert = TDM_LCK_INV; break; default: dev_err(afe->dev, "%s(), invalid DAIFMT_INV_MASK", __func__); return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_CLOCK_PROVIDER_MASK) { case SND_SOC_DAIFMT_BP_FP: tdm_priv->slave_mode = false; break; case SND_SOC_DAIFMT_BC_FC: tdm_priv->slave_mode = true; break; default: dev_err(afe->dev, "%s(), invalid DAIFMT_CLOCK_PROVIDER_MASK", __func__); return -EINVAL; } return 0; } static int mtk_dai_tdm_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct mtk_base_afe *afe = dev_get_drvdata(dai->dev); struct mt8186_afe_private *afe_priv = afe->platform_priv; struct mtk_afe_tdm_priv *tdm_priv = afe_priv->dai_priv[dai->id]; dev_dbg(dai->dev, "%s %d slot_width %d\n", __func__, dai->id, slot_width); tdm_priv->lrck_width = slot_width; return 0; } static const struct snd_soc_dai_ops mtk_dai_tdm_ops = { .hw_params = mtk_dai_tdm_hw_params, .set_sysclk = mtk_dai_tdm_set_sysclk, .set_fmt = mtk_dai_tdm_set_fmt, .set_tdm_slot = mtk_dai_tdm_set_tdm_slot, }; /* dai driver */ #define MTK_TDM_RATES (SNDRV_PCM_RATE_8000_48000 |\ SNDRV_PCM_RATE_88200 |\ SNDRV_PCM_RATE_96000 |\ SNDRV_PCM_RATE_176400 |\ SNDRV_PCM_RATE_192000) #define MTK_TDM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE |\ SNDRV_PCM_FMTBIT_S24_LE |\ SNDRV_PCM_FMTBIT_S32_LE) static struct snd_soc_dai_driver mtk_dai_tdm_driver[] = { { .name = "TDM IN", .id = MT8186_DAI_TDM_IN, .capture = { .stream_name = "TDM IN", .channels_min = 2, .channels_max = 8, .rates = MTK_TDM_RATES, .formats = MTK_TDM_FORMATS, }, .ops = &mtk_dai_tdm_ops, }, }; static struct mtk_afe_tdm_priv *init_tdm_priv_data(struct mtk_base_afe *afe) { struct mtk_afe_tdm_priv *tdm_priv; tdm_priv = devm_kzalloc(afe->dev, sizeof(struct mtk_afe_tdm_priv), GFP_KERNEL); if (!tdm_priv) return NULL; tdm_priv->mclk_multiple = 512; tdm_priv->mclk_id = MT8186_TDM_MCK; tdm_priv->id = MT8186_DAI_TDM_IN; return tdm_priv; } int mt8186_dai_tdm_register(struct mtk_base_afe *afe) { struct mt8186_afe_private *afe_priv = afe->platform_priv; struct mtk_afe_tdm_priv *tdm_priv; struct mtk_base_afe_dai *dai; dai = devm_kzalloc(afe->dev, sizeof(*dai), GFP_KERNEL); if (!dai) return -ENOMEM; list_add(&dai->list, &afe->sub_dais); dai->dai_drivers = mtk_dai_tdm_driver; dai->num_dai_drivers = ARRAY_SIZE(mtk_dai_tdm_driver); dai->controls = mtk_dai_tdm_controls; dai->num_controls = ARRAY_SIZE(mtk_dai_tdm_controls); dai->dapm_widgets = 
mtk_dai_tdm_widgets;
	dai->num_dapm_widgets = ARRAY_SIZE(mtk_dai_tdm_widgets);
	dai->dapm_routes = mtk_dai_tdm_routes;
	dai->num_dapm_routes = ARRAY_SIZE(mtk_dai_tdm_routes);

	tdm_priv = init_tdm_priv_data(afe);
	if (!tdm_priv)
		return -ENOMEM;

	afe_priv->dai_priv[MT8186_DAI_TDM_IN] = tdm_priv;

	return 0;
}
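/*
 * A minimal, self-contained sketch (not part of the driver above) of the
 * MCLK rule applied by mtk_dai_tdm_hw_params()/mtk_dai_tdm_cal_mclk():
 * MCLK defaults to rate * mclk_multiple (512) and the selected APLL must be
 * an integer multiple of it.  The APLL rates used here (196.608 MHz for the
 * 8k/48k family, 180.6336 MHz for the 11.025k/44.1k family) and the
 * rate-to-APLL mapping are assumed example values, not read from hardware.
 */
#include <stdio.h>

static int check_tdm_mclk(unsigned int rate, unsigned int mclk_multiple)
{
	unsigned int mclk = rate * mclk_multiple;
	/* assumption: 48k-family rates map to APLL1, 44.1k-family to APLL2 */
	unsigned int apll_rate = (rate % 8000 == 0) ? 196608000 : 180633600;

	if (!mclk || mclk > apll_rate || apll_rate % mclk) {
		printf("rate %u: MCLK %u Hz not derivable from APLL %u Hz\n",
		       rate, mclk, apll_rate);
		return -1;
	}
	printf("rate %u: MCLK %u Hz = APLL %u Hz / %u\n",
	       rate, mclk, apll_rate, apll_rate / mclk);
	return 0;
}

int main(void)
{
	check_tdm_mclk(48000, 512);	/* 24.576 MHz, divider 8 */
	check_tdm_mclk(44100, 512);	/* 22.5792 MHz, divider 8 */
	return 0;
}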
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) International Business Machines Corp., 2000-2003 */ #ifndef _H_JFS_FILSYS #define _H_JFS_FILSYS /* * jfs_filsys.h * * file system (implementation-dependent) constants * * refer to <limits.h> for system wide implementation-dependent constants */ /* * file system option (superblock flag) */ /* directory option */ #define JFS_UNICODE 0x00000001 /* unicode name */ /* mount time flags for error handling */ #define JFS_ERR_REMOUNT_RO 0x00000002 /* remount read-only */ #define JFS_ERR_CONTINUE 0x00000004 /* continue */ #define JFS_ERR_PANIC 0x00000008 /* panic */ #define JFS_ERR_MASK (JFS_ERR_REMOUNT_RO|JFS_ERR_CONTINUE|JFS_ERR_PANIC) /* Quota support */ #define JFS_USRQUOTA 0x00000010 #define JFS_GRPQUOTA 0x00000020 /* mount time flag to disable journaling to disk */ #define JFS_NOINTEGRITY 0x00000040 /* mount time flag to enable TRIM to ssd disks */ #define JFS_DISCARD 0x00000080 /* commit option */ #define JFS_COMMIT 0x00000f00 /* commit option mask */ #define JFS_GROUPCOMMIT 0x00000100 /* group (of 1) commit */ #define JFS_LAZYCOMMIT 0x00000200 /* lazy commit */ #define JFS_TMPFS 0x00000400 /* temporary file system - * do not log/commit: * Never implemented */ /* log logical volume option */ #define JFS_INLINELOG 0x00000800 /* inline log within file system */ #define JFS_INLINEMOVE 0x00001000 /* inline log being moved */ /* Secondary aggregate inode table */ #define JFS_BAD_SAIT 0x00010000 /* current secondary ait is bad */ /* sparse regular file support */ #define JFS_SPARSE 0x00020000 /* sparse regular file */ /* DASD Limits F226941 */ #define JFS_DASD_ENABLED 0x00040000 /* DASD limits enabled */ #define JFS_DASD_PRIME 0x00080000 /* Prime DASD usage on boot */ /* big endian flag */ #define JFS_SWAP_BYTES 0x00100000 /* running on big endian computer */ /* Directory index */ #define JFS_DIR_INDEX 0x00200000 /* Persistent index for */ /* platform options */ #define JFS_LINUX 0x10000000 /* Linux support */ #define JFS_DFS 0x20000000 /* DCE DFS LFS support */ /* Never implemented */ #define JFS_OS2 0x40000000 /* OS/2 support */ /* case-insensitive name/directory support */ #define JFS_AIX 0x80000000 /* AIX support */ /* * buffer cache configuration */ /* page size */ #ifdef PSIZE #undef PSIZE #endif #define PSIZE 4096 /* page size (in byte) */ #define L2PSIZE 12 /* log2(PSIZE) */ #define POFFSET 4095 /* offset within page */ /* buffer page size */ #define BPSIZE PSIZE /* * fs fundamental size * * PSIZE >= file system block size >= PBSIZE >= DISIZE */ #define PBSIZE 512 /* physical block size (in byte) */ #define L2PBSIZE 9 /* log2(PBSIZE) */ #define DISIZE 512 /* on-disk inode size (in byte) */ #define L2DISIZE 9 /* log2(DISIZE) */ #define IDATASIZE 256 /* inode inline data size */ #define IXATTRSIZE 128 /* inode inline extended attribute size */ #define XTPAGE_SIZE 4096 #define log2_PAGESIZE 12 #define IAG_SIZE 4096 #define IAG_EXTENT_SIZE 4096 #define INOSPERIAG 4096 /* number of disk inodes per iag */ #define L2INOSPERIAG 12 /* l2 number of disk inodes per iag */ #define INOSPEREXT 32 /* number of disk inode per extent */ #define L2INOSPEREXT 5 /* l2 number of disk inode per extent */ #define IXSIZE (DISIZE * INOSPEREXT) /* inode extent size */ #define INOSPERPAGE 8 /* number of disk inodes per 4K page */ #define L2INOSPERPAGE 3 /* log2(INOSPERPAGE) */ #define IAGFREELIST_LWM 64 #define INODE_EXTENT_SIZE IXSIZE /* inode extent size */ #define NUM_INODE_PER_EXTENT INOSPEREXT #define NUM_INODE_PER_IAG INOSPERIAG #define 
MINBLOCKSIZE 512 #define L2MINBLOCKSIZE 9 #define MAXBLOCKSIZE 4096 #define L2MAXBLOCKSIZE 12 #define MAXFILESIZE ((s64)1 << 52) #define JFS_LINK_MAX 0xffffffff /* Minimum number of bytes supported for a JFS partition */ #define MINJFS (0x1000000) #define MINJFSTEXT "16" /* * file system block size -> physical block size */ #define LBOFFSET(x) ((x) & (PBSIZE - 1)) #define LBNUMBER(x) ((x) >> L2PBSIZE) #define LBLK2PBLK(sb,b) ((b) << (sb->s_blocksize_bits - L2PBSIZE)) #define PBLK2LBLK(sb,b) ((b) >> (sb->s_blocksize_bits - L2PBSIZE)) /* size in byte -> last page number */ #define SIZE2PN(size) ( ((s64)((size) - 1)) >> (L2PSIZE) ) /* size in byte -> last file system block number */ #define SIZE2BN(size, l2bsize) ( ((s64)((size) - 1)) >> (l2bsize) ) /* * fixed physical block address (physical block size = 512 byte) * * NOTE: since we can't guarantee a physical block size of 512 bytes the use of * these macros should be removed and the byte offset macros used instead. */ #define SUPER1_B 64 /* primary superblock */ #define AIMAP_B (SUPER1_B + 8) /* 1st extent of aggregate inode map */ #define AITBL_B (AIMAP_B + 16) /* * 1st extent of aggregate inode table */ #define SUPER2_B (AITBL_B + 32) /* 2ndary superblock pbn */ #define BMAP_B (SUPER2_B + 8) /* block allocation map */ /* * SIZE_OF_SUPER defines the total amount of space reserved on disk for the * superblock. This is not the same as the superblock structure, since all of * this space is not currently being used. */ #define SIZE_OF_SUPER PSIZE /* * SIZE_OF_AG_TABLE defines the amount of space reserved to hold the AG table */ #define SIZE_OF_AG_TABLE PSIZE /* * SIZE_OF_MAP_PAGE defines the amount of disk space reserved for each page of * the inode allocation map (to hold iag) */ #define SIZE_OF_MAP_PAGE PSIZE /* * fixed byte offset address */ #define SUPER1_OFF 0x8000 /* primary superblock */ #define AIMAP_OFF (SUPER1_OFF + SIZE_OF_SUPER) /* * Control page of aggregate inode map * followed by 1st extent of map */ #define AITBL_OFF (AIMAP_OFF + (SIZE_OF_MAP_PAGE << 1)) /* * 1st extent of aggregate inode table */ #define SUPER2_OFF (AITBL_OFF + INODE_EXTENT_SIZE) /* * secondary superblock */ #define BMAP_OFF (SUPER2_OFF + SIZE_OF_SUPER) /* * block allocation map */ /* * The following macro is used to indicate the number of reserved disk blocks at * the front of an aggregate, in terms of physical blocks. This value is * currently defined to be 32K. This turns out to be the same as the primary * superblock's address, since it directly follows the reserved blocks. */ #define AGGR_RSVD_BLOCKS SUPER1_B /* * The following macro is used to indicate the number of reserved bytes at the * front of an aggregate. This value is currently defined to be 32K. This * turns out to be the same as the primary superblock's byte offset, since it * directly follows the reserved blocks. */ #define AGGR_RSVD_BYTES SUPER1_OFF /* * The following macro defines the byte offset for the first inode extent in * the aggregate inode table. This allows us to find the self inode to find the * rest of the table. Currently this value is 44K. 
*/ #define AGGR_INODE_TABLE_START AITBL_OFF /* * fixed reserved inode number */ /* aggregate inode */ #define AGGR_RESERVED_I 0 /* aggregate inode (reserved) */ #define AGGREGATE_I 1 /* aggregate inode map inode */ #define BMAP_I 2 /* aggregate block allocation map inode */ #define LOG_I 3 /* aggregate inline log inode */ #define BADBLOCK_I 4 /* aggregate bad block inode */ #define FILESYSTEM_I 16 /* 1st/only fileset inode in ait: * fileset inode map inode */ /* per fileset inode */ #define FILESET_RSVD_I 0 /* fileset inode (reserved) */ #define FILESET_EXT_I 1 /* fileset inode extension */ #define ROOT_I 2 /* fileset root inode */ #define ACL_I 3 /* fileset ACL inode */ #define FILESET_OBJECT_I 4 /* the first fileset inode available for a file * or directory or link... */ #define FIRST_FILESET_INO 16 /* the first aggregate inode which describes * an inode. (To fsck this is also the first * inode in part 2 of the agg inode table.) */ /* * directory configuration */ #define JFS_NAME_MAX 255 #define JFS_PATH_MAX BPSIZE /* * file system state (superblock state) */ #define FM_CLEAN 0x00000000 /* file system is unmounted and clean */ #define FM_MOUNT 0x00000001 /* file system is mounted cleanly */ #define FM_DIRTY 0x00000002 /* file system was not unmounted and clean * when mounted or * commit failure occurred while being mounted: * fsck() must be run to repair */ #define FM_LOGREDO 0x00000004 /* log based recovery (logredo()) failed: * fsck() must be run to repair */ #define FM_EXTENDFS 0x00000008 /* file system extendfs() in progress */ #define FM_STATE_MAX 0x0000000f /* max value of s_state */ #endif /* _H_JFS_FILSYS */
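/*
 * A small standalone sketch (not part of jfs_filsys.h) that re-derives the
 * fixed on-disk layout above from the same formulas, so the byte offsets and
 * the 512-byte physical block numbers can be cross-checked: SUPER1 at 32K
 * (block 64), AIMAP at 36K (block 72), AITBL at 44K (block 88), SUPER2 at
 * 60K (block 120) and BMAP at 64K (block 128).
 */
#include <stdio.h>

int main(void)
{
	const unsigned int psize = 4096;		/* PSIZE */
	const unsigned int ixsize = 512 * 32;		/* DISIZE * INOSPEREXT */
	unsigned int super1 = 0x8000;			/* SUPER1_OFF */
	unsigned int aimap = super1 + psize;		/* AIMAP_OFF */
	unsigned int aitbl = aimap + (psize << 1);	/* AITBL_OFF */
	unsigned int super2 = aitbl + ixsize;		/* SUPER2_OFF */
	unsigned int bmap = super2 + psize;		/* BMAP_OFF */

	printf("SUPER1 %uK (pblk %u)\n", super1 / 1024, super1 / 512);
	printf("AIMAP  %uK (pblk %u)\n", aimap / 1024, aimap / 512);
	printf("AITBL  %uK (pblk %u)\n", aitbl / 1024, aitbl / 512);
	printf("SUPER2 %uK (pblk %u)\n", super2 / 1024, super2 / 512);
	printf("BMAP   %uK (pblk %u)\n", bmap / 1024, bmap / 512);
	return 0;
}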
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2018, Red Hat, Inc. * * Tests for SMM. */ #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <string.h> #include <sys/ioctl.h> #include "test_util.h" #include "kvm_util.h" #include "vmx.h" #include "svm_util.h" #define SMRAM_SIZE 65536 #define SMRAM_MEMSLOT ((1 << 16) | 1) #define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE) #define SMRAM_GPA 0x1000000 #define SMRAM_STAGE 0xfe #define STR(x) #x #define XSTR(s) STR(s) #define SYNC_PORT 0xe #define DONE 0xff /* * This is compiled as normal 64-bit code, however, SMI handler is executed * in real-address mode. To stay simple we're limiting ourselves to a mode * independent subset of asm here. * SMI handler always report back fixed stage SMRAM_STAGE. */ uint8_t smi_handler[] = { 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */ 0x0f, 0xaa, /* rsm */ }; static inline void sync_with_host(uint64_t phase) { asm volatile("in $" XSTR(SYNC_PORT)", %%al \n" : "+a" (phase)); } static void self_smi(void) { x2apic_write_reg(APIC_ICR, APIC_DEST_SELF | APIC_INT_ASSERT | APIC_DM_SMI); } static void l2_guest_code(void) { sync_with_host(8); sync_with_host(10); vmcall(); } static void guest_code(void *arg) { #define L2_GUEST_STACK_SIZE 64 unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE]; uint64_t apicbase = rdmsr(MSR_IA32_APICBASE); struct svm_test_data *svm = arg; struct vmx_pages *vmx_pages = arg; sync_with_host(1); wrmsr(MSR_IA32_APICBASE, apicbase | X2APIC_ENABLE); sync_with_host(2); self_smi(); sync_with_host(4); if (arg) { if (this_cpu_has(X86_FEATURE_SVM)) { generic_svm_setup(svm, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); } else { GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages)); GUEST_ASSERT(load_vmcs(vmx_pages)); prepare_vmcs(vmx_pages, l2_guest_code, &l2_guest_stack[L2_GUEST_STACK_SIZE]); } sync_with_host(5); self_smi(); sync_with_host(7); if (this_cpu_has(X86_FEATURE_SVM)) { run_guest(svm->vmcb, svm->vmcb_gpa); run_guest(svm->vmcb, svm->vmcb_gpa); } else { vmlaunch(); vmresume(); } /* Stages 8-11 are eaten by SMM (SMRAM_STAGE reported instead) */ sync_with_host(12); } sync_with_host(DONE); } void inject_smi(struct kvm_vcpu *vcpu) { struct kvm_vcpu_events events; vcpu_events_get(vcpu, &events); events.smi.pending = 1; events.flags |= KVM_VCPUEVENT_VALID_SMM; vcpu_events_set(vcpu, &events); } int main(int argc, char *argv[]) { vm_vaddr_t nested_gva = 0; struct kvm_vcpu *vcpu; struct kvm_regs regs; struct kvm_vm *vm; struct kvm_x86_state *state; int stage, stage_reported; TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM)); /* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code); vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA, SMRAM_MEMSLOT, SMRAM_PAGES, 0); TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT) == SMRAM_GPA, "could not allocate guest physical addresses?"); memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE); memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler, sizeof(smi_handler)); vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA); if (kvm_has_cap(KVM_CAP_NESTED_STATE)) { if (kvm_cpu_has(X86_FEATURE_SVM)) vcpu_alloc_svm(vm, &nested_gva); else if (kvm_cpu_has(X86_FEATURE_VMX)) vcpu_alloc_vmx(vm, &nested_gva); } if (!nested_gva) pr_info("will skip SMM test with VMX enabled\n"); vcpu_args_set(vcpu, 1, nested_gva); for (stage = 1;; stage++) { vcpu_run(vcpu); TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); memset(&regs, 0, sizeof(regs)); vcpu_regs_get(vcpu, &regs); 
stage_reported = regs.rax & 0xff; if (stage_reported == DONE) goto done; TEST_ASSERT(stage_reported == stage || stage_reported == SMRAM_STAGE, "Unexpected stage: #%x, got %x", stage, stage_reported); /* * Enter SMM during L2 execution and check that we correctly * return from it. Do not perform save/restore while in SMM yet. */ if (stage == 8) { inject_smi(vcpu); continue; } /* * Perform save/restore while the guest is in SMM triggered * during L2 execution. */ if (stage == 10) inject_smi(vcpu); state = vcpu_save_state(vcpu); kvm_vm_release(vm); vcpu = vm_recreate_with_one_vcpu(vm); vcpu_load_state(vcpu, state); kvm_x86_state_cleanup(state); } done: kvm_vm_free(vm); }
// SPDX-License-Identifier: GPL-2.0-only /* * Maxim MAX5522 * Dual, Ultra-Low-Power 10-Bit, Voltage-Output DACs * * Copyright 2022 Timesys Corp. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/iio/iio.h> #define MAX5522_MAX_ADDR 15 #define MAX5522_CTRL_NONE 0 #define MAX5522_CTRL_LOAD_IN_A 9 #define MAX5522_CTRL_LOAD_IN_B 10 #define MAX5522_REG_DATA(x) ((x) + MAX5522_CTRL_LOAD_IN_A) struct max5522_chip_info { const char *name; const struct iio_chan_spec *channels; unsigned int num_channels; }; struct max5522_state { struct regmap *regmap; const struct max5522_chip_info *chip_info; unsigned short dac_cache[2]; struct regulator *vrefin_reg; }; #define MAX5522_CHANNEL(chan) { \ .type = IIO_VOLTAGE, \ .indexed = 1, \ .output = 1, \ .channel = chan, \ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ BIT(IIO_CHAN_INFO_SCALE), \ .scan_type = { \ .sign = 'u', \ .realbits = 10, \ .storagebits = 16, \ .shift = 2, \ } \ } static const struct iio_chan_spec max5522_channels[] = { MAX5522_CHANNEL(0), MAX5522_CHANNEL(1), }; enum max5522_type { ID_MAX5522, }; static const struct max5522_chip_info max5522_chip_info_tbl[] = { [ID_MAX5522] = { .name = "max5522", .channels = max5522_channels, .num_channels = 2, }, }; static inline int max5522_info_to_reg(struct iio_chan_spec const *chan) { return MAX5522_REG_DATA(chan->channel); } static int max5522_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct max5522_state *state = iio_priv(indio_dev); int ret; switch (info) { case IIO_CHAN_INFO_RAW: *val = state->dac_cache[chan->channel]; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: ret = regulator_get_voltage(state->vrefin_reg); if (ret < 0) return -EINVAL; *val = ret / 1000; *val2 = 10; return IIO_VAL_FRACTIONAL_LOG2; default: return -EINVAL; } return -EINVAL; } static int max5522_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long info) { struct max5522_state *state = iio_priv(indio_dev); int rval; if (val > 1023 || val < 0) return -EINVAL; rval = regmap_write(state->regmap, max5522_info_to_reg(chan), val << chan->scan_type.shift); if (rval < 0) return rval; state->dac_cache[chan->channel] = val; return 0; } static const struct iio_info max5522_info = { .read_raw = max5522_read_raw, .write_raw = max5522_write_raw, }; static const struct regmap_config max5522_regmap_config = { .reg_bits = 4, .val_bits = 12, .max_register = MAX5522_MAX_ADDR, }; static int max5522_spi_probe(struct spi_device *spi) { struct iio_dev *indio_dev; struct max5522_state *state; int ret; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state)); if (indio_dev == NULL) { dev_err(&spi->dev, "failed to allocate iio device\n"); return -ENOMEM; } state = iio_priv(indio_dev); state->chip_info = spi_get_device_match_data(spi); if (!state->chip_info) return -EINVAL; state->vrefin_reg = devm_regulator_get(&spi->dev, "vrefin"); if (IS_ERR(state->vrefin_reg)) return dev_err_probe(&spi->dev, PTR_ERR(state->vrefin_reg), "Vrefin regulator not specified\n"); ret = regulator_enable(state->vrefin_reg); if (ret) { return dev_err_probe(&spi->dev, ret, "Failed to enable vref regulators\n"); } state->regmap = devm_regmap_init_spi(spi, &max5522_regmap_config); if (IS_ERR(state->regmap)) return PTR_ERR(state->regmap); indio_dev->info = &max5522_info; 
indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = max5522_channels;
	indio_dev->num_channels = ARRAY_SIZE(max5522_channels);
	indio_dev->name = max5522_chip_info_tbl[ID_MAX5522].name;

	return devm_iio_device_register(&spi->dev, indio_dev);
}

static const struct spi_device_id max5522_ids[] = {
	{ "max5522", (kernel_ulong_t)&max5522_chip_info_tbl[ID_MAX5522] },
	{}
};
MODULE_DEVICE_TABLE(spi, max5522_ids);

static const struct of_device_id max5522_of_match[] = {
	{
		.compatible = "maxim,max5522",
		.data = &max5522_chip_info_tbl[ID_MAX5522],
	},
	{}
};
MODULE_DEVICE_TABLE(of, max5522_of_match);

static struct spi_driver max5522_spi_driver = {
	.driver = {
		.name = "max5522",
		.of_match_table = max5522_of_match,
	},
	.probe = max5522_spi_probe,
	.id_table = max5522_ids,
};
module_spi_driver(max5522_spi_driver);

MODULE_AUTHOR("Angelo Dureghello <[email protected]>");
MODULE_DESCRIPTION("MAX5522 DAC driver");
MODULE_LICENSE("GPL");
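/*
 * A standalone sketch (not part of the driver above) of the scaling used by
 * max5522_read_raw()/max5522_write_raw(): scale is reported as
 * Vrefin[mV] / 2^10 per LSB (IIO_VAL_FRACTIONAL_LOG2), and the 10-bit code is
 * shifted left by scan_type.shift (2) before it lands in the LOAD_IN_A/B
 * register.  The 3300 mV reference is an assumed example value, not something
 * reported by the chip.
 */
#include <stdio.h>

int main(void)
{
	unsigned int vrefin_mv = 3300;		/* assumed Vrefin */
	unsigned int raw = 512;			/* mid-scale code */
	unsigned int reg_addr = 9 + 0;		/* MAX5522_REG_DATA(0) */
	unsigned int payload = raw << 2;	/* scan_type.shift */

	printf("Vout = %u * %u / 1024 = %u mV\n",
	       raw, vrefin_mv, raw * vrefin_mv / 1024);
	printf("regmap_write(addr %u, val 0x%03x)\n", reg_addr, payload);
	return 0;
}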
// SPDX-License-Identifier: GPL-2.0 /* Copyright (C) 2019 ARM Limited */ #include <ctype.h> #include <string.h> #include "testcases.h" bool validate_extra_context(struct extra_context *extra, char **err, void **extra_data, size_t *extra_size) { struct _aarch64_ctx *term; if (!extra || !err) return false; fprintf(stderr, "Validating EXTRA...\n"); term = GET_RESV_NEXT_HEAD(&extra->head); if (!term || term->magic || term->size) { *err = "Missing terminator after EXTRA context"; return false; } if (extra->datap & 0x0fUL) *err = "Extra DATAP misaligned"; else if (extra->size & 0x0fUL) *err = "Extra SIZE misaligned"; else if (extra->datap != (uint64_t)term + 0x10UL) *err = "Extra DATAP misplaced (not contiguous)"; if (*err) return false; *extra_data = (void *)extra->datap; *extra_size = extra->size; return true; } bool validate_sve_context(struct sve_context *sve, char **err) { /* Size will be rounded up to a multiple of 16 bytes */ size_t regs_size = ((SVE_SIG_CONTEXT_SIZE(sve_vq_from_vl(sve->vl)) + 15) / 16) * 16; if (!sve || !err) return false; /* Either a bare sve_context or a sve_context followed by regs data */ if ((sve->head.size != sizeof(struct sve_context)) && (sve->head.size != regs_size)) { *err = "bad size for SVE context"; return false; } if (!sve_vl_valid(sve->vl)) { *err = "SVE VL invalid"; return false; } return true; } bool validate_za_context(struct za_context *za, char **err) { /* Size will be rounded up to a multiple of 16 bytes */ size_t regs_size = ((ZA_SIG_CONTEXT_SIZE(sve_vq_from_vl(za->vl)) + 15) / 16) * 16; if (!za || !err) return false; /* Either a bare za_context or a za_context followed by regs data */ if ((za->head.size != sizeof(struct za_context)) && (za->head.size != regs_size)) { *err = "bad size for ZA context"; return false; } if (!sve_vl_valid(za->vl)) { *err = "SME VL in ZA context invalid"; return false; } return true; } bool validate_zt_context(struct zt_context *zt, char **err) { if (!zt || !err) return false; /* If the context is present there should be at least one register */ if (zt->nregs == 0) { *err = "no registers"; return false; } /* Size should agree with the number of registers */ if (zt->head.size != ZT_SIG_CONTEXT_SIZE(zt->nregs)) { *err = "register count does not match size"; return false; } return true; } bool validate_reserved(ucontext_t *uc, size_t resv_sz, char **err) { bool terminated = false; size_t offs = 0; int flags = 0; int new_flags, i; struct extra_context *extra = NULL; struct sve_context *sve = NULL; struct za_context *za = NULL; struct zt_context *zt = NULL; struct _aarch64_ctx *head = (struct _aarch64_ctx *)uc->uc_mcontext.__reserved; void *extra_data = NULL; size_t extra_sz = 0; char magic[4]; if (!err) return false; /* Walk till the end terminator verifying __reserved contents */ while (head && !terminated && offs < resv_sz) { if ((uint64_t)head & 0x0fUL) { *err = "Misaligned HEAD"; return false; } new_flags = 0; switch (head->magic) { case 0: if (head->size) { *err = "Bad size for terminator"; } else if (extra_data) { /* End of main data, walking the extra data */ head = extra_data; resv_sz = extra_sz; offs = 0; extra_data = NULL; extra_sz = 0; continue; } else { terminated = true; } break; case FPSIMD_MAGIC: if (flags & FPSIMD_CTX) *err = "Multiple FPSIMD_MAGIC"; else if (head->size != sizeof(struct fpsimd_context)) *err = "Bad size for fpsimd_context"; new_flags |= FPSIMD_CTX; break; case ESR_MAGIC: if (head->size != sizeof(struct esr_context)) *err = "Bad size for esr_context"; break; case POE_MAGIC: if (head->size != 
sizeof(struct poe_context)) *err = "Bad size for poe_context"; break; case TPIDR2_MAGIC: if (head->size != sizeof(struct tpidr2_context)) *err = "Bad size for tpidr2_context"; break; case SVE_MAGIC: if (flags & SVE_CTX) *err = "Multiple SVE_MAGIC"; /* Size is validated in validate_sve_context() */ sve = (struct sve_context *)head; new_flags |= SVE_CTX; break; case ZA_MAGIC: if (flags & ZA_CTX) *err = "Multiple ZA_MAGIC"; /* Size is validated in validate_za_context() */ za = (struct za_context *)head; new_flags |= ZA_CTX; break; case ZT_MAGIC: if (flags & ZT_CTX) *err = "Multiple ZT_MAGIC"; /* Size is validated in validate_za_context() */ zt = (struct zt_context *)head; new_flags |= ZT_CTX; break; case FPMR_MAGIC: if (flags & FPMR_CTX) *err = "Multiple FPMR_MAGIC"; else if (head->size != sizeof(struct fpmr_context)) *err = "Bad size for fpmr_context"; new_flags |= FPMR_CTX; break; case GCS_MAGIC: if (flags & GCS_CTX) *err = "Multiple GCS_MAGIC"; if (head->size != sizeof(struct gcs_context)) *err = "Bad size for gcs_context"; new_flags |= GCS_CTX; break; case EXTRA_MAGIC: if (flags & EXTRA_CTX) *err = "Multiple EXTRA_MAGIC"; else if (head->size != sizeof(struct extra_context)) *err = "Bad size for extra_context"; new_flags |= EXTRA_CTX; extra = (struct extra_context *)head; break; case KSFT_BAD_MAGIC: /* * This is a BAD magic header defined * artificially by a testcase and surely * unknown to the Kernel parse_user_sigframe(). * It MUST cause a Kernel induced SEGV */ *err = "BAD MAGIC !"; break; default: /* * A still unknown Magic: potentially freshly added * to the Kernel code and still unknown to the * tests. Magic numbers are supposed to be allocated * as somewhat meaningful ASCII strings so try to * print as such as well as the raw number. */ memcpy(magic, &head->magic, sizeof(magic)); for (i = 0; i < sizeof(magic); i++) if (!isalnum(magic[i])) magic[i] = '?'; fprintf(stdout, "SKIP Unknown MAGIC: 0x%X (%c%c%c%c) - Is KSFT arm64/signal up to date ?\n", head->magic, magic[3], magic[2], magic[1], magic[0]); break; } if (*err) return false; offs += head->size; if (resv_sz < offs + sizeof(*head)) { *err = "HEAD Overrun"; return false; } if (new_flags & EXTRA_CTX) if (!validate_extra_context(extra, err, &extra_data, &extra_sz)) return false; if (new_flags & SVE_CTX) if (!validate_sve_context(sve, err)) return false; if (new_flags & ZA_CTX) if (!validate_za_context(za, err)) return false; if (new_flags & ZT_CTX) if (!validate_zt_context(zt, err)) return false; flags |= new_flags; head = GET_RESV_NEXT_HEAD(head); } if (terminated && !(flags & FPSIMD_CTX)) { *err = "Missing FPSIMD"; return false; } if (terminated && (flags & ZT_CTX) && !(flags & ZA_CTX)) { *err = "ZT context but no ZA context"; return false; } return true; } /* * This function walks through the records inside the provided reserved area * trying to find enough space to fit @need_sz bytes: if not enough space is * available and an extra_context record is present, it throws away the * extra_context record. * * It returns a pointer to a new header where it is possible to start storing * our need_sz bytes. * * @shead: points to the start of reserved area * @need_sz: needed bytes * @resv_sz: reserved area size in bytes * @offset: if not null, this will be filled with the offset of the return * head pointer from @shead * * @return: pointer to a new head where to start storing need_sz bytes, or * NULL if space could not be made available. 
 */
struct _aarch64_ctx *get_starting_head(struct _aarch64_ctx *shead,
				       size_t need_sz, size_t resv_sz,
				       size_t *offset)
{
	size_t offs = 0;
	struct _aarch64_ctx *head;

	head = get_terminator(shead, resv_sz, &offs);
	/* no terminator found: bail out without updating @offset */
	if (!head)
		return head;

	if (resv_sz - offs < need_sz) {
		fprintf(stderr, "Low on space:%zd. Discarding extra_context.\n",
			resv_sz - offs);
		head = get_header(shead, EXTRA_MAGIC, resv_sz, &offs);
		if (!head || resv_sz - offs < need_sz) {
			fprintf(stderr,
				"Failed to reclaim space on sigframe.\n");
			return NULL;
		}
	}

	fprintf(stderr, "Available space:%zd\n", resv_sz - offs);
	if (offset)
		*offset = offs;

	return head;
}
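/*
 * A minimal sketch (separate from the test code above) of the record walk
 * that validate_reserved() performs: __reserved holds a chain of
 * {magic, size} headers, each covering size bytes, ended by a {0, 0}
 * terminator.  The header struct is re-declared locally only to keep the
 * example self-contained; the real one comes from <asm/sigcontext.h> and the
 * real walk uses GET_RESV_NEXT_HEAD() from testcases.h.
 */
#include <stdio.h>

struct hdr {
	unsigned int magic;
	unsigned int size;
};

static void walk(const char *resv, size_t resv_sz)
{
	size_t offs = 0;

	while (offs + sizeof(struct hdr) <= resv_sz) {
		const struct hdr *h = (const struct hdr *)(resv + offs);

		if (!h->magic && !h->size) {	/* terminator */
			printf("terminator at offset %zu\n", offs);
			return;
		}
		printf("record 0x%08x, %u bytes\n", h->magic, h->size);
		if (!h->size)
			return;			/* malformed: avoid looping */
		offs += h->size;
	}
	printf("ran past the reserved area without a terminator\n");
}

int main(void)
{
	/* one fake 16-byte record followed by a {0, 0} terminator */
	struct hdr buf[4] = { { 0x12345678, 16 }, { 0, 0 }, { 0, 0 }, { 0, 0 } };

	walk((const char *)buf, sizeof(buf));
	return 0;
}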
/* SPDX-License-Identifier: GPL-2.0-only */ /* * OMAP44xx PRM instance offset macros * * Copyright (C) 2009-2011 Texas Instruments, Inc. * Copyright (C) 2009-2010 Nokia Corporation * * Paul Walmsley ([email protected]) * Rajendra Nayak ([email protected]) * Benoit Cousson ([email protected]) * * This file is automatically generated from the OMAP hardware databases. * We respectfully ask that any modifications to this file be coordinated * with the public [email protected] mailing list and the * authors above to ensure that the autogeneration scripts are kept * up-to-date with the file contents. * * XXX This file needs to be updated to align on one of "OMAP4", "OMAP44XX", * or "OMAP4430". */ #ifndef __ARCH_ARM_MACH_OMAP2_PRM44XX_H #define __ARCH_ARM_MACH_OMAP2_PRM44XX_H #include "prm44xx_54xx.h" #include "prm.h" #define OMAP4430_PRM_BASE 0x4a306000 #define OMAP44XX_PRM_REGADDR(inst, reg) \ OMAP2_L4_IO_ADDRESS(OMAP4430_PRM_BASE + (inst) + (reg)) /* PRM instances */ #define OMAP4430_PRM_OCP_SOCKET_INST 0x0000 #define OMAP4430_PRM_CKGEN_INST 0x0100 #define OMAP4430_PRM_MPU_INST 0x0300 #define OMAP4430_PRM_TESLA_INST 0x0400 #define OMAP4430_PRM_ABE_INST 0x0500 #define OMAP4430_PRM_ALWAYS_ON_INST 0x0600 #define OMAP4430_PRM_CORE_INST 0x0700 #define OMAP4430_PRM_IVAHD_INST 0x0f00 #define OMAP4430_PRM_CAM_INST 0x1000 #define OMAP4430_PRM_DSS_INST 0x1100 #define OMAP4430_PRM_GFX_INST 0x1200 #define OMAP4430_PRM_L3INIT_INST 0x1300 #define OMAP4430_PRM_L4PER_INST 0x1400 #define OMAP4430_PRM_CEFUSE_INST 0x1600 #define OMAP4430_PRM_WKUP_INST 0x1700 #define OMAP4430_PRM_WKUP_CM_INST 0x1800 #define OMAP4430_PRM_EMU_INST 0x1900 #define OMAP4430_PRM_EMU_CM_INST 0x1a00 #define OMAP4430_PRM_DEVICE_INST 0x1b00 /* PRM clockdomain register offsets (from instance start) */ #define OMAP4430_PRM_WKUP_CM_WKUP_CDOFFS 0x0000 #define OMAP4430_PRM_EMU_CM_EMU_CDOFFS 0x0000 /* OMAP4 specific register offsets */ #define OMAP4_RM_RSTST 0x0004 #define OMAP4_PM_PWSTCTRL 0x0000 #define OMAP4_PM_PWSTST 0x0004 /* PRM.OCP_SOCKET_PRM register offsets */ #define OMAP4_REVISION_PRM_OFFSET 0x0000 #define OMAP4_PRM_IRQSTATUS_MPU_OFFSET 0x0010 #define OMAP4430_PRM_IRQSTATUS_MPU OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0010) #define OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET 0x0014 #define OMAP4_PRM_IRQENABLE_MPU_OFFSET 0x0018 #define OMAP4430_PRM_IRQENABLE_MPU OMAP44XX_PRM_REGADDR(OMAP4430_PRM_OCP_SOCKET_INST, 0x0018) /* PRM.MPU_PRM register offsets */ #define OMAP4_RM_MPU_MPU_CONTEXT_OFFSET 0x0024 /* PRM.DEVICE_PRM register offsets */ #define OMAP4_PRM_RSTCTRL_OFFSET 0x0000 #define OMAP4_PRM_VOLTCTRL_OFFSET 0x0010 #define OMAP4_PRM_IO_PMCTRL_OFFSET 0x0020 #define OMAP4_PRM_VOLTSETUP_CORE_OFF_OFFSET 0x0028 #define OMAP4_PRM_VOLTSETUP_MPU_OFF_OFFSET 0x002c #define OMAP4_PRM_VOLTSETUP_IVA_OFF_OFFSET 0x0030 #define OMAP4_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET 0x0034 #define OMAP4_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET 0x0038 #define OMAP4_PRM_VOLTSETUP_IVA_RET_SLEEP_OFFSET 0x003c #define OMAP4_PRM_VP_CORE_CONFIG_OFFSET 0x0040 #define OMAP4_PRM_VP_CORE_STATUS_OFFSET 0x0044 #define OMAP4_PRM_VP_CORE_VLIMITTO_OFFSET 0x0048 #define OMAP4_PRM_VP_CORE_VOLTAGE_OFFSET 0x004c #define OMAP4_PRM_VP_CORE_VSTEPMAX_OFFSET 0x0050 #define OMAP4_PRM_VP_CORE_VSTEPMIN_OFFSET 0x0054 #define OMAP4_PRM_VP_MPU_CONFIG_OFFSET 0x0058 #define OMAP4_PRM_VP_MPU_STATUS_OFFSET 0x005c #define OMAP4_PRM_VP_MPU_VLIMITTO_OFFSET 0x0060 #define OMAP4_PRM_VP_MPU_VOLTAGE_OFFSET 0x0064 #define OMAP4_PRM_VP_MPU_VSTEPMAX_OFFSET 0x0068 #define OMAP4_PRM_VP_MPU_VSTEPMIN_OFFSET 0x006c 
#define OMAP4_PRM_VP_IVA_CONFIG_OFFSET 0x0070 #define OMAP4_PRM_VP_IVA_STATUS_OFFSET 0x0074 #define OMAP4_PRM_VP_IVA_VLIMITTO_OFFSET 0x0078 #define OMAP4_PRM_VP_IVA_VOLTAGE_OFFSET 0x007c #define OMAP4_PRM_VP_IVA_VSTEPMAX_OFFSET 0x0080 #define OMAP4_PRM_VP_IVA_VSTEPMIN_OFFSET 0x0084 #define OMAP4_PRM_VC_SMPS_SA_OFFSET 0x0088 #define OMAP4_PRM_VC_VAL_SMPS_RA_VOL_OFFSET 0x008c #define OMAP4_PRM_VC_VAL_SMPS_RA_CMD_OFFSET 0x0090 #define OMAP4_PRM_VC_VAL_CMD_VDD_CORE_L_OFFSET 0x0094 #define OMAP4_PRM_VC_VAL_CMD_VDD_MPU_L_OFFSET 0x0098 #define OMAP4_PRM_VC_VAL_CMD_VDD_IVA_L_OFFSET 0x009c #define OMAP4_PRM_VC_VAL_BYPASS_OFFSET 0x00a0 #define OMAP4_PRM_VC_CFG_CHANNEL_OFFSET 0x00a4 #define OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET 0x00a8 #define OMAP4_PRM_VC_CFG_I2C_CLK_OFFSET 0x00ac #endif
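/*
 * A quick standalone check (not part of the header above) of how
 * OMAP44XX_PRM_REGADDR() composes an address: base + instance offset +
 * register offset.  The real macro additionally runs the sum through
 * OMAP2_L4_IO_ADDRESS() to obtain a virtual address; only the physical
 * composition is illustrated here.
 */
#include <stdio.h>

int main(void)
{
	unsigned long base = 0x4a306000;	/* OMAP4430_PRM_BASE */
	unsigned long inst = 0x0000;		/* OMAP4430_PRM_OCP_SOCKET_INST */
	unsigned long reg  = 0x0010;		/* OMAP4_PRM_IRQSTATUS_MPU_OFFSET */

	printf("PRM_IRQSTATUS_MPU at 0x%08lx\n", base + inst + reg);

	/* MPU context register: instance 0x0300 + offset 0x0024 */
	printf("RM_MPU_MPU_CONTEXT at 0x%08lx\n", base + 0x0300 + 0x0024);
	return 0;
}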
// SPDX-License-Identifier: GPL-2.0 /* * Greybus bundles * * Copyright 2014-2015 Google Inc. * Copyright 2014-2015 Linaro Ltd. */ #include <linux/greybus.h> #include "greybus_trace.h" static ssize_t bundle_class_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_bundle *bundle = to_gb_bundle(dev); return sprintf(buf, "0x%02x\n", bundle->class); } static DEVICE_ATTR_RO(bundle_class); static ssize_t bundle_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_bundle *bundle = to_gb_bundle(dev); return sprintf(buf, "%u\n", bundle->id); } static DEVICE_ATTR_RO(bundle_id); static ssize_t state_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gb_bundle *bundle = to_gb_bundle(dev); if (!bundle->state) return sprintf(buf, "\n"); return sprintf(buf, "%s\n", bundle->state); } static ssize_t state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct gb_bundle *bundle = to_gb_bundle(dev); kfree(bundle->state); bundle->state = kstrdup(buf, GFP_KERNEL); if (!bundle->state) return -ENOMEM; /* Tell userspace that the file contents changed */ sysfs_notify(&bundle->dev.kobj, NULL, "state"); return size; } static DEVICE_ATTR_RW(state); static struct attribute *bundle_attrs[] = { &dev_attr_bundle_class.attr, &dev_attr_bundle_id.attr, &dev_attr_state.attr, NULL, }; ATTRIBUTE_GROUPS(bundle); static struct gb_bundle *gb_bundle_find(struct gb_interface *intf, u8 bundle_id) { struct gb_bundle *bundle; list_for_each_entry(bundle, &intf->bundles, links) { if (bundle->id == bundle_id) return bundle; } return NULL; } static void gb_bundle_release(struct device *dev) { struct gb_bundle *bundle = to_gb_bundle(dev); trace_gb_bundle_release(bundle); kfree(bundle->state); kfree(bundle->cport_desc); kfree(bundle); } #ifdef CONFIG_PM static void gb_bundle_disable_all_connections(struct gb_bundle *bundle) { struct gb_connection *connection; list_for_each_entry(connection, &bundle->connections, bundle_links) gb_connection_disable(connection); } static void gb_bundle_enable_all_connections(struct gb_bundle *bundle) { struct gb_connection *connection; list_for_each_entry(connection, &bundle->connections, bundle_links) gb_connection_enable(connection); } static int gb_bundle_suspend(struct device *dev) { struct gb_bundle *bundle = to_gb_bundle(dev); const struct dev_pm_ops *pm = dev->driver->pm; int ret; if (pm && pm->runtime_suspend) { ret = pm->runtime_suspend(&bundle->dev); if (ret) return ret; } else { gb_bundle_disable_all_connections(bundle); } ret = gb_control_bundle_suspend(bundle->intf->control, bundle->id); if (ret) { if (pm && pm->runtime_resume) ret = pm->runtime_resume(dev); else gb_bundle_enable_all_connections(bundle); return ret; } return 0; } static int gb_bundle_resume(struct device *dev) { struct gb_bundle *bundle = to_gb_bundle(dev); const struct dev_pm_ops *pm = dev->driver->pm; int ret; ret = gb_control_bundle_resume(bundle->intf->control, bundle->id); if (ret) return ret; if (pm && pm->runtime_resume) { ret = pm->runtime_resume(dev); if (ret) return ret; } else { gb_bundle_enable_all_connections(bundle); } return 0; } static int gb_bundle_idle(struct device *dev) { pm_runtime_mark_last_busy(dev); pm_request_autosuspend(dev); return 0; } #endif static const struct dev_pm_ops gb_bundle_pm_ops = { SET_RUNTIME_PM_OPS(gb_bundle_suspend, gb_bundle_resume, gb_bundle_idle) }; const struct device_type greybus_bundle_type = { .name = "greybus_bundle", .release = gb_bundle_release, .pm = 
&gb_bundle_pm_ops, }; /* * Create a gb_bundle structure to represent a discovered * bundle. Returns a pointer to the new bundle or a null * pointer if a failure occurs due to memory exhaustion. */ struct gb_bundle *gb_bundle_create(struct gb_interface *intf, u8 bundle_id, u8 class) { struct gb_bundle *bundle; if (bundle_id == BUNDLE_ID_NONE) { dev_err(&intf->dev, "can't use bundle id %u\n", bundle_id); return NULL; } /* * Reject any attempt to reuse a bundle id. We initialize * these serially, so there's no need to worry about keeping * the interface bundle list locked here. */ if (gb_bundle_find(intf, bundle_id)) { dev_err(&intf->dev, "duplicate bundle id %u\n", bundle_id); return NULL; } bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); if (!bundle) return NULL; bundle->intf = intf; bundle->id = bundle_id; bundle->class = class; INIT_LIST_HEAD(&bundle->connections); bundle->dev.parent = &intf->dev; bundle->dev.bus = &greybus_bus_type; bundle->dev.type = &greybus_bundle_type; bundle->dev.groups = bundle_groups; bundle->dev.dma_mask = intf->dev.dma_mask; device_initialize(&bundle->dev); dev_set_name(&bundle->dev, "%s.%d", dev_name(&intf->dev), bundle_id); list_add(&bundle->links, &intf->bundles); trace_gb_bundle_create(bundle); return bundle; } int gb_bundle_add(struct gb_bundle *bundle) { int ret; ret = device_add(&bundle->dev); if (ret) { dev_err(&bundle->dev, "failed to register bundle: %d\n", ret); return ret; } trace_gb_bundle_add(bundle); return 0; } /* * Tear down a previously set up bundle. */ void gb_bundle_destroy(struct gb_bundle *bundle) { trace_gb_bundle_destroy(bundle); if (device_is_registered(&bundle->dev)) device_del(&bundle->dev); list_del(&bundle->links); put_device(&bundle->dev); }
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _NF_LOG_H #define _NF_LOG_H #include <linux/netfilter.h> #include <linux/netfilter/nf_log.h> /* Log tcp sequence, tcp options, ip options and uid owning local socket */ #define NF_LOG_DEFAULT_MASK 0x0f /* This flag indicates that copy_len field in nf_loginfo is set */ #define NF_LOG_F_COPY_LEN 0x1 enum nf_log_type { NF_LOG_TYPE_LOG = 0, NF_LOG_TYPE_ULOG, NF_LOG_TYPE_MAX }; struct nf_loginfo { u_int8_t type; union { struct { /* copy_len will be used iff you set * NF_LOG_F_COPY_LEN in flags */ u_int32_t copy_len; u_int16_t group; u_int16_t qthreshold; u_int16_t flags; } ulog; struct { u_int8_t level; u_int8_t logflags; } log; } u; }; typedef void nf_logfn(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li, const char *prefix); struct nf_logger { char *name; enum nf_log_type type; nf_logfn *logfn; struct module *me; }; /* sysctl_nf_log_all_netns - allow LOG target in all network namespaces */ extern int sysctl_nf_log_all_netns; /* Function to register/unregister log function. */ int nf_log_register(u_int8_t pf, struct nf_logger *logger); void nf_log_unregister(struct nf_logger *logger); int nf_log_set(struct net *net, u_int8_t pf, const struct nf_logger *logger); void nf_log_unset(struct net *net, const struct nf_logger *logger); int nf_log_bind_pf(struct net *net, u_int8_t pf, const struct nf_logger *logger); void nf_log_unbind_pf(struct net *net, u_int8_t pf); int nf_logger_find_get(int pf, enum nf_log_type type); void nf_logger_put(int pf, enum nf_log_type type); #define MODULE_ALIAS_NF_LOGGER(family, type) \ MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type)) /* Calls the registered backend logging function */ __printf(8, 9) void nf_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li, const char *fmt, ...); __printf(8, 9) void nf_log_trace(struct net *net, u_int8_t pf, unsigned int hooknum, const struct sk_buff *skb, const struct net_device *in, const struct net_device *out, const struct nf_loginfo *li, const char *fmt, ...); struct nf_log_buf; struct nf_log_buf *nf_log_buf_open(void); __printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...); void nf_log_buf_close(struct nf_log_buf *m); #endif /* _NF_LOG_H */
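/*
 * A minimal sketch of how a backend plugs into the interface above: fill in
 * struct nf_logger, register it for a protocol family, and nf_log_packet()
 * can reach the logfn once that family is bound to this logger.  This is an
 * illustrative skeleton, not one of the in-tree backends; the callback
 * signature follows the nf_logfn typedef in the header.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_log.h>

static void demo_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct nf_loginfo *li, const char *prefix)
{
	pr_info("%s: hook %u, skb len %u\n",
		prefix ? prefix : "", hooknum, skb ? skb->len : 0);
}

static struct nf_logger demo_logger = {
	.name	= "demo-log",
	.type	= NF_LOG_TYPE_LOG,
	.logfn	= demo_log_packet,
	.me	= THIS_MODULE,
};

static int __init demo_log_init(void)
{
	return nf_log_register(NFPROTO_IPV4, &demo_logger);
}

static void __exit demo_log_exit(void)
{
	nf_log_unregister(&demo_logger);
}

module_init(demo_log_init);
module_exit(demo_log_exit);
MODULE_LICENSE("GPL");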
/* SPDX-License-Identifier: MIT */ /* * Copyright © 2022 Intel Corporation */ #ifndef __ICL_DSI_REGS_H__ #define __ICL_DSI_REGS_H__ #include "intel_display_reg_defs.h" /* Gen11 DSI */ #define _MMIO_DSI(tc, dsi0, dsi1) _MMIO_TRANS((tc) - TRANSCODER_DSI_0, \ dsi0, dsi1) #define _ICL_DSI_ESC_CLK_DIV0 0x6b090 #define _ICL_DSI_ESC_CLK_DIV1 0x6b890 #define ICL_DSI_ESC_CLK_DIV(port) _MMIO_PORT((port), \ _ICL_DSI_ESC_CLK_DIV0, \ _ICL_DSI_ESC_CLK_DIV1) #define _ICL_DPHY_ESC_CLK_DIV0 0x162190 #define _ICL_DPHY_ESC_CLK_DIV1 0x6C190 #define ICL_DPHY_ESC_CLK_DIV(port) _MMIO_PORT((port), \ _ICL_DPHY_ESC_CLK_DIV0, \ _ICL_DPHY_ESC_CLK_DIV1) #define ICL_BYTE_CLK_PER_ESC_CLK_MASK (0x1f << 16) #define ICL_BYTE_CLK_PER_ESC_CLK_SHIFT 16 #define ICL_ESC_CLK_DIV_MASK 0x1ff #define ICL_ESC_CLK_DIV_SHIFT 0 #define DSI_MAX_ESC_CLK 20000 /* in KHz */ #define _ADL_MIPIO_REG 0x180 #define ADL_MIPIO_DW(port, dw) _MMIO(_ICL_COMBOPHY(port) + _ADL_MIPIO_REG + 4 * (dw)) #define TX_ESC_CLK_DIV_PHY_SEL REGBIT(16) #define TX_ESC_CLK_DIV_PHY_MASK REG_GENMASK(23, 16) #define TX_ESC_CLK_DIV_PHY REG_FIELD_PREP(TX_ESC_CLK_DIV_PHY_MASK, 0x7f) #define _DSI_CMD_FRMCTL_0 0x6b034 #define _DSI_CMD_FRMCTL_1 0x6b834 #define DSI_CMD_FRMCTL(port) _MMIO_PORT(port, \ _DSI_CMD_FRMCTL_0,\ _DSI_CMD_FRMCTL_1) #define DSI_FRAME_UPDATE_REQUEST (1 << 31) #define DSI_PERIODIC_FRAME_UPDATE_ENABLE (1 << 29) #define DSI_NULL_PACKET_ENABLE (1 << 28) #define DSI_FRAME_IN_PROGRESS (1 << 0) #define _DSI_INTR_MASK_REG_0 0x6b070 #define _DSI_INTR_MASK_REG_1 0x6b870 #define DSI_INTR_MASK_REG(port) _MMIO_PORT(port, \ _DSI_INTR_MASK_REG_0,\ _DSI_INTR_MASK_REG_1) #define _DSI_INTR_IDENT_REG_0 0x6b074 #define _DSI_INTR_IDENT_REG_1 0x6b874 #define DSI_INTR_IDENT_REG(port) _MMIO_PORT(port, \ _DSI_INTR_IDENT_REG_0,\ _DSI_INTR_IDENT_REG_1) #define DSI_TE_EVENT (1 << 31) #define DSI_RX_DATA_OR_BTA_TERMINATED (1 << 30) #define DSI_TX_DATA (1 << 29) #define DSI_ULPS_ENTRY_DONE (1 << 28) #define DSI_NON_TE_TRIGGER_RECEIVED (1 << 27) #define DSI_HOST_CHKSUM_ERROR (1 << 26) #define DSI_HOST_MULTI_ECC_ERROR (1 << 25) #define DSI_HOST_SINGL_ECC_ERROR (1 << 24) #define DSI_HOST_CONTENTION_DETECTED (1 << 23) #define DSI_HOST_FALSE_CONTROL_ERROR (1 << 22) #define DSI_HOST_TIMEOUT_ERROR (1 << 21) #define DSI_HOST_LOW_POWER_TX_SYNC_ERROR (1 << 20) #define DSI_HOST_ESCAPE_MODE_ENTRY_ERROR (1 << 19) #define DSI_FRAME_UPDATE_DONE (1 << 16) #define DSI_PROTOCOL_VIOLATION_REPORTED (1 << 15) #define DSI_INVALID_TX_LENGTH (1 << 13) #define DSI_INVALID_VC (1 << 12) #define DSI_INVALID_DATA_TYPE (1 << 11) #define DSI_PERIPHERAL_CHKSUM_ERROR (1 << 10) #define DSI_PERIPHERAL_MULTI_ECC_ERROR (1 << 9) #define DSI_PERIPHERAL_SINGLE_ECC_ERROR (1 << 8) #define DSI_PERIPHERAL_CONTENTION_DETECTED (1 << 7) #define DSI_PERIPHERAL_FALSE_CTRL_ERROR (1 << 6) #define DSI_PERIPHERAL_TIMEOUT_ERROR (1 << 5) #define DSI_PERIPHERAL_LP_TX_SYNC_ERROR (1 << 4) #define DSI_PERIPHERAL_ESC_MODE_ENTRY_CMD_ERR (1 << 3) #define DSI_EOT_SYNC_ERROR (1 << 2) #define DSI_SOT_SYNC_ERROR (1 << 1) #define DSI_SOT_ERROR (1 << 0) /* ICL DSI MODE control */ #define _ICL_DSI_IO_MODECTL_0 0x6B094 #define _ICL_DSI_IO_MODECTL_1 0x6B894 #define ICL_DSI_IO_MODECTL(port) _MMIO_PORT(port, \ _ICL_DSI_IO_MODECTL_0, \ _ICL_DSI_IO_MODECTL_1) #define COMBO_PHY_MODE_DSI (1 << 0) /* TGL DSI Chicken register */ #define _TGL_DSI_CHKN_REG_0 0x6B0C0 #define _TGL_DSI_CHKN_REG_1 0x6B8C0 #define TGL_DSI_CHKN_REG(port) _MMIO_PORT(port, \ _TGL_DSI_CHKN_REG_0, \ _TGL_DSI_CHKN_REG_1) #define TGL_DSI_CHKN_LSHS_GB_MASK REG_GENMASK(15, 12) #define 
TGL_DSI_CHKN_LSHS_GB(byte_clocks) REG_FIELD_PREP(TGL_DSI_CHKN_LSHS_GB_MASK, \ (byte_clocks)) #define _ICL_DSI_T_INIT_MASTER_0 0x6b088 #define _ICL_DSI_T_INIT_MASTER_1 0x6b888 #define ICL_DSI_T_INIT_MASTER(port) _MMIO_PORT(port, \ _ICL_DSI_T_INIT_MASTER_0,\ _ICL_DSI_T_INIT_MASTER_1) #define DSI_T_INIT_MASTER_MASK REG_GENMASK(15, 0) #define _DPHY_CLK_TIMING_PARAM_0 0x162180 #define _DPHY_CLK_TIMING_PARAM_1 0x6c180 #define DPHY_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ _DPHY_CLK_TIMING_PARAM_0,\ _DPHY_CLK_TIMING_PARAM_1) #define _DSI_CLK_TIMING_PARAM_0 0x6b080 #define _DSI_CLK_TIMING_PARAM_1 0x6b880 #define DSI_CLK_TIMING_PARAM(port) _MMIO_PORT(port, \ _DSI_CLK_TIMING_PARAM_0,\ _DSI_CLK_TIMING_PARAM_1) #define CLK_PREPARE_OVERRIDE (1 << 31) #define CLK_PREPARE(x) ((x) << 28) #define CLK_PREPARE_MASK (0x7 << 28) #define CLK_PREPARE_SHIFT 28 #define CLK_ZERO_OVERRIDE (1 << 27) #define CLK_ZERO(x) ((x) << 20) #define CLK_ZERO_MASK (0xf << 20) #define CLK_ZERO_SHIFT 20 #define CLK_PRE_OVERRIDE (1 << 19) #define CLK_PRE(x) ((x) << 16) #define CLK_PRE_MASK (0x3 << 16) #define CLK_PRE_SHIFT 16 #define CLK_POST_OVERRIDE (1 << 15) #define CLK_POST(x) ((x) << 8) #define CLK_POST_MASK (0x7 << 8) #define CLK_POST_SHIFT 8 #define CLK_TRAIL_OVERRIDE (1 << 7) #define CLK_TRAIL(x) ((x) << 0) #define CLK_TRAIL_MASK (0xf << 0) #define CLK_TRAIL_SHIFT 0 #define _DPHY_DATA_TIMING_PARAM_0 0x162184 #define _DPHY_DATA_TIMING_PARAM_1 0x6c184 #define DPHY_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ _DPHY_DATA_TIMING_PARAM_0,\ _DPHY_DATA_TIMING_PARAM_1) #define _DSI_DATA_TIMING_PARAM_0 0x6B084 #define _DSI_DATA_TIMING_PARAM_1 0x6B884 #define DSI_DATA_TIMING_PARAM(port) _MMIO_PORT(port, \ _DSI_DATA_TIMING_PARAM_0,\ _DSI_DATA_TIMING_PARAM_1) #define HS_PREPARE_OVERRIDE (1 << 31) #define HS_PREPARE(x) ((x) << 24) #define HS_PREPARE_MASK (0x7 << 24) #define HS_PREPARE_SHIFT 24 #define HS_ZERO_OVERRIDE (1 << 23) #define HS_ZERO(x) ((x) << 16) #define HS_ZERO_MASK (0xf << 16) #define HS_ZERO_SHIFT 16 #define HS_TRAIL_OVERRIDE (1 << 15) #define HS_TRAIL(x) ((x) << 8) #define HS_TRAIL_MASK (0x7 << 8) #define HS_TRAIL_SHIFT 8 #define HS_EXIT_OVERRIDE (1 << 7) #define HS_EXIT(x) ((x) << 0) #define HS_EXIT_MASK (0x7 << 0) #define HS_EXIT_SHIFT 0 #define _DPHY_TA_TIMING_PARAM_0 0x162188 #define _DPHY_TA_TIMING_PARAM_1 0x6c188 #define DPHY_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ _DPHY_TA_TIMING_PARAM_0,\ _DPHY_TA_TIMING_PARAM_1) #define _DSI_TA_TIMING_PARAM_0 0x6b098 #define _DSI_TA_TIMING_PARAM_1 0x6b898 #define DSI_TA_TIMING_PARAM(port) _MMIO_PORT(port, \ _DSI_TA_TIMING_PARAM_0,\ _DSI_TA_TIMING_PARAM_1) #define TA_SURE_OVERRIDE (1 << 31) #define TA_SURE(x) ((x) << 16) #define TA_SURE_MASK (0x1f << 16) #define TA_SURE_SHIFT 16 #define TA_GO_OVERRIDE (1 << 15) #define TA_GO(x) ((x) << 8) #define TA_GO_MASK (0xf << 8) #define TA_GO_SHIFT 8 #define TA_GET_OVERRIDE (1 << 7) #define TA_GET(x) ((x) << 0) #define TA_GET_MASK (0xf << 0) #define TA_GET_SHIFT 0 /* DSI transcoder configuration */ #define _DSI_TRANS_FUNC_CONF_0 0x6b030 #define _DSI_TRANS_FUNC_CONF_1 0x6b830 #define DSI_TRANS_FUNC_CONF(tc) _MMIO_DSI(tc, \ _DSI_TRANS_FUNC_CONF_0,\ _DSI_TRANS_FUNC_CONF_1) #define OP_MODE_MASK (0x3 << 28) #define OP_MODE_SHIFT 28 #define CMD_MODE_NO_GATE (0x0 << 28) #define CMD_MODE_TE_GATE (0x1 << 28) #define VIDEO_MODE_SYNC_EVENT (0x2 << 28) #define VIDEO_MODE_SYNC_PULSE (0x3 << 28) #define TE_SOURCE_GPIO (1 << 27) #define LINK_READY (1 << 20) #define PIX_FMT_MASK (0x3 << 16) #define PIX_FMT_SHIFT 16 #define PIX_FMT_RGB565 (0x0 << 16) #define 
PIX_FMT_RGB666_PACKED (0x1 << 16) #define PIX_FMT_RGB666_LOOSE (0x2 << 16) #define PIX_FMT_RGB888 (0x3 << 16) #define PIX_FMT_RGB101010 (0x4 << 16) #define PIX_FMT_RGB121212 (0x5 << 16) #define PIX_FMT_COMPRESSED (0x6 << 16) #define BGR_TRANSMISSION (1 << 15) #define PIX_VIRT_CHAN(x) ((x) << 12) #define PIX_VIRT_CHAN_MASK (0x3 << 12) #define PIX_VIRT_CHAN_SHIFT 12 #define PIX_BUF_THRESHOLD_MASK (0x3 << 10) #define PIX_BUF_THRESHOLD_SHIFT 10 #define PIX_BUF_THRESHOLD_1_4 (0x0 << 10) #define PIX_BUF_THRESHOLD_1_2 (0x1 << 10) #define PIX_BUF_THRESHOLD_3_4 (0x2 << 10) #define PIX_BUF_THRESHOLD_FULL (0x3 << 10) #define CONTINUOUS_CLK_MASK (0x3 << 8) #define CONTINUOUS_CLK_SHIFT 8 #define CLK_ENTER_LP_AFTER_DATA (0x0 << 8) #define CLK_HS_OR_LP (0x2 << 8) #define CLK_HS_CONTINUOUS (0x3 << 8) #define LINK_CALIBRATION_MASK (0x3 << 4) #define LINK_CALIBRATION_SHIFT 4 #define CALIBRATION_DISABLED (0x0 << 4) #define CALIBRATION_ENABLED_INITIAL_ONLY (0x2 << 4) #define CALIBRATION_ENABLED_INITIAL_PERIODIC (0x3 << 4) #define BLANKING_PACKET_ENABLE (1 << 2) #define S3D_ORIENTATION_LANDSCAPE (1 << 1) #define EOTP_DISABLED (1 << 0) #define _DSI_CMD_RXCTL_0 0x6b0d4 #define _DSI_CMD_RXCTL_1 0x6b8d4 #define DSI_CMD_RXCTL(tc) _MMIO_DSI(tc, \ _DSI_CMD_RXCTL_0,\ _DSI_CMD_RXCTL_1) #define READ_UNLOADS_DW (1 << 16) #define RECEIVED_UNASSIGNED_TRIGGER (1 << 15) #define RECEIVED_ACKNOWLEDGE_TRIGGER (1 << 14) #define RECEIVED_TEAR_EFFECT_TRIGGER (1 << 13) #define RECEIVED_RESET_TRIGGER (1 << 12) #define RECEIVED_PAYLOAD_WAS_LOST (1 << 11) #define RECEIVED_CRC_WAS_LOST (1 << 10) #define NUMBER_RX_PLOAD_DW_MASK (0xff << 0) #define NUMBER_RX_PLOAD_DW_SHIFT 0 #define _DSI_CMD_TXCTL_0 0x6b0d0 #define _DSI_CMD_TXCTL_1 0x6b8d0 #define DSI_CMD_TXCTL(tc) _MMIO_DSI(tc, \ _DSI_CMD_TXCTL_0,\ _DSI_CMD_TXCTL_1) #define KEEP_LINK_IN_HS (1 << 24) #define FREE_HEADER_CREDIT_MASK (0x1f << 8) #define FREE_HEADER_CREDIT_SHIFT 0x8 #define FREE_PLOAD_CREDIT_MASK (0xff << 0) #define FREE_PLOAD_CREDIT_SHIFT 0 #define MAX_HEADER_CREDIT 0x10 #define MAX_PLOAD_CREDIT 0x40 #define _DSI_CMD_TXHDR_0 0x6b100 #define _DSI_CMD_TXHDR_1 0x6b900 #define DSI_CMD_TXHDR(tc) _MMIO_DSI(tc, \ _DSI_CMD_TXHDR_0,\ _DSI_CMD_TXHDR_1) #define PAYLOAD_PRESENT (1 << 31) #define LP_DATA_TRANSFER (1 << 30) #define VBLANK_FENCE (1 << 29) #define PARAM_WC_MASK (0xffff << 8) #define PARAM_WC_LOWER_SHIFT 8 #define PARAM_WC_UPPER_SHIFT 16 #define VC_MASK (0x3 << 6) #define VC_SHIFT 6 #define DT_MASK (0x3f << 0) #define DT_SHIFT 0 #define _DSI_CMD_TXPYLD_0 0x6b104 #define _DSI_CMD_TXPYLD_1 0x6b904 #define DSI_CMD_TXPYLD(tc) _MMIO_DSI(tc, \ _DSI_CMD_TXPYLD_0,\ _DSI_CMD_TXPYLD_1) #define _DSI_LP_MSG_0 0x6b0d8 #define _DSI_LP_MSG_1 0x6b8d8 #define DSI_LP_MSG(tc) _MMIO_DSI(tc, \ _DSI_LP_MSG_0,\ _DSI_LP_MSG_1) #define LPTX_IN_PROGRESS (1 << 17) #define LINK_IN_ULPS (1 << 16) #define LINK_ULPS_TYPE_LP11 (1 << 8) #define LINK_ENTER_ULPS (1 << 0) /* DSI timeout registers */ #define _DSI_HSTX_TO_0 0x6b044 #define _DSI_HSTX_TO_1 0x6b844 #define DSI_HSTX_TO(tc) _MMIO_DSI(tc, \ _DSI_HSTX_TO_0,\ _DSI_HSTX_TO_1) #define HSTX_TIMEOUT_VALUE_MASK (0xffff << 16) #define HSTX_TIMEOUT_VALUE_SHIFT 16 #define HSTX_TIMEOUT_VALUE(x) ((x) << 16) #define HSTX_TIMED_OUT (1 << 0) #define _DSI_LPRX_HOST_TO_0 0x6b048 #define _DSI_LPRX_HOST_TO_1 0x6b848 #define DSI_LPRX_HOST_TO(tc) _MMIO_DSI(tc, \ _DSI_LPRX_HOST_TO_0,\ _DSI_LPRX_HOST_TO_1) #define LPRX_TIMED_OUT (1 << 16) #define LPRX_TIMEOUT_VALUE_MASK (0xffff << 0) #define LPRX_TIMEOUT_VALUE_SHIFT 0 #define LPRX_TIMEOUT_VALUE(x) ((x) << 0) #define 
_DSI_PWAIT_TO_0 0x6b040 #define _DSI_PWAIT_TO_1 0x6b840 #define DSI_PWAIT_TO(tc) _MMIO_DSI(tc, \ _DSI_PWAIT_TO_0,\ _DSI_PWAIT_TO_1) #define PRESET_TIMEOUT_VALUE_MASK (0xffff << 16) #define PRESET_TIMEOUT_VALUE_SHIFT 16 #define PRESET_TIMEOUT_VALUE(x) ((x) << 16) #define PRESPONSE_TIMEOUT_VALUE_MASK (0xffff << 0) #define PRESPONSE_TIMEOUT_VALUE_SHIFT 0 #define PRESPONSE_TIMEOUT_VALUE(x) ((x) << 0) #define _DSI_TA_TO_0 0x6b04c #define _DSI_TA_TO_1 0x6b84c #define DSI_TA_TO(tc) _MMIO_DSI(tc, \ _DSI_TA_TO_0,\ _DSI_TA_TO_1) #define TA_TIMED_OUT (1 << 16) #define TA_TIMEOUT_VALUE_MASK (0xffff << 0) #define TA_TIMEOUT_VALUE_SHIFT 0 #define TA_TIMEOUT_VALUE(x) ((x) << 0) #endif /* __ICL_DSI_REGS_H__ */
// SPDX-License-Identifier: GPL-2.0+ // // Copyright (C) 2015 - 2016 Samsung Electronics Co., Ltd. // // Authors: Inha Song <[email protected]> // Sylwester Nawrocki <[email protected]> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/gpio/consumer.h> #include <linux/module.h> #include <linux/of.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include "i2s.h" #include "../codecs/wm5110.h" /* * The source clock is XCLKOUT with its mux set to the external fixed rate * oscillator (XXTI). */ #define MCLK_RATE 24000000U #define TM2_DAI_AIF1 0 #define TM2_DAI_AIF2 1 struct tm2_machine_priv { struct snd_soc_component *component; unsigned int sysclk_rate; struct gpio_desc *gpio_mic_bias; }; static int tm2_start_sysclk(struct snd_soc_card *card) { struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card); struct snd_soc_component *component = priv->component; int ret; ret = snd_soc_component_set_pll(component, WM5110_FLL1_REFCLK, ARIZONA_FLL_SRC_MCLK1, MCLK_RATE, priv->sysclk_rate); if (ret < 0) { dev_err(component->dev, "Failed to set FLL1 source: %d\n", ret); return ret; } ret = snd_soc_component_set_pll(component, WM5110_FLL1, ARIZONA_FLL_SRC_MCLK1, MCLK_RATE, priv->sysclk_rate); if (ret < 0) { dev_err(component->dev, "Failed to start FLL1: %d\n", ret); return ret; } ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK, ARIZONA_CLK_SRC_FLL1, priv->sysclk_rate, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(component->dev, "Failed to set SYSCLK source: %d\n", ret); return ret; } return 0; } static int tm2_stop_sysclk(struct snd_soc_card *card) { struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card); struct snd_soc_component *component = priv->component; int ret; ret = snd_soc_component_set_pll(component, WM5110_FLL1, 0, 0, 0); if (ret < 0) { dev_err(component->dev, "Failed to stop FLL1: %d\n", ret); return ret; } ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_SYSCLK, ARIZONA_CLK_SRC_FLL1, 0, 0); if (ret < 0) { dev_err(component->dev, "Failed to stop SYSCLK: %d\n", ret); return ret; } return 0; } static int tm2_aif1_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(rtd->card); switch (params_rate(params)) { case 4000: case 8000: case 12000: case 16000: case 24000: case 32000: case 48000: case 96000: case 192000: /* Highest possible SYSCLK frequency: 147.456MHz */ priv->sysclk_rate = 147456000U; break; case 11025: case 22050: case 44100: case 88200: case 176400: /* Highest possible SYSCLK frequency: 135.4752 MHz */ priv->sysclk_rate = 135475200U; break; default: dev_err(component->dev, "Not supported sample rate: %d\n", params_rate(params)); return -EINVAL; } return tm2_start_sysclk(rtd->card); } static const struct snd_soc_ops tm2_aif1_ops = { .hw_params = tm2_aif1_hw_params, }; static int tm2_aif2_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; unsigned int asyncclk_rate; int ret; switch (params_rate(params)) { case 8000: case 12000: case 16000: /* Highest possible ASYNCCLK frequency: 49.152MHz */ asyncclk_rate = 49152000U; break; case 11025: /* Highest possible ASYNCCLK frequency: 45.1584 MHz */ asyncclk_rate = 45158400U; 
break; default: dev_err(component->dev, "Not supported sample rate: %d\n", params_rate(params)); return -EINVAL; } ret = snd_soc_component_set_pll(component, WM5110_FLL2_REFCLK, ARIZONA_FLL_SRC_MCLK1, MCLK_RATE, asyncclk_rate); if (ret < 0) { dev_err(component->dev, "Failed to set FLL2 source: %d\n", ret); return ret; } ret = snd_soc_component_set_pll(component, WM5110_FLL2, ARIZONA_FLL_SRC_MCLK1, MCLK_RATE, asyncclk_rate); if (ret < 0) { dev_err(component->dev, "Failed to start FLL2: %d\n", ret); return ret; } ret = snd_soc_component_set_sysclk(component, ARIZONA_CLK_ASYNCCLK, ARIZONA_CLK_SRC_FLL2, asyncclk_rate, SND_SOC_CLOCK_IN); if (ret < 0) { dev_err(component->dev, "Failed to set ASYNCCLK source: %d\n", ret); return ret; } return 0; } static int tm2_aif2_hw_free(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; int ret; /* disable FLL2 */ ret = snd_soc_component_set_pll(component, WM5110_FLL2, ARIZONA_FLL_SRC_MCLK1, 0, 0); if (ret < 0) dev_err(component->dev, "Failed to stop FLL2: %d\n", ret); return ret; } static const struct snd_soc_ops tm2_aif2_ops = { .hw_params = tm2_aif2_hw_params, .hw_free = tm2_aif2_hw_free, }; static int tm2_hdmi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0); unsigned int bfs; int bitwidth, ret; bitwidth = snd_pcm_format_width(params_format(params)); if (bitwidth < 0) { dev_err(rtd->card->dev, "Invalid bit-width: %d\n", bitwidth); return bitwidth; } switch (bitwidth) { case 48: bfs = 64; break; case 16: bfs = 32; break; default: dev_err(rtd->card->dev, "Unsupported bit-width: %d\n", bitwidth); return -EINVAL; } switch (params_rate(params)) { case 48000: case 96000: case 192000: break; default: dev_err(rtd->card->dev, "Unsupported sample rate: %d\n", params_rate(params)); return -EINVAL; } ret = snd_soc_dai_set_sysclk(cpu_dai, SAMSUNG_I2S_OPCLK, 0, SAMSUNG_I2S_OPCLK_PCLK); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, SAMSUNG_I2S_DIV_BCLK, bfs); if (ret < 0) return ret; return 0; } static const struct snd_soc_ops tm2_hdmi_ops = { .hw_params = tm2_hdmi_hw_params, }; static int tm2_mic_bias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_card *card = w->dapm->card; struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card); switch (event) { case SND_SOC_DAPM_PRE_PMU: gpiod_set_value_cansleep(priv->gpio_mic_bias, 1); break; case SND_SOC_DAPM_POST_PMD: gpiod_set_value_cansleep(priv->gpio_mic_bias, 0); break; } return 0; } static int tm2_set_bias_level(struct snd_soc_card *card, struct snd_soc_dapm_context *dapm, enum snd_soc_bias_level level) { struct snd_soc_pcm_runtime *rtd; rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[0]); if (dapm->dev != snd_soc_rtd_to_codec(rtd, 0)->dev) return 0; switch (level) { case SND_SOC_BIAS_STANDBY: if (card->dapm.bias_level == SND_SOC_BIAS_OFF) tm2_start_sysclk(card); break; case SND_SOC_BIAS_OFF: tm2_stop_sysclk(card); break; default: break; } return 0; } static struct snd_soc_aux_dev tm2_speaker_amp_dev; static int tm2_late_probe(struct snd_soc_card *card) { struct tm2_machine_priv *priv = snd_soc_card_get_drvdata(card); unsigned int ch_map[] = { 0, 1 }; struct snd_soc_dai *amp_pdm_dai; struct snd_soc_pcm_runtime *rtd; struct snd_soc_dai *aif1_dai; struct 
snd_soc_dai *aif2_dai; int ret; rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[TM2_DAI_AIF1]); aif1_dai = snd_soc_rtd_to_codec(rtd, 0); priv->component = snd_soc_rtd_to_codec(rtd, 0)->component; ret = snd_soc_dai_set_sysclk(aif1_dai, ARIZONA_CLK_SYSCLK, 0, 0); if (ret < 0) { dev_err(aif1_dai->dev, "Failed to set SYSCLK: %d\n", ret); return ret; } rtd = snd_soc_get_pcm_runtime(card, &card->dai_link[TM2_DAI_AIF2]); aif2_dai = snd_soc_rtd_to_codec(rtd, 0); ret = snd_soc_dai_set_sysclk(aif2_dai, ARIZONA_CLK_ASYNCCLK, 0, 0); if (ret < 0) { dev_err(aif2_dai->dev, "Failed to set ASYNCCLK: %d\n", ret); return ret; } amp_pdm_dai = snd_soc_find_dai(&tm2_speaker_amp_dev.dlc); if (!amp_pdm_dai) return -ENODEV; /* Set the MAX98504 V/I sense PDM Tx DAI channel mapping */ ret = snd_soc_dai_set_channel_map(amp_pdm_dai, ARRAY_SIZE(ch_map), ch_map, 0, NULL); if (ret < 0) return ret; ret = snd_soc_dai_set_tdm_slot(amp_pdm_dai, 0x3, 0x0, 2, 16); if (ret < 0) return ret; return 0; } static const struct snd_kcontrol_new tm2_controls[] = { SOC_DAPM_PIN_SWITCH("HP"), SOC_DAPM_PIN_SWITCH("SPK"), SOC_DAPM_PIN_SWITCH("RCV"), SOC_DAPM_PIN_SWITCH("VPS"), SOC_DAPM_PIN_SWITCH("HDMI"), SOC_DAPM_PIN_SWITCH("Main Mic"), SOC_DAPM_PIN_SWITCH("Sub Mic"), SOC_DAPM_PIN_SWITCH("Third Mic"), SOC_DAPM_PIN_SWITCH("Headset Mic"), }; static const struct snd_soc_dapm_widget tm2_dapm_widgets[] = { SND_SOC_DAPM_HP("HP", NULL), SND_SOC_DAPM_SPK("SPK", NULL), SND_SOC_DAPM_SPK("RCV", NULL), SND_SOC_DAPM_LINE("VPS", NULL), SND_SOC_DAPM_LINE("HDMI", NULL), SND_SOC_DAPM_MIC("Main Mic", tm2_mic_bias), SND_SOC_DAPM_MIC("Sub Mic", NULL), SND_SOC_DAPM_MIC("Third Mic", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), }; static const struct snd_soc_component_driver tm2_component = { .name = "tm2-audio", }; static struct snd_soc_dai_driver tm2_ext_dai[] = { { .name = "Voice call", .playback = { .channels_min = 1, .channels_max = 4, .rate_min = 8000, .rate_max = 48000, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .channels_min = 1, .channels_max = 4, .rate_min = 8000, .rate_max = 48000, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_48000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, { .name = "Bluetooth", .playback = { .channels_min = 1, .channels_max = 4, .rate_min = 8000, .rate_max = 16000, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, .capture = { .channels_min = 1, .channels_max = 2, .rate_min = 8000, .rate_max = 16000, .rates = (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_16000), .formats = SNDRV_PCM_FMTBIT_S16_LE, }, }, }; SND_SOC_DAILINK_DEFS(aif1, DAILINK_COMP_ARRAY(COMP_CPU(SAMSUNG_I2S_DAI)), DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm5110-aif1")), DAILINK_COMP_ARRAY(COMP_EMPTY())); SND_SOC_DAILINK_DEFS(voice, DAILINK_COMP_ARRAY(COMP_CPU(SAMSUNG_I2S_DAI)), DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm5110-aif2")), DAILINK_COMP_ARRAY(COMP_EMPTY())); SND_SOC_DAILINK_DEFS(bt, DAILINK_COMP_ARRAY(COMP_CPU(SAMSUNG_I2S_DAI)), DAILINK_COMP_ARRAY(COMP_CODEC(NULL, "wm5110-aif3")), DAILINK_COMP_ARRAY(COMP_EMPTY())); SND_SOC_DAILINK_DEFS(hdmi, DAILINK_COMP_ARRAY(COMP_EMPTY()), DAILINK_COMP_ARRAY(COMP_EMPTY()), DAILINK_COMP_ARRAY(COMP_EMPTY())); static struct snd_soc_dai_link tm2_dai_links[] = { { .name = "WM5110 AIF1", .stream_name = "HiFi Primary", .ops = &tm2_aif1_ops, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, SND_SOC_DAILINK_REG(aif1), }, { .name = "WM5110 Voice", .stream_name 
= "Voice call", .ops = &tm2_aif2_ops, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .ignore_suspend = 1, SND_SOC_DAILINK_REG(voice), }, { .name = "WM5110 BT", .stream_name = "Bluetooth", .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBM_CFM, .ignore_suspend = 1, SND_SOC_DAILINK_REG(bt), }, { .name = "HDMI", .stream_name = "i2s1", .ops = &tm2_hdmi_ops, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, SND_SOC_DAILINK_REG(hdmi), } }; static struct snd_soc_card tm2_card = { .owner = THIS_MODULE, .dai_link = tm2_dai_links, .controls = tm2_controls, .num_controls = ARRAY_SIZE(tm2_controls), .dapm_widgets = tm2_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(tm2_dapm_widgets), .aux_dev = &tm2_speaker_amp_dev, .num_aux_devs = 1, .late_probe = tm2_late_probe, .set_bias_level = tm2_set_bias_level, }; static int tm2_probe(struct platform_device *pdev) { struct device_node *cpu_dai_node[2] = {}; struct device_node *codec_dai_node[2] = {}; const char *cells_name = NULL; struct device *dev = &pdev->dev; struct snd_soc_card *card = &tm2_card; struct tm2_machine_priv *priv; struct snd_soc_dai_link *dai_link; int num_codecs, ret, i; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; snd_soc_card_set_drvdata(card, priv); card->dev = dev; priv->gpio_mic_bias = devm_gpiod_get(dev, "mic-bias", GPIOD_OUT_HIGH); if (IS_ERR(priv->gpio_mic_bias)) { dev_err(dev, "Failed to get mic bias gpio\n"); return PTR_ERR(priv->gpio_mic_bias); } ret = snd_soc_of_parse_card_name(card, "model"); if (ret < 0) { dev_err(dev, "Card name is not specified\n"); return ret; } ret = snd_soc_of_parse_audio_routing(card, "audio-routing"); if (ret < 0) { /* Backwards compatible way */ ret = snd_soc_of_parse_audio_routing(card, "samsung,audio-routing"); if (ret < 0) { dev_err(dev, "Audio routing is not specified or invalid\n"); return ret; } } card->aux_dev[0].dlc.of_node = of_parse_phandle(dev->of_node, "audio-amplifier", 0); if (!card->aux_dev[0].dlc.of_node) { dev_err(dev, "audio-amplifier property invalid or missing\n"); return -EINVAL; } num_codecs = of_count_phandle_with_args(dev->of_node, "audio-codec", NULL); /* Skip the HDMI link if not specified in DT */ if (num_codecs > 1) { card->num_links = ARRAY_SIZE(tm2_dai_links); cells_name = "#sound-dai-cells"; } else { card->num_links = ARRAY_SIZE(tm2_dai_links) - 1; } for (i = 0; i < num_codecs; i++) { struct of_phandle_args args; ret = of_parse_phandle_with_args(dev->of_node, "i2s-controller", cells_name, i, &args); if (ret) { dev_err(dev, "i2s-controller property parse error: %d\n", i); ret = -EINVAL; goto dai_node_put; } cpu_dai_node[i] = args.np; codec_dai_node[i] = of_parse_phandle(dev->of_node, "audio-codec", i); if (!codec_dai_node[i]) { dev_err(dev, "audio-codec property parse error\n"); ret = -EINVAL; goto dai_node_put; } } /* Initialize WM5110 - I2S and HDMI - I2S1 DAI links */ for_each_card_prelinks(card, i, dai_link) { unsigned int dai_index = 0; /* WM5110 */ dai_link->cpus->name = NULL; dai_link->platforms->name = NULL; if (num_codecs > 1 && i == card->num_links - 1) dai_index = 1; /* HDMI */ dai_link->codecs->of_node = codec_dai_node[dai_index]; dai_link->cpus->of_node = cpu_dai_node[dai_index]; dai_link->platforms->of_node = cpu_dai_node[dai_index]; } if (num_codecs > 1) { struct of_phandle_args args; /* HDMI DAI link (I2S1) */ i = card->num_links - 1; ret = of_parse_phandle_with_fixed_args(dev->of_node, "audio-codec", 0, 1, &args); if (ret) { dev_err(dev, 
"audio-codec property parse error\n"); goto dai_node_put; } ret = snd_soc_get_dai_name(&args, &card->dai_link[i].codecs->dai_name); if (ret) { dev_err(dev, "Unable to get codec_dai_name\n"); goto dai_node_put; } } ret = devm_snd_soc_register_component(dev, &tm2_component, tm2_ext_dai, ARRAY_SIZE(tm2_ext_dai)); if (ret < 0) { dev_err(dev, "Failed to register component: %d\n", ret); goto dai_node_put; } ret = devm_snd_soc_register_card(dev, card); if (ret < 0) { dev_err_probe(dev, ret, "Failed to register card\n"); goto dai_node_put; } dai_node_put: for (i = 0; i < num_codecs; i++) { of_node_put(codec_dai_node[i]); of_node_put(cpu_dai_node[i]); } of_node_put(card->aux_dev[0].dlc.of_node); return ret; } static int tm2_pm_prepare(struct device *dev) { struct snd_soc_card *card = dev_get_drvdata(dev); return tm2_stop_sysclk(card); } static void tm2_pm_complete(struct device *dev) { struct snd_soc_card *card = dev_get_drvdata(dev); tm2_start_sysclk(card); } static const struct dev_pm_ops tm2_pm_ops = { .prepare = tm2_pm_prepare, .suspend = snd_soc_suspend, .resume = snd_soc_resume, .complete = tm2_pm_complete, .freeze = snd_soc_suspend, .thaw = snd_soc_resume, .poweroff = snd_soc_poweroff, .restore = snd_soc_resume, }; static const struct of_device_id tm2_of_match[] = { { .compatible = "samsung,tm2-audio" }, { }, }; MODULE_DEVICE_TABLE(of, tm2_of_match); static struct platform_driver tm2_driver = { .driver = { .name = "tm2-audio", .pm = &tm2_pm_ops, .of_match_table = tm2_of_match, }, .probe = tm2_probe, }; module_platform_driver(tm2_driver); MODULE_AUTHOR("Inha Song <[email protected]>"); MODULE_DESCRIPTION("ALSA SoC Exynos TM2 Audio Support"); MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * Filename: target_core_transport.c * * This file contains the Generic Target Engine Core. * * (c) Copyright 2002-2013 Datera, Inc. * * Nicholas A. Bellinger <[email protected]> * ******************************************************************************/ #include <linux/net.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/kthread.h> #include <linux/in.h> #include <linux/cdrom.h> #include <linux/module.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <linux/unaligned.h> #include <net/sock.h> #include <net/tcp.h> #include <scsi/scsi_proto.h> #include <scsi/scsi_common.h> #include <target/target_core_base.h> #include <target/target_core_backend.h> #include <target/target_core_fabric.h> #include "target_core_internal.h" #include "target_core_alua.h" #include "target_core_pr.h" #include "target_core_ua.h" #define CREATE_TRACE_POINTS #include <trace/events/target.h> static struct workqueue_struct *target_completion_wq; static struct workqueue_struct *target_submission_wq; static struct kmem_cache *se_sess_cache; struct kmem_cache *se_ua_cache; struct kmem_cache *t10_pr_reg_cache; struct kmem_cache *t10_alua_lu_gp_cache; struct kmem_cache *t10_alua_lu_gp_mem_cache; struct kmem_cache *t10_alua_tg_pt_gp_cache; struct kmem_cache *t10_alua_lba_map_cache; struct kmem_cache *t10_alua_lba_map_mem_cache; static void transport_complete_task_attr(struct se_cmd *cmd); static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason); static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, int err, bool write_pending); static void target_complete_ok_work(struct work_struct *work); int init_se_kmem_caches(void) { se_sess_cache = kmem_cache_create("se_sess_cache", sizeof(struct se_session), __alignof__(struct se_session), 0, NULL); if (!se_sess_cache) { pr_err("kmem_cache_create() for struct se_session" " failed\n"); goto out; } se_ua_cache = kmem_cache_create("se_ua_cache", sizeof(struct se_ua), __alignof__(struct se_ua), 0, NULL); if (!se_ua_cache) { pr_err("kmem_cache_create() for struct se_ua failed\n"); goto out_free_sess_cache; } t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache", sizeof(struct t10_pr_registration), __alignof__(struct t10_pr_registration), 0, NULL); if (!t10_pr_reg_cache) { pr_err("kmem_cache_create() for struct t10_pr_registration" " failed\n"); goto out_free_ua_cache; } t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache", sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), 0, NULL); if (!t10_alua_lu_gp_cache) { pr_err("kmem_cache_create() for t10_alua_lu_gp_cache" " failed\n"); goto out_free_pr_reg_cache; } t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache", sizeof(struct t10_alua_lu_gp_member), __alignof__(struct t10_alua_lu_gp_member), 0, NULL); if (!t10_alua_lu_gp_mem_cache) { pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_" "cache failed\n"); goto out_free_lu_gp_cache; } t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache", sizeof(struct t10_alua_tg_pt_gp), __alignof__(struct t10_alua_tg_pt_gp), 0, NULL); if (!t10_alua_tg_pt_gp_cache) { pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_" "cache failed\n"); goto out_free_lu_gp_mem_cache; } t10_alua_lba_map_cache = kmem_cache_create( "t10_alua_lba_map_cache", sizeof(struct 
t10_alua_lba_map), __alignof__(struct t10_alua_lba_map), 0, NULL); if (!t10_alua_lba_map_cache) { pr_err("kmem_cache_create() for t10_alua_lba_map_" "cache failed\n"); goto out_free_tg_pt_gp_cache; } t10_alua_lba_map_mem_cache = kmem_cache_create( "t10_alua_lba_map_mem_cache", sizeof(struct t10_alua_lba_map_member), __alignof__(struct t10_alua_lba_map_member), 0, NULL); if (!t10_alua_lba_map_mem_cache) { pr_err("kmem_cache_create() for t10_alua_lba_map_mem_" "cache failed\n"); goto out_free_lba_map_cache; } target_completion_wq = alloc_workqueue("target_completion", WQ_MEM_RECLAIM, 0); if (!target_completion_wq) goto out_free_lba_map_mem_cache; target_submission_wq = alloc_workqueue("target_submission", WQ_MEM_RECLAIM, 0); if (!target_submission_wq) goto out_free_completion_wq; return 0; out_free_completion_wq: destroy_workqueue(target_completion_wq); out_free_lba_map_mem_cache: kmem_cache_destroy(t10_alua_lba_map_mem_cache); out_free_lba_map_cache: kmem_cache_destroy(t10_alua_lba_map_cache); out_free_tg_pt_gp_cache: kmem_cache_destroy(t10_alua_tg_pt_gp_cache); out_free_lu_gp_mem_cache: kmem_cache_destroy(t10_alua_lu_gp_mem_cache); out_free_lu_gp_cache: kmem_cache_destroy(t10_alua_lu_gp_cache); out_free_pr_reg_cache: kmem_cache_destroy(t10_pr_reg_cache); out_free_ua_cache: kmem_cache_destroy(se_ua_cache); out_free_sess_cache: kmem_cache_destroy(se_sess_cache); out: return -ENOMEM; } void release_se_kmem_caches(void) { destroy_workqueue(target_submission_wq); destroy_workqueue(target_completion_wq); kmem_cache_destroy(se_sess_cache); kmem_cache_destroy(se_ua_cache); kmem_cache_destroy(t10_pr_reg_cache); kmem_cache_destroy(t10_alua_lu_gp_cache); kmem_cache_destroy(t10_alua_lu_gp_mem_cache); kmem_cache_destroy(t10_alua_tg_pt_gp_cache); kmem_cache_destroy(t10_alua_lba_map_cache); kmem_cache_destroy(t10_alua_lba_map_mem_cache); } /* This code ensures unique mib indexes are handed out. 
*/ static DEFINE_SPINLOCK(scsi_mib_index_lock); static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX]; /* * Allocate a new row index for the entry type specified */ u32 scsi_get_new_index(scsi_index_t type) { u32 new_index; BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX)); spin_lock(&scsi_mib_index_lock); new_index = ++scsi_mib_index[type]; spin_unlock(&scsi_mib_index_lock); return new_index; } void transport_subsystem_check_init(void) { int ret; static int sub_api_initialized; if (sub_api_initialized) return; ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock"); if (ret != 0) pr_err("Unable to load target_core_iblock\n"); ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file"); if (ret != 0) pr_err("Unable to load target_core_file\n"); ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi"); if (ret != 0) pr_err("Unable to load target_core_pscsi\n"); ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user"); if (ret != 0) pr_err("Unable to load target_core_user\n"); sub_api_initialized = 1; } static void target_release_cmd_refcnt(struct percpu_ref *ref) { struct target_cmd_counter *cmd_cnt = container_of(ref, typeof(*cmd_cnt), refcnt); wake_up(&cmd_cnt->refcnt_wq); } struct target_cmd_counter *target_alloc_cmd_counter(void) { struct target_cmd_counter *cmd_cnt; int rc; cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL); if (!cmd_cnt) return NULL; init_completion(&cmd_cnt->stop_done); init_waitqueue_head(&cmd_cnt->refcnt_wq); atomic_set(&cmd_cnt->stopped, 0); rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0, GFP_KERNEL); if (rc) goto free_cmd_cnt; return cmd_cnt; free_cmd_cnt: kfree(cmd_cnt); return NULL; } EXPORT_SYMBOL_GPL(target_alloc_cmd_counter); void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt) { /* * Drivers like loop do not call target_stop_session during session * shutdown so we have to drop the ref taken at init time here. */ if (!atomic_read(&cmd_cnt->stopped)) percpu_ref_put(&cmd_cnt->refcnt); percpu_ref_exit(&cmd_cnt->refcnt); kfree(cmd_cnt); } EXPORT_SYMBOL_GPL(target_free_cmd_counter); /** * transport_init_session - initialize a session object * @se_sess: Session object pointer. * * The caller must have zero-initialized @se_sess before calling this function. */ void transport_init_session(struct se_session *se_sess) { INIT_LIST_HEAD(&se_sess->sess_list); INIT_LIST_HEAD(&se_sess->sess_acl_list); spin_lock_init(&se_sess->sess_cmd_lock); } EXPORT_SYMBOL(transport_init_session); /** * transport_alloc_session - allocate a session object and initialize it * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. */ struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops) { struct se_session *se_sess; se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL); if (!se_sess) { pr_err("Unable to allocate struct se_session from" " se_sess_cache\n"); return ERR_PTR(-ENOMEM); } transport_init_session(se_sess); se_sess->sup_prot_ops = sup_prot_ops; return se_sess; } EXPORT_SYMBOL(transport_alloc_session); /** * transport_alloc_session_tags - allocate target driver private data * @se_sess: Session pointer. * @tag_num: Maximum number of in-flight commands between initiator and target. * @tag_size: Size in bytes of the private data a target driver associates with * each command. 
*/ int transport_alloc_session_tags(struct se_session *se_sess, unsigned int tag_num, unsigned int tag_size) { int rc; se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num, GFP_KERNEL | __GFP_RETRY_MAYFAIL); if (!se_sess->sess_cmd_map) { pr_err("Unable to allocate se_sess->sess_cmd_map\n"); return -ENOMEM; } rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1, false, GFP_KERNEL, NUMA_NO_NODE); if (rc < 0) { pr_err("Unable to init se_sess->sess_tag_pool," " tag_num: %u\n", tag_num); kvfree(se_sess->sess_cmd_map); se_sess->sess_cmd_map = NULL; return -ENOMEM; } return 0; } EXPORT_SYMBOL(transport_alloc_session_tags); /** * transport_init_session_tags - allocate a session and target driver private data * @tag_num: Maximum number of in-flight commands between initiator and target. * @tag_size: Size in bytes of the private data a target driver associates with * each command. * @sup_prot_ops: bitmask that defines which T10-PI modes are supported. */ static struct se_session * transport_init_session_tags(unsigned int tag_num, unsigned int tag_size, enum target_prot_op sup_prot_ops) { struct se_session *se_sess; int rc; if (tag_num != 0 && !tag_size) { pr_err("init_session_tags called with percpu-ida tag_num:" " %u, but zero tag_size\n", tag_num); return ERR_PTR(-EINVAL); } if (!tag_num && tag_size) { pr_err("init_session_tags called with percpu-ida tag_size:" " %u, but zero tag_num\n", tag_size); return ERR_PTR(-EINVAL); } se_sess = transport_alloc_session(sup_prot_ops); if (IS_ERR(se_sess)) return se_sess; rc = transport_alloc_session_tags(se_sess, tag_num, tag_size); if (rc < 0) { transport_free_session(se_sess); return ERR_PTR(-ENOMEM); } return se_sess; } /* * Called with spin_lock_irqsave(&struct se_portal_group->session_lock called. */ void __transport_register_session( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct se_session *se_sess, void *fabric_sess_ptr) { const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo; unsigned char buf[PR_REG_ISID_LEN]; unsigned long flags; se_sess->se_tpg = se_tpg; se_sess->fabric_sess_ptr = fabric_sess_ptr; /* * Used by struct se_node_acl's under ConfigFS to locate active se_session-t * * Only set for struct se_session's that will actually be moving I/O. * eg: *NOT* discovery sessions. */ if (se_nacl) { /* * * Determine if fabric allows for T10-PI feature bits exposed to * initiators for device backends with !dev->dev_attrib.pi_prot_type. * * If so, then always save prot_type on a per se_node_acl node * basis and re-instate the previous sess_prot_type to avoid * disabling PI from below any previously initiator side * registered LUNs. */ if (se_nacl->saved_prot_type) se_sess->sess_prot_type = se_nacl->saved_prot_type; else if (tfo->tpg_check_prot_fabric_only) se_sess->sess_prot_type = se_nacl->saved_prot_type = tfo->tpg_check_prot_fabric_only(se_tpg); /* * If the fabric module supports an ISID based TransportID, * save this value in binary from the fabric I_T Nexus now. */ if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) { memset(&buf[0], 0, PR_REG_ISID_LEN); se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, &buf[0], PR_REG_ISID_LEN); se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]); } spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); /* * The se_nacl->nacl_sess pointer will be set to the * last active I_T Nexus for each struct se_node_acl. 
*/ se_nacl->nacl_sess = se_sess; list_add_tail(&se_sess->sess_acl_list, &se_nacl->acl_sess_list); spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); } list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list); pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n", se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr); } EXPORT_SYMBOL(__transport_register_session); void transport_register_session( struct se_portal_group *se_tpg, struct se_node_acl *se_nacl, struct se_session *se_sess, void *fabric_sess_ptr) { unsigned long flags; spin_lock_irqsave(&se_tpg->session_lock, flags); __transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr); spin_unlock_irqrestore(&se_tpg->session_lock, flags); } EXPORT_SYMBOL(transport_register_session); struct se_session * target_setup_session(struct se_portal_group *tpg, unsigned int tag_num, unsigned int tag_size, enum target_prot_op prot_op, const char *initiatorname, void *private, int (*callback)(struct se_portal_group *, struct se_session *, void *)) { struct target_cmd_counter *cmd_cnt; struct se_session *sess; int rc; cmd_cnt = target_alloc_cmd_counter(); if (!cmd_cnt) return ERR_PTR(-ENOMEM); /* * If the fabric driver is using percpu-ida based pre allocation * of I/O descriptor tags, go ahead and perform that setup now.. */ if (tag_num != 0) sess = transport_init_session_tags(tag_num, tag_size, prot_op); else sess = transport_alloc_session(prot_op); if (IS_ERR(sess)) { rc = PTR_ERR(sess); goto free_cnt; } sess->cmd_cnt = cmd_cnt; sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg, (unsigned char *)initiatorname); if (!sess->se_node_acl) { rc = -EACCES; goto free_sess; } /* * Go ahead and perform any remaining fabric setup that is * required before transport_register_session(). 
*/ if (callback != NULL) { rc = callback(tpg, sess, private); if (rc) goto free_sess; } transport_register_session(tpg, sess->se_node_acl, sess, private); return sess; free_sess: transport_free_session(sess); return ERR_PTR(rc); free_cnt: target_free_cmd_counter(cmd_cnt); return ERR_PTR(rc); } EXPORT_SYMBOL(target_setup_session); ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page) { struct se_session *se_sess; ssize_t len = 0; spin_lock_bh(&se_tpg->session_lock); list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) { if (!se_sess->se_node_acl) continue; if (!se_sess->se_node_acl->dynamic_node_acl) continue; if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE) break; len += snprintf(page + len, PAGE_SIZE - len, "%s\n", se_sess->se_node_acl->initiatorname); len += 1; /* Include NULL terminator */ } spin_unlock_bh(&se_tpg->session_lock); return len; } EXPORT_SYMBOL(target_show_dynamic_sessions); static void target_complete_nacl(struct kref *kref) { struct se_node_acl *nacl = container_of(kref, struct se_node_acl, acl_kref); struct se_portal_group *se_tpg = nacl->se_tpg; if (!nacl->dynamic_stop) { complete(&nacl->acl_free_comp); return; } mutex_lock(&se_tpg->acl_node_mutex); list_del_init(&nacl->acl_list); mutex_unlock(&se_tpg->acl_node_mutex); core_tpg_wait_for_nacl_pr_ref(nacl); core_free_device_list_for_node(nacl, se_tpg); kfree(nacl); } void target_put_nacl(struct se_node_acl *nacl) { kref_put(&nacl->acl_kref, target_complete_nacl); } EXPORT_SYMBOL(target_put_nacl); void transport_deregister_session_configfs(struct se_session *se_sess) { struct se_node_acl *se_nacl; unsigned long flags; /* * Used by struct se_node_acl's under ConfigFS to locate active struct se_session */ se_nacl = se_sess->se_node_acl; if (se_nacl) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); if (!list_empty(&se_sess->sess_acl_list)) list_del_init(&se_sess->sess_acl_list); /* * If the session list is empty, then clear the pointer. * Otherwise, set the struct se_session pointer from the tail * element of the per struct se_node_acl active session list. */ if (list_empty(&se_nacl->acl_sess_list)) se_nacl->nacl_sess = NULL; else { se_nacl->nacl_sess = container_of( se_nacl->acl_sess_list.prev, struct se_session, sess_acl_list); } spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); } } EXPORT_SYMBOL(transport_deregister_session_configfs); void transport_free_session(struct se_session *se_sess) { struct se_node_acl *se_nacl = se_sess->se_node_acl; /* * Drop the se_node_acl->nacl_kref obtained from within * core_tpg_get_initiator_node_acl(). */ if (se_nacl) { struct se_portal_group *se_tpg = se_nacl->se_tpg; const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo; unsigned long flags; se_sess->se_node_acl = NULL; /* * Also determine if we need to drop the extra ->cmd_kref if * it had been previously dynamically generated, and * the endpoint is not caching dynamic ACLs. 
*/ mutex_lock(&se_tpg->acl_node_mutex); if (se_nacl->dynamic_node_acl && !se_tfo->tpg_check_demo_mode_cache(se_tpg)) { spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags); if (list_empty(&se_nacl->acl_sess_list)) se_nacl->dynamic_stop = true; spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags); if (se_nacl->dynamic_stop) list_del_init(&se_nacl->acl_list); } mutex_unlock(&se_tpg->acl_node_mutex); if (se_nacl->dynamic_stop) target_put_nacl(se_nacl); target_put_nacl(se_nacl); } if (se_sess->sess_cmd_map) { sbitmap_queue_free(&se_sess->sess_tag_pool); kvfree(se_sess->sess_cmd_map); } if (se_sess->cmd_cnt) target_free_cmd_counter(se_sess->cmd_cnt); kmem_cache_free(se_sess_cache, se_sess); } EXPORT_SYMBOL(transport_free_session); static int target_release_res(struct se_device *dev, void *data) { struct se_session *sess = data; if (dev->reservation_holder == sess) target_release_reservation(dev); return 0; } void transport_deregister_session(struct se_session *se_sess) { struct se_portal_group *se_tpg = se_sess->se_tpg; unsigned long flags; if (!se_tpg) { transport_free_session(se_sess); return; } spin_lock_irqsave(&se_tpg->session_lock, flags); list_del(&se_sess->sess_list); se_sess->se_tpg = NULL; se_sess->fabric_sess_ptr = NULL; spin_unlock_irqrestore(&se_tpg->session_lock, flags); /* * Since the session is being removed, release SPC-2 * reservations held by the session that is disappearing. */ target_for_each_device(target_release_res, se_sess); pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n", se_tpg->se_tpg_tfo->fabric_name); /* * If last kref is dropping now for an explicit NodeACL, awake sleeping * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group * removal context from within transport_free_session() code. * * For dynamic ACL, target_put_nacl() uses target_complete_nacl() * to release all remaining generate_node_acl=1 created ACL resources. */ transport_free_session(se_sess); } EXPORT_SYMBOL(transport_deregister_session); void target_remove_session(struct se_session *se_sess) { transport_deregister_session_configfs(se_sess); transport_deregister_session(se_sess); } EXPORT_SYMBOL(target_remove_session); static void target_remove_from_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned long flags; if (!dev) return; spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); if (cmd->state_active) { list_del(&cmd->state_list); cmd->state_active = false; } spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } static void target_remove_from_tmr_list(struct se_cmd *cmd) { struct se_device *dev = NULL; unsigned long flags; if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) dev = cmd->se_tmr_req->tmr_dev; if (dev) { spin_lock_irqsave(&dev->se_tmr_lock, flags); if (cmd->se_tmr_req->tmr_dev) list_del_init(&cmd->se_tmr_req->tmr_list); spin_unlock_irqrestore(&dev->se_tmr_lock, flags); } } /* * This function is called by the target core after the target core has * finished processing a SCSI command or SCSI TMF. Both the regular command * processing code and the code for aborting commands can call this * function. CMD_T_STOP is set if and only if another thread is waiting * inside transport_wait_for_tasks() for t_transport_stop_comp. */ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) { unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. 
*/ if (cmd->transport_state & CMD_T_STOP) { pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", __func__, __LINE__, cmd->tag); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete_all(&cmd->t_transport_stop_comp); return 1; } cmd->transport_state &= ~CMD_T_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); /* * Some fabric modules like tcm_loop can release their internally * allocated I/O reference and struct se_cmd now. * * Fabric modules are expected to return '1' here if the se_cmd being * passed is released at this point, or zero if not being released. */ return cmd->se_tfo->check_stop_free(cmd); } static void transport_lun_remove_cmd(struct se_cmd *cmd) { struct se_lun *lun = cmd->se_lun; if (!lun) return; target_remove_from_state_list(cmd); target_remove_from_tmr_list(cmd); if (cmpxchg(&cmd->lun_ref_active, true, false)) percpu_ref_put(&lun->lun_ref); /* * Clear struct se_cmd->se_lun before the handoff to FE. */ cmd->se_lun = NULL; } static void target_complete_failure_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); transport_generic_request_failure(cmd, cmd->sense_reason); } /* * Used when asking transport to copy Sense Data from the underlying * Linux/SCSI struct scsi_cmnd */ static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; WARN_ON(!cmd->se_lun); if (!dev) return NULL; if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) return NULL; cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n", dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); return cmd->sense_buffer; } void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) { unsigned char *cmd_sense_buf; unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); cmd_sense_buf = transport_get_sense_buffer(cmd); if (!cmd_sense_buf) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return; } cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); spin_unlock_irqrestore(&cmd->t_state_lock, flags); } EXPORT_SYMBOL(transport_copy_sense_to_cmd); static void target_handle_abort(struct se_cmd *cmd) { bool tas = cmd->transport_state & CMD_T_TAS; bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; int ret; pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); if (tas) { if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { cmd->scsi_status = SAM_STAT_TASK_ABORTED; pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag); trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); if (ret) { transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } } else { cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; cmd->se_tfo->queue_tm_rsp(cmd); } } else { /* * Allow the fabric driver to unmap any resources before * releasing the descriptor via TFO->release_cmd(). */ cmd->se_tfo->aborted_task(cmd); if (ack_kref) WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); /* * To do: establish a unit attention condition on the I_T * nexus associated with cmd. See also the paragraph "Aborting * commands" in SAM. 
*/ } WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } static void target_abort_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); target_handle_abort(cmd); } static bool target_cmd_interrupted(struct se_cmd *cmd) { int post_ret; if (cmd->transport_state & CMD_T_ABORTED) { if (cmd->transport_complete_callback) cmd->transport_complete_callback(cmd, false, &post_ret); INIT_WORK(&cmd->work, target_abort_work); queue_work(target_completion_wq, &cmd->work); return true; } else if (cmd->transport_state & CMD_T_STOP) { if (cmd->transport_complete_callback) cmd->transport_complete_callback(cmd, false, &post_ret); complete_all(&cmd->t_transport_stop_comp); return true; } return false; } /* May be called from interrupt context so must not sleep. */ void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status, sense_reason_t sense_reason) { struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; int success, cpu; unsigned long flags; if (target_cmd_interrupted(cmd)) return; cmd->scsi_status = scsi_status; cmd->sense_reason = sense_reason; spin_lock_irqsave(&cmd->t_state_lock, flags); switch (cmd->scsi_status) { case SAM_STAT_CHECK_CONDITION: if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) success = 1; else success = 0; break; default: success = 1; break; } cmd->t_state = TRANSPORT_COMPLETE; cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); spin_unlock_irqrestore(&cmd->t_state_lock, flags); INIT_WORK(&cmd->work, success ? target_complete_ok_work : target_complete_failure_work); if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID) cpu = cmd->cpuid; else cpu = wwn->cmd_compl_affinity; queue_work_on(cpu, target_completion_wq, &cmd->work); } EXPORT_SYMBOL(target_complete_cmd_with_sense); void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) { target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ? 
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE : TCM_NO_SENSE); } EXPORT_SYMBOL(target_complete_cmd); void target_set_cmd_data_length(struct se_cmd *cmd, int length) { if (length < cmd->data_length) { if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { cmd->residual_count += cmd->data_length - length; } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = cmd->data_length - length; } cmd->data_length = length; } } EXPORT_SYMBOL(target_set_cmd_data_length); void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) { if (scsi_status == SAM_STAT_GOOD || cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { target_set_cmd_data_length(cmd, length); } target_complete_cmd(cmd, scsi_status); } EXPORT_SYMBOL(target_complete_cmd_with_length); static void target_add_to_state_list(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; unsigned long flags; spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); if (!cmd->state_active) { list_add_tail(&cmd->state_list, &dev->queues[cmd->cpuid].state_list); cmd->state_active = true; } spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); } /* * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status */ static void transport_write_pending_qf(struct se_cmd *cmd); static void transport_complete_qf(struct se_cmd *cmd); void target_qf_do_work(struct work_struct *work) { struct se_device *dev = container_of(work, struct se_device, qf_work_queue); LIST_HEAD(qf_cmd_list); struct se_cmd *cmd, *cmd_tmp; spin_lock_irq(&dev->qf_cmd_lock); list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); spin_unlock_irq(&dev->qf_cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { list_del(&cmd->se_qf_node); atomic_dec_mb(&dev->dev_qf_count); pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" " context: %s\n", cmd->se_tfo->fabric_name, cmd, (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? 
"WRITE_PENDING" : "UNKNOWN"); if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) transport_write_pending_qf(cmd); else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) transport_complete_qf(cmd); } } unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) { switch (cmd->data_direction) { case DMA_NONE: return "NONE"; case DMA_FROM_DEVICE: return "READ"; case DMA_TO_DEVICE: return "WRITE"; case DMA_BIDIRECTIONAL: return "BIDI"; default: break; } return "UNKNOWN"; } void transport_dump_dev_state( struct se_device *dev, char *b, int *bl) { *bl += sprintf(b + *bl, "Status: "); if (dev->export_count) *bl += sprintf(b + *bl, "ACTIVATED"); else *bl += sprintf(b + *bl, "DEACTIVATED"); *bl += sprintf(b + *bl, " Max Queue Depth: %d", dev->queue_depth); *bl += sprintf(b + *bl, " SectorSize: %u HwMaxSectors: %u\n", dev->dev_attrib.block_size, dev->dev_attrib.hw_max_sectors); *bl += sprintf(b + *bl, " "); } void transport_dump_vpd_proto_id( struct t10_vpd *vpd, unsigned char *p_buf, int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Protocol Identifier: "); switch (vpd->protocol_identifier) { case 0x00: sprintf(buf+len, "Fibre Channel\n"); break; case 0x10: sprintf(buf+len, "Parallel SCSI\n"); break; case 0x20: sprintf(buf+len, "SSA\n"); break; case 0x30: sprintf(buf+len, "IEEE 1394\n"); break; case 0x40: sprintf(buf+len, "SCSI Remote Direct Memory Access" " Protocol\n"); break; case 0x50: sprintf(buf+len, "Internet SCSI (iSCSI)\n"); break; case 0x60: sprintf(buf+len, "SAS Serial SCSI Protocol\n"); break; case 0x70: sprintf(buf+len, "Automation/Drive Interface Transport" " Protocol\n"); break; case 0x80: sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n"); break; default: sprintf(buf+len, "Unknown 0x%02x\n", vpd->protocol_identifier); break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else pr_debug("%s", buf); } void transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83) { /* * Check if the Protocol Identifier Valid (PIV) bit is set.. * * from spc3r23.pdf section 7.5.1 */ if (page_83[1] & 0x80) { vpd->protocol_identifier = (page_83[0] & 0xf0); vpd->protocol_identifier_set = 1; transport_dump_vpd_proto_id(vpd, NULL, 0); } } EXPORT_SYMBOL(transport_set_vpd_proto_id); int transport_dump_vpd_assoc( struct t10_vpd *vpd, unsigned char *p_buf, int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; int ret = 0; int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Identifier Association: "); switch (vpd->association) { case 0x00: sprintf(buf+len, "addressed logical unit\n"); break; case 0x10: sprintf(buf+len, "target port\n"); break; case 0x20: sprintf(buf+len, "SCSI target device\n"); break; default: sprintf(buf+len, "Unknown 0x%02x\n", vpd->association); ret = -EINVAL; break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else pr_debug("%s", buf); return ret; } int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83) { /* * The VPD identification association.. 
* * from spc3r23.pdf Section 7.6.3.1 Table 297 */ vpd->association = (page_83[1] & 0x30); return transport_dump_vpd_assoc(vpd, NULL, 0); } EXPORT_SYMBOL(transport_set_vpd_assoc); int transport_dump_vpd_ident_type( struct t10_vpd *vpd, unsigned char *p_buf, int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; int ret = 0; int len; memset(buf, 0, VPD_TMP_BUF_SIZE); len = sprintf(buf, "T10 VPD Identifier Type: "); switch (vpd->device_identifier_type) { case 0x00: sprintf(buf+len, "Vendor specific\n"); break; case 0x01: sprintf(buf+len, "T10 Vendor ID based\n"); break; case 0x02: sprintf(buf+len, "EUI-64 based\n"); break; case 0x03: sprintf(buf+len, "NAA\n"); break; case 0x04: sprintf(buf+len, "Relative target port identifier\n"); break; case 0x08: sprintf(buf+len, "SCSI name string\n"); break; default: sprintf(buf+len, "Unsupported: 0x%02x\n", vpd->device_identifier_type); ret = -EINVAL; break; } if (p_buf) { if (p_buf_len < strlen(buf)+1) return -EINVAL; strncpy(p_buf, buf, p_buf_len); } else { pr_debug("%s", buf); } return ret; } int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83) { /* * The VPD identifier type.. * * from spc3r23.pdf Section 7.6.3.1 Table 298 */ vpd->device_identifier_type = (page_83[1] & 0x0f); return transport_dump_vpd_ident_type(vpd, NULL, 0); } EXPORT_SYMBOL(transport_set_vpd_ident_type); int transport_dump_vpd_ident( struct t10_vpd *vpd, unsigned char *p_buf, int p_buf_len) { unsigned char buf[VPD_TMP_BUF_SIZE]; int ret = 0; memset(buf, 0, VPD_TMP_BUF_SIZE); switch (vpd->device_identifier_code_set) { case 0x01: /* Binary */ snprintf(buf, sizeof(buf), "T10 VPD Binary Device Identifier: %s\n", &vpd->device_identifier[0]); break; case 0x02: /* ASCII */ snprintf(buf, sizeof(buf), "T10 VPD ASCII Device Identifier: %s\n", &vpd->device_identifier[0]); break; case 0x03: /* UTF-8 */ snprintf(buf, sizeof(buf), "T10 VPD UTF-8 Device Identifier: %s\n", &vpd->device_identifier[0]); break; default: sprintf(buf, "T10 VPD Device Identifier encoding unsupported:" " 0x%02x", vpd->device_identifier_code_set); ret = -EINVAL; break; } if (p_buf) strncpy(p_buf, buf, p_buf_len); else pr_debug("%s", buf); return ret; } int transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83) { static const char hex_str[] = "0123456789abcdef"; int j = 0, i = 4; /* offset to start of the identifier */ /* * The VPD Code Set (encoding) * * from spc3r23.pdf Section 7.6.3.1 Table 296 */ vpd->device_identifier_code_set = (page_83[0] & 0x0f); switch (vpd->device_identifier_code_set) { case 0x01: /* Binary */ vpd->device_identifier[j++] = hex_str[vpd->device_identifier_type]; while (i < (4 + page_83[3])) { vpd->device_identifier[j++] = hex_str[(page_83[i] & 0xf0) >> 4]; vpd->device_identifier[j++] = hex_str[page_83[i] & 0x0f]; i++; } break; case 0x02: /* ASCII */ case 0x03: /* UTF-8 */ while (i < (4 + page_83[3])) vpd->device_identifier[j++] = page_83[i++]; break; default: break; } return transport_dump_vpd_ident(vpd, NULL, 0); } EXPORT_SYMBOL(transport_set_vpd_ident); static sense_reason_t target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, unsigned int size) { u32 mtl; if (!cmd->se_tfo->max_data_sg_nents) return TCM_NO_SENSE; /* * Check if fabric enforced maximum SGL entries per I/O descriptor * exceeds se_cmd->data_length. If true, set SCF_UNDERFLOW_BIT + * residual_count and reduce original cmd->data_length to maximum * length based on single PAGE_SIZE entry scatter-lists. 
*/ mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); if (cmd->data_length > mtl) { /* * If an existing CDB overflow is present, calculate new residual * based on CDB size minus fabric maximum transfer length. * * If an existing CDB underflow is present, calculate new residual * based on original cmd->data_length minus fabric maximum transfer * length. * * Otherwise, set the underflow residual based on cmd->data_length * minus fabric maximum transfer length. */ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { cmd->residual_count = (size - mtl); } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { u32 orig_dl = size + cmd->residual_count; cmd->residual_count = (orig_dl - mtl); } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = (cmd->data_length - mtl); } cmd->data_length = mtl; /* * Reset sbc_check_prot() calculated protection payload * length based upon the new smaller MTL. */ if (cmd->prot_length) { u32 sectors = (mtl / dev->dev_attrib.block_size); cmd->prot_length = dev->prot_length * sectors; } } return TCM_NO_SENSE; } /** * target_cmd_size_check - Check whether there will be a residual. * @cmd: SCSI command. * @size: Data buffer size derived from CDB. The data buffer size provided by * the SCSI transport driver is available in @cmd->data_length. * * Compare the data buffer size from the CDB with the data buffer limit from the transport * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary. * * Note: target drivers set @cmd->data_length by calling __target_init_cmd(). * * Return: TCM_NO_SENSE */ sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size) { struct se_device *dev = cmd->se_dev; if (cmd->unknown_data_length) { cmd->data_length = size; } else if (size != cmd->data_length) { pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:" " %u does not match SCSI CDB Length: %u for SAM Opcode:" " 0x%02x\n", cmd->se_tfo->fabric_name, cmd->data_length, size, cmd->t_task_cdb[0]); /* * For READ command for the overflow case keep the existing * fabric provided ->data_length. Otherwise for the underflow * case, reset ->data_length to the smaller SCSI expected data * transfer length. */ if (size > cmd->data_length) { cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; cmd->residual_count = (size - cmd->data_length); } else { cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; cmd->residual_count = (cmd->data_length - size); /* * Do not truncate ->data_length for WRITE command to * dump all payload */ if (cmd->data_direction == DMA_FROM_DEVICE) { cmd->data_length = size; } } if (cmd->data_direction == DMA_TO_DEVICE) { if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { pr_err_ratelimited("Rejecting underflow/overflow" " for WRITE data CDB\n"); return TCM_INVALID_FIELD_IN_COMMAND_IU; } /* * Some fabric drivers like iscsi-target still expect to * always reject overflow writes. Reject this case until * full fabric driver level support for overflow writes * is introduced tree-wide. */ if (size > cmd->data_length) { pr_err_ratelimited("Rejecting overflow for" " WRITE control CDB\n"); return TCM_INVALID_CDB_FIELD; } } } return target_check_max_data_sg_nents(cmd, dev, size); } /* * Used by fabric modules containing a local struct se_cmd within their * fabric dependent per I/O descriptor. * * Preserves the value of @cmd->tag. 
*/ void __target_init_cmd(struct se_cmd *cmd, const struct target_core_fabric_ops *tfo, struct se_session *se_sess, u32 data_length, int data_direction, int task_attr, unsigned char *sense_buffer, u64 unpacked_lun, struct target_cmd_counter *cmd_cnt) { INIT_LIST_HEAD(&cmd->se_delayed_node); INIT_LIST_HEAD(&cmd->se_qf_node); INIT_LIST_HEAD(&cmd->state_list); init_completion(&cmd->t_transport_stop_comp); cmd->free_compl = NULL; cmd->abrt_compl = NULL; spin_lock_init(&cmd->t_state_lock); INIT_WORK(&cmd->work, NULL); kref_init(&cmd->cmd_kref); cmd->t_task_cdb = &cmd->__t_task_cdb[0]; cmd->se_tfo = tfo; cmd->se_sess = se_sess; cmd->data_length = data_length; cmd->data_direction = data_direction; cmd->sam_task_attr = task_attr; cmd->sense_buffer = sense_buffer; cmd->orig_fe_lun = unpacked_lun; cmd->cmd_cnt = cmd_cnt; if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) cmd->cpuid = raw_smp_processor_id(); cmd->state_active = false; } EXPORT_SYMBOL(__target_init_cmd); static sense_reason_t transport_check_alloc_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; /* * Check if SAM Task Attribute emulation is enabled for this * struct se_device storage object */ if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return 0; if (cmd->sam_task_attr == TCM_ACA_TAG) { pr_debug("SAM Task Attribute ACA" " emulation is not supported\n"); return TCM_INVALID_CDB_FIELD; } return 0; } sense_reason_t target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) { sense_reason_t ret; /* * Ensure that the received CDB is less than the max (252 + 8) bytes * for VARIABLE_LENGTH_CMD */ if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) { pr_err("Received SCSI CDB with command_size: %d that" " exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n", scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE); ret = TCM_INVALID_CDB_FIELD; goto err; } /* * If the received CDB is larger than TCM_MAX_COMMAND_SIZE, * allocate the additional extended CDB buffer now.. Otherwise * setup the pointer from __t_task_cdb to t_task_cdb. */ if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); if (!cmd->t_task_cdb) { pr_err("Unable to allocate cmd->t_task_cdb" " %u > sizeof(cmd->__t_task_cdb): %lu ops\n", scsi_command_size(cdb), (unsigned long)sizeof(cmd->__t_task_cdb)); ret = TCM_OUT_OF_RESOURCES; goto err; } } /* * Copy the original CDB into cmd-> */ memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); trace_target_sequencer_start(cmd); return 0; err: /* * Copy the CDB here to allow trace_target_cmd_complete() to * print the cdb to the trace buffers. */ memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), (unsigned int)TCM_MAX_COMMAND_SIZE)); return ret; } EXPORT_SYMBOL(target_cmd_init_cdb); sense_reason_t target_cmd_parse_cdb(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; sense_reason_t ret; ret = dev->transport->parse_cdb(cmd); if (ret == TCM_UNSUPPORTED_SCSI_OPCODE) pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n", cmd->se_tfo->fabric_name, cmd->se_sess->se_node_acl->initiatorname, cmd->t_task_cdb[0]); if (ret) return ret; ret = transport_check_alloc_task_attr(cmd); if (ret) return ret; cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); return 0; } EXPORT_SYMBOL(target_cmd_parse_cdb); static int __target_submit(struct se_cmd *cmd) { sense_reason_t ret; might_sleep(); /* * Check if we need to delay processing because of ALUA * Active/NonOptimized primary access state.. 
*/ core_alua_check_nonop_delay(cmd); if (cmd->t_data_nents != 0) { /* * This is primarily a hack for udev and tcm loop which sends * INQUIRYs with a single page and expects the data to be * cleared. */ if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && cmd->data_direction == DMA_FROM_DEVICE) { struct scatterlist *sgl = cmd->t_data_sg; unsigned char *buf = NULL; BUG_ON(!sgl); buf = kmap_local_page(sg_page(sgl)); if (buf) { memset(buf + sgl->offset, 0, sgl->length); kunmap_local(buf); } } } if (!cmd->se_lun) { dump_stack(); pr_err("cmd->se_lun is NULL\n"); return -EINVAL; } /* * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that * outstanding descriptors are handled correctly during shutdown via * transport_wait_for_tasks() * * Also, we don't take cmd->t_state_lock here as we only expect * this to be called for initial descriptor submission. */ cmd->t_state = TRANSPORT_NEW_CMD; cmd->transport_state |= CMD_T_ACTIVE; /* * transport_generic_new_cmd() is already handling QUEUE_FULL, * so follow TRANSPORT_NEW_CMD processing thread context usage * and call transport_generic_request_failure() if necessary.. */ ret = transport_generic_new_cmd(cmd); if (ret) transport_generic_request_failure(cmd, ret); return 0; } sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count) { if (!sgl || !sgl_count) return 0; /* * Reject SCSI data overflow with map_mem_to_cmd() as incoming * scatterlists already have been set to follow what the fabric * passes for the original expected data transfer length. */ if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { pr_warn("Rejecting SCSI DATA overflow for fabric using" " SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n"); return TCM_INVALID_CDB_FIELD; } cmd->t_data_sg = sgl; cmd->t_data_nents = sgl_count; cmd->t_bidi_data_sg = sgl_bidi; cmd->t_bidi_data_nents = sgl_bidi_count; cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; return 0; } /** * target_init_cmd - initialize se_cmd * @se_cmd: command descriptor to init * @se_sess: associated se_sess for endpoint * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @data_length: fabric expected data transfer length * @task_attr: SAM task attribute * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables * * Task tags are supported if the caller has set @se_cmd->tag. * * Returns: * - less than zero to signal active I/O shutdown failure. * - zero on success. * * If the fabric driver calls target_stop_session, then it must check the * return code and handle failures. This will never fail for other drivers, * and the return code can be ignored. */ int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *sense, u64 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags) { struct se_portal_group *se_tpg; se_tpg = se_sess->se_tpg; BUG_ON(!se_tpg); BUG_ON(se_cmd->se_tfo || se_cmd->se_sess); if (flags & TARGET_SCF_USE_CPUID) se_cmd->se_cmd_flags |= SCF_USE_CPUID; /* * Signal bidirectional data payloads to target-core */ if (flags & TARGET_SCF_BIDI_OP) se_cmd->se_cmd_flags |= SCF_BIDI; if (flags & TARGET_SCF_UNKNOWN_SIZE) se_cmd->unknown_data_length = 1; /* * Initialize se_cmd for target operation. 
From this point * exceptions are handled by sending exception status via * target_core_fabric_ops->queue_status() callback */ __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length, data_dir, task_attr, sense, unpacked_lun, se_sess->cmd_cnt); /* * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second * kref_put() to happen during fabric packet acknowledgement. */ return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); } EXPORT_SYMBOL_GPL(target_init_cmd); /** * target_submit_prep - prepare cmd for submission * @se_cmd: command descriptor to prep * @cdb: pointer to SCSI CDB * @sgl: struct scatterlist memory for unidirectional mapping * @sgl_count: scatterlist count for unidirectional mapping * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping * @sgl_bidi_count: scatterlist count for bidirectional READ mapping * @sgl_prot: struct scatterlist memory protection information * @sgl_prot_count: scatterlist count for protection information * @gfp: gfp allocation type * * Returns: * - less than zero to signal failure. * - zero on success. * * If failure is returned, lio will the callers queue_status to complete * the cmd. */ int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb, struct scatterlist *sgl, u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count, struct scatterlist *sgl_prot, u32 sgl_prot_count, gfp_t gfp) { sense_reason_t rc; rc = target_cmd_init_cdb(se_cmd, cdb, gfp); if (rc) goto send_cc_direct; /* * Locate se_lun pointer and attach it to struct se_cmd */ rc = transport_lookup_cmd_lun(se_cmd); if (rc) goto send_cc_direct; rc = target_cmd_parse_cdb(se_cmd); if (rc != 0) goto generic_fail; /* * Save pointers for SGLs containing protection information, * if present. */ if (sgl_prot_count) { se_cmd->t_prot_sg = sgl_prot; se_cmd->t_prot_nents = sgl_prot_count; se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC; } /* * When a non zero sgl_count has been passed perform SGL passthrough * mapping for pre-allocated fabric memory instead of having target * core perform an internal SGL allocation.. */ if (sgl_count != 0) { BUG_ON(!sgl); rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count, sgl_bidi, sgl_bidi_count); if (rc != 0) goto generic_fail; } return 0; send_cc_direct: transport_send_check_condition_and_sense(se_cmd, rc, 0); target_put_sess_cmd(se_cmd); return -EIO; generic_fail: transport_generic_request_failure(se_cmd, rc); return -EIO; } EXPORT_SYMBOL_GPL(target_submit_prep); /** * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd * * @se_cmd: command descriptor to submit * @se_sess: associated se_sess for endpoint * @cdb: pointer to SCSI CDB * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @data_length: fabric expected data transfer length * @task_attr: SAM task attribute * @data_dir: DMA data direction * @flags: flags for command submission from target_sc_flags_tables * * Task tags are supported if the caller has set @se_cmd->tag. * * This may only be called from process context, and also currently * assumes internal allocation of fabric payload buffer by target-core. * * It also assumes interal target core SGL memory allocation. * * This function must only be used by drivers that do their own * sync during shutdown and does not use target_stop_session. 
If there * is a failure this function will call into the fabric driver's * queue_status with a CHECK_CONDITION. */ void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *cdb, unsigned char *sense, u64 unpacked_lun, u32 data_length, int task_attr, int data_dir, int flags) { int rc; rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length, task_attr, data_dir, flags); WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n"); if (rc) return; if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0, GFP_KERNEL)) return; target_submit(se_cmd); } EXPORT_SYMBOL(target_submit_cmd); static struct se_dev_plug *target_plug_device(struct se_device *se_dev) { struct se_dev_plug *se_plug; if (!se_dev->transport->plug_device) return NULL; se_plug = se_dev->transport->plug_device(se_dev); if (!se_plug) return NULL; se_plug->se_dev = se_dev; /* * We have a ref to the lun at this point, but the cmds could * complete before we unplug, so grab a ref to the se_device so we * can call back into the backend. */ config_group_get(&se_dev->dev_group); return se_plug; } static void target_unplug_device(struct se_dev_plug *se_plug) { struct se_device *se_dev = se_plug->se_dev; se_dev->transport->unplug_device(se_plug); config_group_put(&se_dev->dev_group); } void target_queued_submit_work(struct work_struct *work) { struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work); struct se_cmd *se_cmd, *next_cmd; struct se_dev_plug *se_plug = NULL; struct se_device *se_dev = NULL; struct llist_node *cmd_list; cmd_list = llist_del_all(&sq->cmd_list); if (!cmd_list) /* Previous call took what we were queued to submit */ return; cmd_list = llist_reverse_order(cmd_list); llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) { if (!se_dev) { se_dev = se_cmd->se_dev; se_plug = target_plug_device(se_dev); } __target_submit(se_cmd); } if (se_plug) target_unplug_device(se_plug); } /** * target_queue_submission - queue the cmd to run on the LIO workqueue * @se_cmd: command descriptor to submit */ static void target_queue_submission(struct se_cmd *se_cmd) { struct se_device *se_dev = se_cmd->se_dev; int cpu = se_cmd->cpuid; struct se_cmd_queue *sq; sq = &se_dev->queues[cpu].sq; llist_add(&se_cmd->se_cmd_list, &sq->cmd_list); queue_work_on(cpu, target_submission_wq, &sq->work); } /** * target_submit - perform final initialization and submit cmd to LIO core * @se_cmd: command descriptor to submit * * target_submit_prep or something similar must have been called on the cmd, * and this must be called from process context. 
*/ int target_submit(struct se_cmd *se_cmd) { const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo; struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib; u8 submit_type; if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT) submit_type = tfo->default_submit_type; else if (da->submit_type == TARGET_DIRECT_SUBMIT && tfo->direct_submit_supp) submit_type = TARGET_DIRECT_SUBMIT; else submit_type = TARGET_QUEUE_SUBMIT; if (submit_type == TARGET_DIRECT_SUBMIT) return __target_submit(se_cmd); target_queue_submission(se_cmd); return 0; } EXPORT_SYMBOL_GPL(target_submit); static void target_complete_tmr_failure(struct work_struct *work) { struct se_cmd *se_cmd = container_of(work, struct se_cmd, work); se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST; se_cmd->se_tfo->queue_tm_rsp(se_cmd); transport_lun_remove_cmd(se_cmd); transport_cmd_check_stop_to_fabric(se_cmd); } /** * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd * for TMR CDBs * * @se_cmd: command descriptor to submit * @se_sess: associated se_sess for endpoint * @sense: pointer to SCSI sense buffer * @unpacked_lun: unpacked LUN to reference for struct se_lun * @fabric_tmr_ptr: fabric context for TMR req * @tm_type: Type of TM request * @gfp: gfp type for caller * @tag: referenced task tag for TMR_ABORT_TASK * @flags: submit cmd flags * * Callable from all contexts. **/ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, unsigned char *sense, u64 unpacked_lun, void *fabric_tmr_ptr, unsigned char tm_type, gfp_t gfp, u64 tag, int flags) { struct se_portal_group *se_tpg; int ret; se_tpg = se_sess->se_tpg; BUG_ON(!se_tpg); __target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, 0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun, se_sess->cmd_cnt); /* * FIXME: Currently expect caller to handle se_cmd->se_tmr_req * allocation failure. */ ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp); if (ret < 0) return -ENOMEM; if (tm_type == TMR_ABORT_TASK) se_cmd->se_tmr_req->ref_task_tag = tag; /* See target_submit_cmd for commentary */ ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF); if (ret) { core_tmr_release_req(se_cmd->se_tmr_req); return ret; } ret = transport_lookup_tmr_lun(se_cmd); if (ret) goto failure; transport_generic_handle_tmr(se_cmd); return 0; /* * For callback during failure handling, push this work off * to process context with TMR_LUN_DOES_NOT_EXIST status. */ failure: INIT_WORK(&se_cmd->work, target_complete_tmr_failure); schedule_work(&se_cmd->work); return 0; } EXPORT_SYMBOL(target_submit_tmr); /* * Handle SAM-esque emulation for generic transport request failures. 
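 *
 * Summary of the handling below: most sense_reason_t values are reported as
 * CHECK CONDITION with sense data built by
 * transport_send_check_condition_and_sense(); TCM_OUT_OF_RESOURCES,
 * TCM_LUN_BUSY and TCM_RESERVATION_CONFLICT map to the TASK SET FULL, BUSY
 * and RESERVATION CONFLICT SCSI statuses queued via ->queue_status();
 * unknown reasons fall back to TCM_UNSUPPORTED_SCSI_OPCODE, and aborted
 * commands are handed off to target_abort_work.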
*/ void transport_generic_request_failure(struct se_cmd *cmd, sense_reason_t sense_reason) { int ret = 0, post_ret; pr_debug("-----[ Storage Engine Exception; sense_reason %d\n", sense_reason); target_show_cmd("-----[ ", cmd); /* * For SAM Task Attribute emulation for failed struct se_cmd */ transport_complete_task_attr(cmd); if (cmd->transport_complete_callback) cmd->transport_complete_callback(cmd, false, &post_ret); if (cmd->transport_state & CMD_T_ABORTED) { INIT_WORK(&cmd->work, target_abort_work); queue_work(target_completion_wq, &cmd->work); return; } switch (sense_reason) { case TCM_NON_EXISTENT_LUN: case TCM_UNSUPPORTED_SCSI_OPCODE: case TCM_INVALID_CDB_FIELD: case TCM_INVALID_PARAMETER_LIST: case TCM_PARAMETER_LIST_LENGTH_ERROR: case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE: case TCM_UNKNOWN_MODE_PAGE: case TCM_WRITE_PROTECTED: case TCM_ADDRESS_OUT_OF_RANGE: case TCM_CHECK_CONDITION_ABORT_CMD: case TCM_CHECK_CONDITION_UNIT_ATTENTION: case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED: case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED: case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED: case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE: case TCM_TOO_MANY_TARGET_DESCS: case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE: case TCM_TOO_MANY_SEGMENT_DESCS: case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE: case TCM_INVALID_FIELD_IN_COMMAND_IU: case TCM_ALUA_TG_PT_STANDBY: case TCM_ALUA_TG_PT_UNAVAILABLE: case TCM_ALUA_STATE_TRANSITION: case TCM_ALUA_OFFLINE: break; case TCM_OUT_OF_RESOURCES: cmd->scsi_status = SAM_STAT_TASK_SET_FULL; goto queue_status; case TCM_LUN_BUSY: cmd->scsi_status = SAM_STAT_BUSY; goto queue_status; case TCM_RESERVATION_CONFLICT: /* * No SENSE Data payload for this case, set SCSI Status * and queue the response to $FABRIC_MOD. * * Uses linux/include/scsi/scsi.h SAM status codes defs */ cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; /* * For UA Interlock Code 11b, a RESERVATION CONFLICT will * establish a UNIT ATTENTION with PREVIOUS RESERVATION * CONFLICT STATUS. * * See spc4r17, section 7.4.6 Control Mode Page, Table 349 */ if (cmd->se_sess && cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl == TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) { target_ua_allocate_lun(cmd->se_sess->se_node_acl, cmd->orig_fe_lun, 0x2C, ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS); } goto queue_status; default: pr_err("Unknown transport error for CDB 0x%02x: %d\n", cmd->t_task_cdb[0], sense_reason); sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE; break; } ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); if (ret) goto queue_full; check_stop: transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); if (!ret) goto check_stop; queue_full: transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } EXPORT_SYMBOL(transport_generic_request_failure); void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) { sense_reason_t ret; if (!cmd->execute_cmd) { ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; goto err; } if (do_checks) { /* * Check for an existing UNIT ATTENTION condition after * target_handle_task_attr() has done SAM task attr * checking, and possibly have already defered execution * out to target_restart_delayed_cmds() context. 
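 *
 * The do_checks path below runs, in order, target_scsi3_ua_check(),
 * target_alua_state_check() and target_check_reservation(); a reservation
 * conflict additionally sets SAM_STAT_RESERVATION_CONFLICT before the
 * failure is reported via transport_generic_request_failure().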
*/ ret = target_scsi3_ua_check(cmd); if (ret) goto err; ret = target_alua_state_check(cmd); if (ret) goto err; ret = target_check_reservation(cmd); if (ret) { cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; goto err; } } ret = cmd->execute_cmd(cmd); if (!ret) return; err: spin_lock_irq(&cmd->t_state_lock); cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, ret); } static int target_write_prot_action(struct se_cmd *cmd) { u32 sectors; /* * Perform WRITE_INSERT of PI using software emulation when backend * device has PI enabled, if the transport has not already generated * PI using hardware WRITE_INSERT offload. */ switch (cmd->prot_op) { case TARGET_PROT_DOUT_INSERT: if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) sbc_dif_generate(cmd); break; case TARGET_PROT_DOUT_STRIP: if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) break; sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, cmd->t_prot_sg, 0); if (unlikely(cmd->pi_err)) { spin_lock_irq(&cmd->t_state_lock); cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); transport_generic_request_failure(cmd, cmd->pi_err); return -1; } break; default: break; } return 0; } static bool target_handle_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return false; cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; /* * Check for the existence of HEAD_OF_QUEUE, and if true return 1 * to allow the passed struct se_cmd list of tasks to the front of the list. */ switch (cmd->sam_task_attr) { case TCM_HEAD_TAG: atomic_inc_mb(&dev->non_ordered); pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n", cmd->t_task_cdb[0]); return false; case TCM_ORDERED_TAG: atomic_inc_mb(&dev->delayed_cmd_count); pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n", cmd->t_task_cdb[0]); break; default: /* * For SIMPLE and UNTAGGED Task Attribute commands */ atomic_inc_mb(&dev->non_ordered); if (atomic_read(&dev->delayed_cmd_count) == 0) return false; break; } if (cmd->sam_task_attr != TCM_ORDERED_TAG) { atomic_inc_mb(&dev->delayed_cmd_count); /* * We will account for this when we dequeue from the delayed * list. */ atomic_dec_mb(&dev->non_ordered); } spin_lock_irq(&cmd->t_state_lock); cmd->transport_state &= ~CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); spin_lock(&dev->delayed_cmd_lock); list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); spin_unlock(&dev->delayed_cmd_lock); pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD listn", cmd->t_task_cdb[0], cmd->sam_task_attr); /* * We may have no non ordered cmds when this function started or we * could have raced with the last simple/head cmd completing, so kick * the delayed handler here. */ schedule_work(&dev->delayed_cmd_work); return true; } void target_execute_cmd(struct se_cmd *cmd) { /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. * * If the received CDB has already been aborted stop processing it here. 
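 *
 * Flow below: bail out early via target_cmd_interrupted() for stopped or
 * aborted commands, mark the command TRANSPORT_PROCESSING and
 * CMD_T_ACTIVE | CMD_T_SENT, apply software DIF handling through
 * target_write_prot_action(), then let target_handle_task_attr() decide
 * whether the command must first be parked on the delayed list (ORDERED
 * commands, or SIMPLE commands arriving while ORDERED commands are still
 * pending) before __target_execute_cmd() runs it.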
*/ if (target_cmd_interrupted(cmd)) return; spin_lock_irq(&cmd->t_state_lock); cmd->t_state = TRANSPORT_PROCESSING; cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; spin_unlock_irq(&cmd->t_state_lock); if (target_write_prot_action(cmd)) return; if (target_handle_task_attr(cmd)) return; __target_execute_cmd(cmd, true); } EXPORT_SYMBOL(target_execute_cmd); /* * Process all commands up to the last received ORDERED task attribute which * requires another blocking boundary */ void target_do_delayed_work(struct work_struct *work) { struct se_device *dev = container_of(work, struct se_device, delayed_cmd_work); spin_lock(&dev->delayed_cmd_lock); while (!dev->ordered_sync_in_progress) { struct se_cmd *cmd; if (list_empty(&dev->delayed_cmd_list)) break; cmd = list_entry(dev->delayed_cmd_list.next, struct se_cmd, se_delayed_node); if (cmd->sam_task_attr == TCM_ORDERED_TAG) { /* * Check if we started with: * [ordered] [simple] [ordered] * and we are now at the last ordered so we have to wait * for the simple cmd. */ if (atomic_read(&dev->non_ordered) > 0) break; dev->ordered_sync_in_progress = true; } list_del(&cmd->se_delayed_node); atomic_dec_mb(&dev->delayed_cmd_count); spin_unlock(&dev->delayed_cmd_lock); if (cmd->sam_task_attr != TCM_ORDERED_TAG) atomic_inc_mb(&dev->non_ordered); cmd->transport_state |= CMD_T_SENT; __target_execute_cmd(cmd, true); spin_lock(&dev->delayed_cmd_lock); } spin_unlock(&dev->delayed_cmd_lock); } /* * Called from I/O completion to determine which dormant/delayed * and ordered cmds need to have their tasks added to the execution queue. */ static void transport_complete_task_attr(struct se_cmd *cmd) { struct se_device *dev = cmd->se_dev; if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) return; if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) goto restart; if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { atomic_dec_mb(&dev->non_ordered); dev->dev_cur_ordered_id++; } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { atomic_dec_mb(&dev->non_ordered); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n", dev->dev_cur_ordered_id); } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { spin_lock(&dev->delayed_cmd_lock); dev->ordered_sync_in_progress = false; spin_unlock(&dev->delayed_cmd_lock); dev->dev_cur_ordered_id++; pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n", dev->dev_cur_ordered_id); } cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; restart: if (atomic_read(&dev->delayed_cmd_count) > 0) schedule_work(&dev->delayed_cmd_work); } static void transport_complete_qf(struct se_cmd *cmd) { int ret = 0; transport_complete_task_attr(cmd); /* * If a fabric driver ->write_pending() or ->queue_data_in() callback * has returned neither -ENOMEM or -EAGAIN, assume it's fatal and * the same callbacks should not be retried. Return CHECK_CONDITION * if a scsi_status is not already set. * * If a fabric driver ->queue_status() has returned non zero, always * keep retrying no matter what.. */ if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { if (cmd->scsi_status) goto queue_status; translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); goto queue_status; } /* * Check if we need to send a sense buffer from * the struct se_cmd in question. We do NOT want * to take this path of the IO has been marked as * needing to be treated like a "normal read". This * is the case if it's a tape read, and either the * FM, EOM, or ILI bits are set, but there is no * sense data. 
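 *
 * In other words, the status (with its sense payload) is queued directly
 * below only when SCF_TRANSPORT_TASK_SENSE is set and
 * SCF_TREAT_READ_AS_NORMAL is not.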
*/ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) goto queue_status; switch (cmd->data_direction) { case DMA_FROM_DEVICE: /* queue status if not treating this as a normal read */ if (cmd->scsi_status && !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) goto queue_status; trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); break; case DMA_TO_DEVICE: if (cmd->se_cmd_flags & SCF_BIDI) { ret = cmd->se_tfo->queue_data_in(cmd); break; } fallthrough; case DMA_NONE: queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); break; default: break; } if (ret < 0) { transport_handle_queue_full(cmd, cmd->se_dev, ret, false); return; } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); } static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, int err, bool write_pending) { /* * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or * ->queue_data_in() callbacks from new process context. * * Otherwise for other errors, transport_complete_qf() will send * CHECK_CONDITION via ->queue_status() instead of attempting to * retry associated fabric driver data-transfer callbacks. */ if (err == -EAGAIN || err == -ENOMEM) { cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : TRANSPORT_COMPLETE_QF_OK; } else { pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err); cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; } spin_lock_irq(&dev->qf_cmd_lock); list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); atomic_inc_mb(&dev->dev_qf_count); spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); schedule_work(&cmd->se_dev->qf_work_queue); } static bool target_read_prot_action(struct se_cmd *cmd) { switch (cmd->prot_op) { case TARGET_PROT_DIN_STRIP: if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { u32 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, sectors, 0, cmd->t_prot_sg, 0); if (cmd->pi_err) return true; } break; case TARGET_PROT_DIN_INSERT: if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) break; sbc_dif_generate(cmd); break; default: break; } return false; } static void target_complete_ok_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); int ret; /* * Check if we need to move delayed/dormant tasks from cmds on the * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task * Attribute. */ transport_complete_task_attr(cmd); /* * Check to schedule QUEUE_FULL work, or execute an existing * cmd->transport_qf_callback() */ if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) schedule_work(&cmd->se_dev->qf_work_queue); /* * Check if we need to send a sense buffer from * the struct se_cmd in question. We do NOT want * to take this path of the IO has been marked as * needing to be treated like a "normal read". This * is the case if it's a tape read, and either the * FM, EOM, or ILI bits are set, but there is no * sense data. */ if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { WARN_ON(!cmd->scsi_status); ret = transport_send_check_condition_and_sense( cmd, 0, 1); if (ret) goto queue_full; transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } /* * Check for a callback, used by amongst other things * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation. 
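 *
 * A non-zero sense_reason_t from the callback is reported as CHECK CONDITION
 * below. When the callback succeeds and does not request further processing
 * via post_ret, completion is left to the callback itself, except for a
 * zero-length COMPARE_AND_WRITE which still falls through to queue_rsp.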
*/ if (cmd->transport_complete_callback) { sense_reason_t rc; bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); bool zero_dl = !(cmd->data_length); int post_ret = 0; rc = cmd->transport_complete_callback(cmd, true, &post_ret); if (!rc && !post_ret) { if (caw && zero_dl) goto queue_rsp; return; } else if (rc) { ret = transport_send_check_condition_and_sense(cmd, rc, 0); if (ret) goto queue_full; transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } } queue_rsp: switch (cmd->data_direction) { case DMA_FROM_DEVICE: /* * if this is a READ-type IO, but SCSI status * is set, then skip returning data and just * return the status -- unless this IO is marked * as needing to be treated as a normal read, * in which case we want to go ahead and return * the data. This happens, for example, for tape * reads with the FM, EOM, or ILI bits set, with * no sense data. */ if (cmd->scsi_status && !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) goto queue_status; atomic_long_add(cmd->data_length, &cmd->se_lun->lun_stats.tx_data_octets); /* * Perform READ_STRIP of PI using software emulation when * backend had PI enabled, if the transport will not be * performing hardware READ_STRIP offload. */ if (target_read_prot_action(cmd)) { ret = transport_send_check_condition_and_sense(cmd, cmd->pi_err, 0); if (ret) goto queue_full; transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; } trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_data_in(cmd); if (ret) goto queue_full; break; case DMA_TO_DEVICE: atomic_long_add(cmd->data_length, &cmd->se_lun->lun_stats.rx_data_octets); /* * Check if we need to send READ payload for BIDI-COMMAND */ if (cmd->se_cmd_flags & SCF_BIDI) { atomic_long_add(cmd->data_length, &cmd->se_lun->lun_stats.tx_data_octets); ret = cmd->se_tfo->queue_data_in(cmd); if (ret) goto queue_full; break; } fallthrough; case DMA_NONE: queue_status: trace_target_cmd_complete(cmd); ret = cmd->se_tfo->queue_status(cmd); if (ret) goto queue_full; break; default: break; } transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; queue_full: pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p," " data_direction: %d\n", cmd, cmd->data_direction); transport_handle_queue_full(cmd, cmd->se_dev, ret, false); } void target_free_sgl(struct scatterlist *sgl, int nents) { sgl_free_n_order(sgl, nents, 0); } EXPORT_SYMBOL(target_free_sgl); static inline void transport_reset_sgl_orig(struct se_cmd *cmd) { /* * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE * emulation, and free + reset pointers if necessary.. 
*/ if (!cmd->t_data_sg_orig) return; kfree(cmd->t_data_sg); cmd->t_data_sg = cmd->t_data_sg_orig; cmd->t_data_sg_orig = NULL; cmd->t_data_nents = cmd->t_data_nents_orig; cmd->t_data_nents_orig = 0; } static inline void transport_free_pages(struct se_cmd *cmd) { if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); cmd->t_prot_sg = NULL; cmd->t_prot_nents = 0; } if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { /* * Release special case READ buffer payload required for * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE */ if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; } transport_reset_sgl_orig(cmd); return; } transport_reset_sgl_orig(cmd); target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); cmd->t_data_sg = NULL; cmd->t_data_nents = 0; target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); cmd->t_bidi_data_sg = NULL; cmd->t_bidi_data_nents = 0; } void *transport_kmap_data_sg(struct se_cmd *cmd) { struct scatterlist *sg = cmd->t_data_sg; struct page **pages; int i; /* * We need to take into account a possible offset here for fabrics like * tcm_loop who may be using a contig buffer from the SCSI midlayer for * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() */ if (!cmd->t_data_nents) return NULL; BUG_ON(!sg); if (cmd->t_data_nents == 1) return kmap(sg_page(sg)) + sg->offset; /* >1 page. use vmap */ pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); if (!pages) return NULL; /* convert sg[] to pages[] */ for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { pages[i] = sg_page(sg); } cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); kfree(pages); if (!cmd->t_data_vmap) return NULL; return cmd->t_data_vmap + cmd->t_data_sg[0].offset; } EXPORT_SYMBOL(transport_kmap_data_sg); void transport_kunmap_data_sg(struct se_cmd *cmd) { if (!cmd->t_data_nents) { return; } else if (cmd->t_data_nents == 1) { kunmap(sg_page(cmd->t_data_sg)); return; } vunmap(cmd->t_data_vmap); cmd->t_data_vmap = NULL; } EXPORT_SYMBOL(transport_kunmap_data_sg); int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length, bool zero_page, bool chainable) { gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0); *sgl = sgl_alloc_order(length, 0, chainable, gfp, nents); return *sgl ? 0 : -ENOMEM; } EXPORT_SYMBOL(target_alloc_sgl); /* * Allocate any required resources to execute the command. For writes we * might not have the payload yet, so notify the fabric via a call to * ->write_pending instead. Otherwise place it on the execution queue. */ sense_reason_t transport_generic_new_cmd(struct se_cmd *cmd) { unsigned long flags; int ret = 0; bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); if (cmd->prot_op != TARGET_PROT_NORMAL && !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, cmd->prot_length, true, false); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } /* * Determine if the TCM fabric module has already allocated physical * memory, and is directly calling transport_generic_map_mem_to_cmd() * beforehand. 
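 *
 * Otherwise target_alloc_sgl() builds t_data_sg here, plus t_bidi_data_sg
 * for SCF_BIDI and COMPARE_AND_WRITE payloads. Non-write (or zero-length)
 * commands are executed immediately; writes switch to
 * TRANSPORT_WRITE_PENDING and the fabric's ->write_pending() is invoked to
 * fetch the payload.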
*/ if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && cmd->data_length) { if ((cmd->se_cmd_flags & SCF_BIDI) || (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { u32 bidi_length; if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) bidi_length = cmd->t_task_nolb * cmd->se_dev->dev_attrib.block_size; else bidi_length = cmd->data_length; ret = target_alloc_sgl(&cmd->t_bidi_data_sg, &cmd->t_bidi_data_nents, bidi_length, zero_flag, false); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, cmd->data_length, zero_flag, false); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && cmd->data_length) { /* * Special case for COMPARE_AND_WRITE with fabrics * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC. */ u32 caw_length = cmd->t_task_nolb * cmd->se_dev->dev_attrib.block_size; ret = target_alloc_sgl(&cmd->t_bidi_data_sg, &cmd->t_bidi_data_nents, caw_length, zero_flag, false); if (ret < 0) return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } /* * If this command is not a write we can execute it right here, * for write buffers we need to notify the fabric driver first * and let it call back once the write buffers are ready. */ target_add_to_state_list(cmd); if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { target_execute_cmd(cmd); return 0; } spin_lock_irqsave(&cmd->t_state_lock, flags); cmd->t_state = TRANSPORT_WRITE_PENDING; /* * Determine if frontend context caller is requesting the stopping of * this command for frontend exceptions. */ if (cmd->transport_state & CMD_T_STOP && !cmd->se_tfo->write_pending_must_be_called) { pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n", __func__, __LINE__, cmd->tag); spin_unlock_irqrestore(&cmd->t_state_lock, flags); complete_all(&cmd->t_transport_stop_comp); return 0; } cmd->transport_state &= ~CMD_T_ACTIVE; spin_unlock_irqrestore(&cmd->t_state_lock, flags); ret = cmd->se_tfo->write_pending(cmd); if (ret) goto queue_full; return 0; queue_full: pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); transport_handle_queue_full(cmd, cmd->se_dev, ret, true); return 0; } EXPORT_SYMBOL(transport_generic_new_cmd); static void transport_write_pending_qf(struct se_cmd *cmd) { unsigned long flags; int ret; bool stop; spin_lock_irqsave(&cmd->t_state_lock, flags); stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (stop) { pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n", __func__, __LINE__, cmd->tag); complete_all(&cmd->t_transport_stop_comp); return; } ret = cmd->se_tfo->write_pending(cmd); if (ret) { pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); transport_handle_queue_full(cmd, cmd->se_dev, ret, true); } } static bool __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *, unsigned long *flags); static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) { unsigned long flags; spin_lock_irqsave(&cmd->t_state_lock, flags); __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags); } /* * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has * finished. 
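 *
 * The wait relies on cmd->abrt_compl, which target_release_cmd_kref()
 * completes after freeing the command memory and calling ->release_cmd().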
*/ void target_put_cmd_and_wait(struct se_cmd *cmd) { DECLARE_COMPLETION_ONSTACK(compl); WARN_ON_ONCE(cmd->abrt_compl); cmd->abrt_compl = &compl; target_put_sess_cmd(cmd); wait_for_completion(&compl); } /* * This function is called by frontend drivers after processing of a command * has finished. * * The protocol for ensuring that either the regular frontend command * processing flow or target_handle_abort() code drops one reference is as * follows: * - Calling .queue_data_in(), .queue_status() or queue_tm_rsp() will cause * the frontend driver to call this function synchronously or asynchronously. * That will cause one reference to be dropped. * - During regular command processing the target core sets CMD_T_COMPLETE * before invoking one of the .queue_*() functions. * - The code that aborts commands skips commands and TMFs for which * CMD_T_COMPLETE has been set. * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for * commands that will be aborted. * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set * transport_generic_free_cmd() skips its call to target_put_sess_cmd(). * - For aborted commands for which CMD_T_TAS has been set .queue_status() will * be called and will drop a reference. * - For aborted commands for which CMD_T_TAS has not been set .aborted_task() * will be called. target_handle_abort() will drop the final reference. */ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) { DECLARE_COMPLETION_ONSTACK(compl); int ret = 0; bool aborted = false, tas = false; if (wait_for_tasks) target_wait_free_cmd(cmd, &aborted, &tas); if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { /* * Handle WRITE failure case where transport_generic_new_cmd() * has already added se_cmd to state_list, but fabric has * failed command before I/O submission. */ if (cmd->state_active) target_remove_from_state_list(cmd); if (cmd->se_lun) transport_lun_remove_cmd(cmd); } if (aborted) cmd->free_compl = &compl; ret = target_put_sess_cmd(cmd); if (aborted) { pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); wait_for_completion(&compl); ret = 1; } return ret; } EXPORT_SYMBOL(transport_generic_free_cmd); /** * target_get_sess_cmd - Verify the session is accepting cmds and take ref * @se_cmd: command descriptor to add * @ack_kref: Signal that fabric will perform an ack target_put_sess_cmd() */ int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref) { int ret = 0; /* * Add a second kref if the fabric caller is expecting to handle * fabric acknowledgement that requires two target_put_sess_cmd() * invocations before se_cmd descriptor release. */ if (ack_kref) { kref_get(&se_cmd->cmd_kref); se_cmd->se_cmd_flags |= SCF_ACK_KREF; } /* * Users like xcopy do not use counters since they never do a stop * and wait. 
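 *
 * When a counter is present and percpu_ref_tryget_live() fails, the session
 * (or counter) is being stopped: -ESHUTDOWN is returned and any extra ACK
 * kref taken above is dropped again.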
*/ if (se_cmd->cmd_cnt) { if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt)) ret = -ESHUTDOWN; } if (ret && ack_kref) target_put_sess_cmd(se_cmd); return ret; } EXPORT_SYMBOL(target_get_sess_cmd); static void target_free_cmd_mem(struct se_cmd *cmd) { transport_free_pages(cmd); if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) core_tmr_release_req(cmd->se_tmr_req); if (cmd->t_task_cdb != cmd->__t_task_cdb) kfree(cmd->t_task_cdb); } static void target_release_cmd_kref(struct kref *kref) { struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref); struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt; struct completion *free_compl = se_cmd->free_compl; struct completion *abrt_compl = se_cmd->abrt_compl; target_free_cmd_mem(se_cmd); se_cmd->se_tfo->release_cmd(se_cmd); if (free_compl) complete(free_compl); if (abrt_compl) complete(abrt_compl); if (cmd_cnt) percpu_ref_put(&cmd_cnt->refcnt); } /** * target_put_sess_cmd - decrease the command reference count * @se_cmd: command to drop a reference from * * Returns 1 if and only if this target_put_sess_cmd() call caused the * refcount to drop to zero. Returns zero otherwise. */ int target_put_sess_cmd(struct se_cmd *se_cmd) { return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref); } EXPORT_SYMBOL(target_put_sess_cmd); static const char *data_dir_name(enum dma_data_direction d) { switch (d) { case DMA_BIDIRECTIONAL: return "BIDI"; case DMA_TO_DEVICE: return "WRITE"; case DMA_FROM_DEVICE: return "READ"; case DMA_NONE: return "NONE"; } return "(?)"; } static const char *cmd_state_name(enum transport_state_table t) { switch (t) { case TRANSPORT_NO_STATE: return "NO_STATE"; case TRANSPORT_NEW_CMD: return "NEW_CMD"; case TRANSPORT_WRITE_PENDING: return "WRITE_PENDING"; case TRANSPORT_PROCESSING: return "PROCESSING"; case TRANSPORT_COMPLETE: return "COMPLETE"; case TRANSPORT_ISTATE_PROCESSING: return "ISTATE_PROCESSING"; case TRANSPORT_COMPLETE_QF_WP: return "COMPLETE_QF_WP"; case TRANSPORT_COMPLETE_QF_OK: return "COMPLETE_QF_OK"; case TRANSPORT_COMPLETE_QF_ERR: return "COMPLETE_QF_ERR"; } return "(?)"; } static void target_append_str(char **str, const char *txt) { char *prev = *str; *str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) : kstrdup(txt, GFP_ATOMIC); kfree(prev); } /* * Convert a transport state bitmask into a string. The caller is * responsible for freeing the returned pointer. 
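 * For example, CMD_T_ACTIVE | CMD_T_SENT yields "active,sent". The string is
 * built with GFP_ATOMIC allocations and may be NULL on allocation failure.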
*/ static char *target_ts_to_str(u32 ts) { char *str = NULL; if (ts & CMD_T_ABORTED) target_append_str(&str, "aborted"); if (ts & CMD_T_ACTIVE) target_append_str(&str, "active"); if (ts & CMD_T_COMPLETE) target_append_str(&str, "complete"); if (ts & CMD_T_SENT) target_append_str(&str, "sent"); if (ts & CMD_T_STOP) target_append_str(&str, "stop"); if (ts & CMD_T_FABRIC_STOP) target_append_str(&str, "fabric_stop"); return str; } static const char *target_tmf_name(enum tcm_tmreq_table tmf) { switch (tmf) { case TMR_ABORT_TASK: return "ABORT_TASK"; case TMR_ABORT_TASK_SET: return "ABORT_TASK_SET"; case TMR_CLEAR_ACA: return "CLEAR_ACA"; case TMR_CLEAR_TASK_SET: return "CLEAR_TASK_SET"; case TMR_LUN_RESET: return "LUN_RESET"; case TMR_TARGET_WARM_RESET: return "TARGET_WARM_RESET"; case TMR_TARGET_COLD_RESET: return "TARGET_COLD_RESET"; case TMR_LUN_RESET_PRO: return "LUN_RESET_PRO"; case TMR_UNKNOWN: break; } return "(?)"; } void target_show_cmd(const char *pfx, struct se_cmd *cmd) { char *ts_str = target_ts_to_str(cmd->transport_state); const u8 *cdb = cmd->t_task_cdb; struct se_tmr_req *tmf = cmd->se_tmr_req; if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n", pfx, cdb[0], cdb[1], cmd->tag, data_dir_name(cmd->data_direction), cmd->se_tfo->get_cmd_state(cmd), cmd_state_name(cmd->t_state), cmd->data_length, kref_read(&cmd->cmd_kref), ts_str); } else { pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n", pfx, target_tmf_name(tmf->function), cmd->tag, tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), cmd_state_name(cmd->t_state), kref_read(&cmd->cmd_kref), ts_str); } kfree(ts_str); } EXPORT_SYMBOL(target_show_cmd); static void target_stop_cmd_counter_confirm(struct percpu_ref *ref) { struct target_cmd_counter *cmd_cnt = container_of(ref, struct target_cmd_counter, refcnt); complete_all(&cmd_cnt->stop_done); } /** * target_stop_cmd_counter - Stop new IO from being added to the counter. * @cmd_cnt: counter to stop */ void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt) { pr_debug("Stopping command counter.\n"); if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1)) percpu_ref_kill_and_confirm(&cmd_cnt->refcnt, target_stop_cmd_counter_confirm); } EXPORT_SYMBOL_GPL(target_stop_cmd_counter); /** * target_stop_session - Stop new IO from being queued on the session. * @se_sess: session to stop */ void target_stop_session(struct se_session *se_sess) { target_stop_cmd_counter(se_sess->cmd_cnt); } EXPORT_SYMBOL(target_stop_session); /** * target_wait_for_cmds - Wait for outstanding cmds. * @cmd_cnt: counter to wait for active I/O for. */ void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt) { int ret; WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped)); do { pr_debug("Waiting for running cmds to complete.\n"); ret = wait_event_timeout(cmd_cnt->refcnt_wq, percpu_ref_is_zero(&cmd_cnt->refcnt), 180 * HZ); } while (ret <= 0); wait_for_completion(&cmd_cnt->stop_done); pr_debug("Waiting for cmds done.\n"); } EXPORT_SYMBOL_GPL(target_wait_for_cmds); /** * target_wait_for_sess_cmds - Wait for outstanding commands * @se_sess: session to wait for active I/O */ void target_wait_for_sess_cmds(struct se_session *se_sess) { target_wait_for_cmds(se_sess->cmd_cnt); } EXPORT_SYMBOL(target_wait_for_sess_cmds); /* * Prevent that new percpu_ref_tryget_live() calls succeed and wait until * all references to the LUN have been released. 
Called during LUN shutdown. */ void transport_clear_lun_ref(struct se_lun *lun) { percpu_ref_kill(&lun->lun_ref); wait_for_completion(&lun->lun_shutdown_comp); } static bool __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, bool *aborted, bool *tas, unsigned long *flags) __releases(&cmd->t_state_lock) __acquires(&cmd->t_state_lock) { lockdep_assert_held(&cmd->t_state_lock); if (fabric_stop) cmd->transport_state |= CMD_T_FABRIC_STOP; if (cmd->transport_state & CMD_T_ABORTED) *aborted = true; if (cmd->transport_state & CMD_T_TAS) *tas = true; if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) return false; if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) return false; if (!(cmd->transport_state & CMD_T_ACTIVE)) return false; if (fabric_stop && *aborted) return false; cmd->transport_state |= CMD_T_STOP; target_show_cmd("wait_for_tasks: Stopping ", cmd); spin_unlock_irqrestore(&cmd->t_state_lock, *flags); while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, 180 * HZ)) target_show_cmd("wait for tasks: ", cmd); spin_lock_irqsave(&cmd->t_state_lock, *flags); cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->" "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); return true; } /** * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp * @cmd: command to wait on */ bool transport_wait_for_tasks(struct se_cmd *cmd) { unsigned long flags; bool ret, aborted = false, tas = false; spin_lock_irqsave(&cmd->t_state_lock, flags); ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); spin_unlock_irqrestore(&cmd->t_state_lock, flags); return ret; } EXPORT_SYMBOL(transport_wait_for_tasks); struct sense_detail { u8 key; u8 asc; u8 ascq; bool add_sense_info; }; static const struct sense_detail sense_detail_table[] = { [TCM_NO_SENSE] = { .key = NOT_READY }, [TCM_NON_EXISTENT_LUN] = { .key = ILLEGAL_REQUEST, .asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */ }, [TCM_UNSUPPORTED_SCSI_OPCODE] = { .key = ILLEGAL_REQUEST, .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ }, [TCM_SECTOR_COUNT_TOO_MANY] = { .key = ILLEGAL_REQUEST, .asc = 0x20, /* INVALID COMMAND OPERATION CODE */ }, [TCM_UNKNOWN_MODE_PAGE] = { .key = ILLEGAL_REQUEST, .asc = 0x24, /* INVALID FIELD IN CDB */ }, [TCM_CHECK_CONDITION_ABORT_CMD] = { .key = ABORTED_COMMAND, .asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */ .ascq = 0x03, }, [TCM_INCORRECT_AMOUNT_OF_DATA] = { .key = ABORTED_COMMAND, .asc = 0x0c, /* WRITE ERROR */ .ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */ }, [TCM_INVALID_CDB_FIELD] = { .key = ILLEGAL_REQUEST, .asc = 0x24, /* INVALID FIELD IN CDB */ }, [TCM_INVALID_PARAMETER_LIST] = { .key = ILLEGAL_REQUEST, .asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */ }, [TCM_TOO_MANY_TARGET_DESCS] = { .key = ILLEGAL_REQUEST, .asc = 0x26, .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */ }, [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = { .key = ILLEGAL_REQUEST, .asc = 0x26, .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */ }, [TCM_TOO_MANY_SEGMENT_DESCS] = { .key = ILLEGAL_REQUEST, .asc = 0x26, .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */ }, [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = { .key = ILLEGAL_REQUEST, .asc = 0x26, .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */ }, [TCM_PARAMETER_LIST_LENGTH_ERROR] = { .key = ILLEGAL_REQUEST, .asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */ }, [TCM_UNEXPECTED_UNSOLICITED_DATA] = { 
.key = ILLEGAL_REQUEST, .asc = 0x0c, /* WRITE ERROR */ .ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */ }, [TCM_SERVICE_CRC_ERROR] = { .key = ABORTED_COMMAND, .asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */ .ascq = 0x05, /* N/A */ }, [TCM_SNACK_REJECTED] = { .key = ABORTED_COMMAND, .asc = 0x11, /* READ ERROR */ .ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */ }, [TCM_WRITE_PROTECTED] = { .key = DATA_PROTECT, .asc = 0x27, /* WRITE PROTECTED */ }, [TCM_ADDRESS_OUT_OF_RANGE] = { .key = ILLEGAL_REQUEST, .asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */ }, [TCM_CHECK_CONDITION_UNIT_ATTENTION] = { .key = UNIT_ATTENTION, }, [TCM_MISCOMPARE_VERIFY] = { .key = MISCOMPARE, .asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */ .ascq = 0x00, .add_sense_info = true, }, [TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = { .key = ABORTED_COMMAND, .asc = 0x10, .ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */ .add_sense_info = true, }, [TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = { .key = ABORTED_COMMAND, .asc = 0x10, .ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */ .add_sense_info = true, }, [TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = { .key = ABORTED_COMMAND, .asc = 0x10, .ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */ .add_sense_info = true, }, [TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = { .key = COPY_ABORTED, .asc = 0x0d, .ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */ }, [TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = { /* * Returning ILLEGAL REQUEST would cause immediate IO errors on * Solaris initiators. Returning NOT READY instead means the * operations will be retried a finite number of times and we * can survive intermittent errors. */ .key = NOT_READY, .asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */ }, [TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = { /* * From spc4r22 section5.7.7,5.7.8 * If a PERSISTENT RESERVE OUT command with a REGISTER service action * or a REGISTER AND IGNORE EXISTING KEY service action or * REGISTER AND MOVE service actionis attempted, * but there are insufficient device server resources to complete the * operation, then the command shall be terminated with CHECK CONDITION * status, with the sense key set to ILLEGAL REQUEST,and the additonal * sense code set to INSUFFICIENT REGISTRATION RESOURCES. */ .key = ILLEGAL_REQUEST, .asc = 0x55, .ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */ }, [TCM_INVALID_FIELD_IN_COMMAND_IU] = { .key = ILLEGAL_REQUEST, .asc = 0x0e, .ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */ }, [TCM_ALUA_TG_PT_STANDBY] = { .key = NOT_READY, .asc = 0x04, .ascq = ASCQ_04H_ALUA_TG_PT_STANDBY, }, [TCM_ALUA_TG_PT_UNAVAILABLE] = { .key = NOT_READY, .asc = 0x04, .ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE, }, [TCM_ALUA_STATE_TRANSITION] = { .key = NOT_READY, .asc = 0x04, .ascq = ASCQ_04H_ALUA_STATE_TRANSITION, }, [TCM_ALUA_OFFLINE] = { .key = NOT_READY, .asc = 0x04, .ascq = ASCQ_04H_ALUA_OFFLINE, }, }; /** * translate_sense_reason - translate a sense reason into T10 key, asc and ascq * @cmd: SCSI command in which the resulting sense buffer or SCSI status will * be stored. * @reason: LIO sense reason code. If this argument has the value * TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If * dequeuing a unit attention fails due to multiple commands being processed * concurrently, set the command status to BUSY. * * Return: 0 upon success or -EINVAL if the sense buffer is too small. 
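 *
 * For example, TCM_ADDRESS_OUT_OF_RANGE is reported as ILLEGAL REQUEST with
 * ASC 0x21 (LOGICAL BLOCK ADDRESS OUT OF RANGE), per sense_detail_table[]
 * above.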
*/ static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) { const struct sense_detail *sd; u8 *buffer = cmd->sense_buffer; int r = (__force int)reason; u8 key, asc, ascq; bool desc_format = target_sense_desc_format(cmd->se_dev); if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key) sd = &sense_detail_table[r]; else sd = &sense_detail_table[(__force int) TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE]; key = sd->key; if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) { if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, &ascq)) { cmd->scsi_status = SAM_STAT_BUSY; return; } } else { WARN_ON_ONCE(sd->asc == 0); asc = sd->asc; ascq = sd->ascq; } cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; cmd->scsi_status = SAM_STAT_CHECK_CONDITION; cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq); if (sd->add_sense_info) WARN_ON_ONCE(scsi_set_sense_information(buffer, cmd->scsi_sense_length, cmd->sense_info) < 0); } int transport_send_check_condition_and_sense(struct se_cmd *cmd, sense_reason_t reason, int from_transport) { unsigned long flags; WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { spin_unlock_irqrestore(&cmd->t_state_lock, flags); return 0; } cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (!from_transport) translate_sense_reason(cmd, reason); trace_target_cmd_complete(cmd); return cmd->se_tfo->queue_status(cmd); } EXPORT_SYMBOL(transport_send_check_condition_and_sense); /** * target_send_busy - Send SCSI BUSY status back to the initiator * @cmd: SCSI command for which to send a BUSY reply. * * Note: Only call this function if target_submit_cmd*() failed. */ int target_send_busy(struct se_cmd *cmd) { WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); cmd->scsi_status = SAM_STAT_BUSY; trace_target_cmd_complete(cmd); return cmd->se_tfo->queue_status(cmd); } EXPORT_SYMBOL(target_send_busy); static void target_tmr_work(struct work_struct *work) { struct se_cmd *cmd = container_of(work, struct se_cmd, work); struct se_device *dev = cmd->se_dev; struct se_tmr_req *tmr = cmd->se_tmr_req; int ret; if (cmd->transport_state & CMD_T_ABORTED) goto aborted; switch (tmr->function) { case TMR_ABORT_TASK: core_tmr_abort_task(dev, tmr, cmd->se_sess); break; case TMR_ABORT_TASK_SET: case TMR_CLEAR_ACA: case TMR_CLEAR_TASK_SET: tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED; break; case TMR_LUN_RESET: ret = core_tmr_lun_reset(dev, tmr, NULL, NULL); tmr->response = (!ret) ? 
TMR_FUNCTION_COMPLETE : TMR_FUNCTION_REJECTED; if (tmr->response == TMR_FUNCTION_COMPLETE) { target_dev_ua_allocate(dev, 0x29, ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); } break; case TMR_TARGET_WARM_RESET: tmr->response = TMR_FUNCTION_REJECTED; break; case TMR_TARGET_COLD_RESET: tmr->response = TMR_FUNCTION_REJECTED; break; default: pr_err("Unknown TMR function: 0x%02x.\n", tmr->function); tmr->response = TMR_FUNCTION_REJECTED; break; } if (cmd->transport_state & CMD_T_ABORTED) goto aborted; cmd->se_tfo->queue_tm_rsp(cmd); transport_lun_remove_cmd(cmd); transport_cmd_check_stop_to_fabric(cmd); return; aborted: target_handle_abort(cmd); } int transport_generic_handle_tmr( struct se_cmd *cmd) { unsigned long flags; bool aborted = false; spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags); list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list); spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags); spin_lock_irqsave(&cmd->t_state_lock, flags); if (cmd->transport_state & CMD_T_ABORTED) { aborted = true; } else { cmd->t_state = TRANSPORT_ISTATE_PROCESSING; cmd->transport_state |= CMD_T_ACTIVE; } spin_unlock_irqrestore(&cmd->t_state_lock, flags); if (aborted) { pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n", cmd->se_tmr_req->function, cmd->se_tmr_req->ref_task_tag, cmd->tag); target_handle_abort(cmd); return 0; } INIT_WORK(&cmd->work, target_tmr_work); schedule_work(&cmd->work); return 0; } EXPORT_SYMBOL(transport_generic_handle_tmr); bool target_check_wce(struct se_device *dev) { bool wce = false; if (dev->transport->get_write_cache) wce = dev->transport->get_write_cache(dev); else if (dev->dev_attrib.emulate_write_cache > 0) wce = true; return wce; } bool target_check_fua(struct se_device *dev) { return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0; }
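/*
 * Note on the two helpers above: target_check_wce() treats the write cache
 * as enabled when the backend reports it through ->get_write_cache() or when
 * the emulate_write_cache attribute is set, and target_check_fua()
 * additionally requires emulate_fua_write.
 */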
// SPDX-License-Identifier: GPL-2.0-only /* * w83795.c - Linux kernel driver for hardware monitoring * Copyright (C) 2008 Nuvoton Technology Corp. * Wei Song * Copyright (C) 2010 Jean Delvare <[email protected]> * * Supports following chips: * * Chip #vin #fanin #pwm #temp #dts wchipid vendid i2c ISA * w83795g 21 14 8 6 8 0x79 0x5ca3 yes no * w83795adg 18 14 2 6 8 0x79 0x5ca3 yes no */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/jiffies.h> #include <linux/util_macros.h> /* Addresses to scan */ static const unsigned short normal_i2c[] = { 0x2c, 0x2d, 0x2e, 0x2f, I2C_CLIENT_END }; static bool reset; module_param(reset, bool, 0); MODULE_PARM_DESC(reset, "Set to 1 to reset chip, not recommended"); #define W83795_REG_BANKSEL 0x00 #define W83795_REG_VENDORID 0xfd #define W83795_REG_CHIPID 0xfe #define W83795_REG_DEVICEID 0xfb #define W83795_REG_DEVICEID_A 0xff #define W83795_REG_I2C_ADDR 0xfc #define W83795_REG_CONFIG 0x01 #define W83795_REG_CONFIG_CONFIG48 0x04 #define W83795_REG_CONFIG_START 0x01 /* Multi-Function Pin Ctrl Registers */ #define W83795_REG_VOLT_CTRL1 0x02 #define W83795_REG_VOLT_CTRL2 0x03 #define W83795_REG_TEMP_CTRL1 0x04 #define W83795_REG_TEMP_CTRL2 0x05 #define W83795_REG_FANIN_CTRL1 0x06 #define W83795_REG_FANIN_CTRL2 0x07 #define W83795_REG_VMIGB_CTRL 0x08 #define TEMP_READ 0 #define TEMP_CRIT 1 #define TEMP_CRIT_HYST 2 #define TEMP_WARN 3 #define TEMP_WARN_HYST 4 /* * only crit and crit_hyst affect real-time alarm status * current crit crit_hyst warn warn_hyst */ static const u16 W83795_REG_TEMP[][5] = { {0x21, 0x96, 0x97, 0x98, 0x99}, /* TD1/TR1 */ {0x22, 0x9a, 0x9b, 0x9c, 0x9d}, /* TD2/TR2 */ {0x23, 0x9e, 0x9f, 0xa0, 0xa1}, /* TD3/TR3 */ {0x24, 0xa2, 0xa3, 0xa4, 0xa5}, /* TD4/TR4 */ {0x1f, 0xa6, 0xa7, 0xa8, 0xa9}, /* TR5 */ {0x20, 0xaa, 0xab, 0xac, 0xad}, /* TR6 */ }; #define IN_READ 0 #define IN_MAX 1 #define IN_LOW 2 static const u16 W83795_REG_IN[][3] = { /* Current, HL, LL */ {0x10, 0x70, 0x71}, /* VSEN1 */ {0x11, 0x72, 0x73}, /* VSEN2 */ {0x12, 0x74, 0x75}, /* VSEN3 */ {0x13, 0x76, 0x77}, /* VSEN4 */ {0x14, 0x78, 0x79}, /* VSEN5 */ {0x15, 0x7a, 0x7b}, /* VSEN6 */ {0x16, 0x7c, 0x7d}, /* VSEN7 */ {0x17, 0x7e, 0x7f}, /* VSEN8 */ {0x18, 0x80, 0x81}, /* VSEN9 */ {0x19, 0x82, 0x83}, /* VSEN10 */ {0x1A, 0x84, 0x85}, /* VSEN11 */ {0x1B, 0x86, 0x87}, /* VTT */ {0x1C, 0x88, 0x89}, /* 3VDD */ {0x1D, 0x8a, 0x8b}, /* 3VSB */ {0x1E, 0x8c, 0x8d}, /* VBAT */ {0x1F, 0xa6, 0xa7}, /* VSEN12 */ {0x20, 0xaa, 0xab}, /* VSEN13 */ {0x21, 0x96, 0x97}, /* VSEN14 */ {0x22, 0x9a, 0x9b}, /* VSEN15 */ {0x23, 0x9e, 0x9f}, /* VSEN16 */ {0x24, 0xa2, 0xa3}, /* VSEN17 */ }; #define W83795_REG_VRLSB 0x3C static const u8 W83795_REG_IN_HL_LSB[] = { 0x8e, /* VSEN1-4 */ 0x90, /* VSEN5-8 */ 0x92, /* VSEN9-11 */ 0x94, /* VTT, 3VDD, 3VSB, 3VBAT */ 0xa8, /* VSEN12 */ 0xac, /* VSEN13 */ 0x98, /* VSEN14 */ 0x9c, /* VSEN15 */ 0xa0, /* VSEN16 */ 0xa4, /* VSEN17 */ }; #define IN_LSB_REG(index, type) \ (((type) == 1) ? W83795_REG_IN_HL_LSB[(index)] \ : (W83795_REG_IN_HL_LSB[(index)] + 1)) #define IN_LSB_SHIFT 0 #define IN_LSB_IDX 1 static const u8 IN_LSB_SHIFT_IDX[][2] = { /* High/Low LSB shift, LSB No. 
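 * Each entry gives the bit position of an input's two extra limit LSBs
 * within its LSB register, and the index into W83795_REG_IN_HL_LSB[] that
 * selects the register (see IN_LSB_REG() above).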
*/ {0x00, 0x00}, /* VSEN1 */ {0x02, 0x00}, /* VSEN2 */ {0x04, 0x00}, /* VSEN3 */ {0x06, 0x00}, /* VSEN4 */ {0x00, 0x01}, /* VSEN5 */ {0x02, 0x01}, /* VSEN6 */ {0x04, 0x01}, /* VSEN7 */ {0x06, 0x01}, /* VSEN8 */ {0x00, 0x02}, /* VSEN9 */ {0x02, 0x02}, /* VSEN10 */ {0x04, 0x02}, /* VSEN11 */ {0x00, 0x03}, /* VTT */ {0x02, 0x03}, /* 3VDD */ {0x04, 0x03}, /* 3VSB */ {0x06, 0x03}, /* VBAT */ {0x06, 0x04}, /* VSEN12 */ {0x06, 0x05}, /* VSEN13 */ {0x06, 0x06}, /* VSEN14 */ {0x06, 0x07}, /* VSEN15 */ {0x06, 0x08}, /* VSEN16 */ {0x06, 0x09}, /* VSEN17 */ }; #define W83795_REG_FAN(index) (0x2E + (index)) #define W83795_REG_FAN_MIN_HL(index) (0xB6 + (index)) #define W83795_REG_FAN_MIN_LSB(index) (0xC4 + (index) / 2) #define W83795_REG_FAN_MIN_LSB_SHIFT(index) \ (((index) & 1) ? 4 : 0) #define W83795_REG_VID_CTRL 0x6A #define W83795_REG_ALARM_CTRL 0x40 #define ALARM_CTRL_RTSACS (1 << 7) #define W83795_REG_ALARM(index) (0x41 + (index)) #define W83795_REG_CLR_CHASSIS 0x4D #define W83795_REG_BEEP(index) (0x50 + (index)) #define W83795_REG_OVT_CFG 0x58 #define OVT_CFG_SEL (1 << 7) #define W83795_REG_FCMS1 0x201 #define W83795_REG_FCMS2 0x208 #define W83795_REG_TFMR(index) (0x202 + (index)) #define W83795_REG_FOMC 0x20F #define W83795_REG_TSS(index) (0x209 + (index)) #define TSS_MAP_RESERVED 0xff static const u8 tss_map[4][6] = { { 0, 1, 2, 3, 4, 5}, { 6, 7, 8, 9, 0, 1}, {10, 11, 12, 13, 2, 3}, { 4, 5, 4, 5, TSS_MAP_RESERVED, TSS_MAP_RESERVED}, }; #define PWM_OUTPUT 0 #define PWM_FREQ 1 #define PWM_START 2 #define PWM_NONSTOP 3 #define PWM_STOP_TIME 4 #define W83795_REG_PWM(index, nr) (0x210 + (nr) * 8 + (index)) #define W83795_REG_FTSH(index) (0x240 + (index) * 2) #define W83795_REG_FTSL(index) (0x241 + (index) * 2) #define W83795_REG_TFTS 0x250 #define TEMP_PWM_TTTI 0 #define TEMP_PWM_CTFS 1 #define TEMP_PWM_HCT 2 #define TEMP_PWM_HOT 3 #define W83795_REG_TTTI(index) (0x260 + (index)) #define W83795_REG_CTFS(index) (0x268 + (index)) #define W83795_REG_HT(index) (0x270 + (index)) #define SF4_TEMP 0 #define SF4_PWM 1 #define W83795_REG_SF4_TEMP(temp_num, index) \ (0x280 + 0x10 * (temp_num) + (index)) #define W83795_REG_SF4_PWM(temp_num, index) \ (0x288 + 0x10 * (temp_num) + (index)) #define W83795_REG_DTSC 0x301 #define W83795_REG_DTSE 0x302 #define W83795_REG_DTS(index) (0x26 + (index)) #define W83795_REG_PECI_TBASE(index) (0x320 + (index)) #define DTS_CRIT 0 #define DTS_CRIT_HYST 1 #define DTS_WARN 2 #define DTS_WARN_HYST 3 #define W83795_REG_DTS_EXT(index) (0xB2 + (index)) #define SETUP_PWM_DEFAULT 0 #define SETUP_PWM_UPTIME 1 #define SETUP_PWM_DOWNTIME 2 #define W83795_REG_SETUP_PWM(index) (0x20C + (index)) static inline u16 in_from_reg(u8 index, u16 val) { /* 3VDD, 3VSB and VBAT: 6 mV/bit; other inputs: 2 mV/bit */ if (index >= 12 && index <= 14) return val * 6; else return val * 2; } static inline u16 in_to_reg(u8 index, u16 val) { if (index >= 12 && index <= 14) return val / 6; else return val / 2; } static inline unsigned long fan_from_reg(u16 val) { if ((val == 0xfff) || (val == 0)) return 0; return 1350000UL / val; } static inline u16 fan_to_reg(long rpm) { if (rpm <= 0) return 0x0fff; return clamp_val((1350000 + (rpm >> 1)) / rpm, 1, 0xffe); } static inline unsigned long time_from_reg(u8 reg) { return reg * 100; } static inline u8 time_to_reg(unsigned long val) { return clamp_val((val + 50) / 100, 0, 0xff); } static inline long temp_from_reg(s8 reg) { return reg * 1000; } static inline s8 temp_to_reg(long val, s8 min, s8 max) { return clamp_val(val / 1000, min, max); } static const u16 
pwm_freq_cksel0[16] = { 1024, 512, 341, 256, 205, 171, 146, 128, 85, 64, 32, 16, 8, 4, 2, 1 }; static unsigned int pwm_freq_from_reg(u8 reg, u16 clkin) { unsigned long base_clock; if (reg & 0x80) { base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256); return base_clock / ((reg & 0x7f) + 1); } else return pwm_freq_cksel0[reg & 0x0f]; } static u8 pwm_freq_to_reg(unsigned long val, u16 clkin) { unsigned long base_clock; u8 reg0, reg1; unsigned long best0, best1; /* Best fit for cksel = 0 */ reg0 = find_closest_descending(val, pwm_freq_cksel0, ARRAY_SIZE(pwm_freq_cksel0)); if (val < 375) /* cksel = 1 can't beat this */ return reg0; best0 = pwm_freq_cksel0[reg0]; /* Best fit for cksel = 1 */ base_clock = clkin * 1000 / ((clkin == 48000) ? 384 : 256); reg1 = clamp_val(DIV_ROUND_CLOSEST(base_clock, val), 1, 128); best1 = base_clock / reg1; reg1 = 0x80 | (reg1 - 1); /* Choose the closest one */ if (abs(val - best0) > abs(val - best1)) return reg1; else return reg0; } enum chip_types {w83795g, w83795adg}; struct w83795_data { struct device *hwmon_dev; struct mutex update_lock; unsigned long last_updated; /* In jiffies */ enum chip_types chip_type; u8 bank; u32 has_in; /* Enable monitor VIN or not */ u8 has_dyn_in; /* Only in2-0 can have this */ u16 in[21][3]; /* Register value, read/high/low */ u8 in_lsb[10][3]; /* LSB Register value, high/low */ u8 has_gain; /* has gain: in17-20 * 8 */ u16 has_fan; /* Enable fan14-1 or not */ u16 fan[14]; /* Register value combine */ u16 fan_min[14]; /* Register value combine */ u8 has_temp; /* Enable monitor temp6-1 or not */ s8 temp[6][5]; /* current, crit, crit_hyst, warn, warn_hyst */ u8 temp_read_vrlsb[6]; u8 temp_mode; /* Bit vector, 0 = TR, 1 = TD */ u8 temp_src[3]; /* Register value */ u8 enable_dts; /* * Enable PECI and SB-TSI, * bit 0: =1 enable, =0 disable, * bit 1: =1 AMD SB-TSI, =0 Intel PECI */ u8 has_dts; /* Enable monitor DTS temp */ s8 dts[8]; /* Register value */ u8 dts_read_vrlsb[8]; /* Register value */ s8 dts_ext[4]; /* Register value */ u8 has_pwm; /* * 795g supports 8 pwm, 795adg only supports 2, * no config register, only affected by chip * type */ u8 pwm[8][5]; /* * Register value, output, freq, start, * non stop, stop time */ u16 clkin; /* CLKIN frequency in kHz */ u8 pwm_fcms[2]; /* Register value */ u8 pwm_tfmr[6]; /* Register value */ u8 pwm_fomc; /* Register value */ u16 target_speed[8]; /* * Register value, target speed for speed * cruise */ u8 tol_speed; /* tolerance of target speed */ u8 pwm_temp[6][4]; /* TTTI, CTFS, HCT, HOT */ u8 sf4_reg[6][2][7]; /* 6 temp, temp/dcpwm, 7 registers */ u8 setup_pwm[3]; /* Register value */ u8 alarms[6]; /* Register value */ u8 enable_beep; u8 beeps[6]; /* Register value */ bool valid; char valid_limits; char valid_pwm_config; }; /* * Hardware access * We assume that nobdody can change the bank outside the driver. 
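 *
 * w83795_set_bank() caches the current bank in data->bank and only writes
 * W83795_REG_BANKSEL when the low three bits change; w83795_read() and
 * w83795_write() encode the bank in the high byte of their 16-bit register
 * argument and the in-bank offset in the low byte.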
*/ /* Must be called with data->update_lock held, except during initialization */ static int w83795_set_bank(struct i2c_client *client, u8 bank) { struct w83795_data *data = i2c_get_clientdata(client); int err; /* If the same bank is already set, nothing to do */ if ((data->bank & 0x07) == bank) return 0; /* Change to new bank, preserve all other bits */ bank |= data->bank & ~0x07; err = i2c_smbus_write_byte_data(client, W83795_REG_BANKSEL, bank); if (err < 0) { dev_err(&client->dev, "Failed to set bank to %d, err %d\n", (int)bank, err); return err; } data->bank = bank; return 0; } /* Must be called with data->update_lock held, except during initialization */ static u8 w83795_read(struct i2c_client *client, u16 reg) { int err; err = w83795_set_bank(client, reg >> 8); if (err < 0) return 0x00; /* Arbitrary */ err = i2c_smbus_read_byte_data(client, reg & 0xff); if (err < 0) { dev_err(&client->dev, "Failed to read from register 0x%03x, err %d\n", (int)reg, err); return 0x00; /* Arbitrary */ } return err; } /* Must be called with data->update_lock held, except during initialization */ static int w83795_write(struct i2c_client *client, u16 reg, u8 value) { int err; err = w83795_set_bank(client, reg >> 8); if (err < 0) return err; err = i2c_smbus_write_byte_data(client, reg & 0xff, value); if (err < 0) dev_err(&client->dev, "Failed to write to register 0x%03x, err %d\n", (int)reg, err); return err; } static void w83795_update_limits(struct i2c_client *client) { struct w83795_data *data = i2c_get_clientdata(client); int i, limit; u8 lsb; /* Read the voltage limits */ for (i = 0; i < ARRAY_SIZE(data->in); i++) { if (!(data->has_in & (1 << i))) continue; data->in[i][IN_MAX] = w83795_read(client, W83795_REG_IN[i][IN_MAX]); data->in[i][IN_LOW] = w83795_read(client, W83795_REG_IN[i][IN_LOW]); } for (i = 0; i < ARRAY_SIZE(data->in_lsb); i++) { if ((i == 2 && data->chip_type == w83795adg) || (i >= 4 && !(data->has_in & (1 << (i + 11))))) continue; data->in_lsb[i][IN_MAX] = w83795_read(client, IN_LSB_REG(i, IN_MAX)); data->in_lsb[i][IN_LOW] = w83795_read(client, IN_LSB_REG(i, IN_LOW)); } /* Read the fan limits */ lsb = 0; /* Silent false gcc warning */ for (i = 0; i < ARRAY_SIZE(data->fan); i++) { /* * Each register contains LSB for 2 fans, but we want to * read it only once to save time */ if ((i & 1) == 0 && (data->has_fan & (3 << i))) lsb = w83795_read(client, W83795_REG_FAN_MIN_LSB(i)); if (!(data->has_fan & (1 << i))) continue; data->fan_min[i] = w83795_read(client, W83795_REG_FAN_MIN_HL(i)) << 4; data->fan_min[i] |= (lsb >> W83795_REG_FAN_MIN_LSB_SHIFT(i)) & 0x0F; } /* Read the temperature limits */ for (i = 0; i < ARRAY_SIZE(data->temp); i++) { if (!(data->has_temp & (1 << i))) continue; for (limit = TEMP_CRIT; limit <= TEMP_WARN_HYST; limit++) data->temp[i][limit] = w83795_read(client, W83795_REG_TEMP[i][limit]); } /* Read the DTS limits */ if (data->enable_dts) { for (limit = DTS_CRIT; limit <= DTS_WARN_HYST; limit++) data->dts_ext[limit] = w83795_read(client, W83795_REG_DTS_EXT(limit)); } /* Read beep settings */ if (data->enable_beep) { for (i = 0; i < ARRAY_SIZE(data->beeps); i++) data->beeps[i] = w83795_read(client, W83795_REG_BEEP(i)); } data->valid_limits = 1; } static struct w83795_data *w83795_update_pwm_config(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); int i, tmp; mutex_lock(&data->update_lock); if (data->valid_pwm_config) goto END; /* Read temperature source selection */ for (i = 0; i < 
ARRAY_SIZE(data->temp_src); i++) data->temp_src[i] = w83795_read(client, W83795_REG_TSS(i)); /* Read automatic fan speed control settings */ data->pwm_fcms[0] = w83795_read(client, W83795_REG_FCMS1); data->pwm_fcms[1] = w83795_read(client, W83795_REG_FCMS2); for (i = 0; i < ARRAY_SIZE(data->pwm_tfmr); i++) data->pwm_tfmr[i] = w83795_read(client, W83795_REG_TFMR(i)); data->pwm_fomc = w83795_read(client, W83795_REG_FOMC); for (i = 0; i < data->has_pwm; i++) { for (tmp = PWM_FREQ; tmp <= PWM_STOP_TIME; tmp++) data->pwm[i][tmp] = w83795_read(client, W83795_REG_PWM(i, tmp)); } for (i = 0; i < ARRAY_SIZE(data->target_speed); i++) { data->target_speed[i] = w83795_read(client, W83795_REG_FTSH(i)) << 4; data->target_speed[i] |= w83795_read(client, W83795_REG_FTSL(i)) >> 4; } data->tol_speed = w83795_read(client, W83795_REG_TFTS) & 0x3f; for (i = 0; i < ARRAY_SIZE(data->pwm_temp); i++) { data->pwm_temp[i][TEMP_PWM_TTTI] = w83795_read(client, W83795_REG_TTTI(i)) & 0x7f; data->pwm_temp[i][TEMP_PWM_CTFS] = w83795_read(client, W83795_REG_CTFS(i)); tmp = w83795_read(client, W83795_REG_HT(i)); data->pwm_temp[i][TEMP_PWM_HCT] = tmp >> 4; data->pwm_temp[i][TEMP_PWM_HOT] = tmp & 0x0f; } /* Read SmartFanIV trip points */ for (i = 0; i < ARRAY_SIZE(data->sf4_reg); i++) { for (tmp = 0; tmp < 7; tmp++) { data->sf4_reg[i][SF4_TEMP][tmp] = w83795_read(client, W83795_REG_SF4_TEMP(i, tmp)); data->sf4_reg[i][SF4_PWM][tmp] = w83795_read(client, W83795_REG_SF4_PWM(i, tmp)); } } /* Read setup PWM */ for (i = 0; i < ARRAY_SIZE(data->setup_pwm); i++) data->setup_pwm[i] = w83795_read(client, W83795_REG_SETUP_PWM(i)); data->valid_pwm_config = 1; END: mutex_unlock(&data->update_lock); return data; } static struct w83795_data *w83795_update_device(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); u16 tmp; u8 intrusion; int i; mutex_lock(&data->update_lock); if (!data->valid_limits) w83795_update_limits(client); if (!(time_after(jiffies, data->last_updated + HZ * 2) || !data->valid)) goto END; /* Update the voltages value */ for (i = 0; i < ARRAY_SIZE(data->in); i++) { if (!(data->has_in & (1 << i))) continue; tmp = w83795_read(client, W83795_REG_IN[i][IN_READ]) << 2; tmp |= w83795_read(client, W83795_REG_VRLSB) >> 6; data->in[i][IN_READ] = tmp; } /* in0-2 can have dynamic limits (W83795G only) */ if (data->has_dyn_in) { u8 lsb_max = w83795_read(client, IN_LSB_REG(0, IN_MAX)); u8 lsb_low = w83795_read(client, IN_LSB_REG(0, IN_LOW)); for (i = 0; i < 3; i++) { if (!(data->has_dyn_in & (1 << i))) continue; data->in[i][IN_MAX] = w83795_read(client, W83795_REG_IN[i][IN_MAX]); data->in[i][IN_LOW] = w83795_read(client, W83795_REG_IN[i][IN_LOW]); data->in_lsb[i][IN_MAX] = (lsb_max >> (2 * i)) & 0x03; data->in_lsb[i][IN_LOW] = (lsb_low >> (2 * i)) & 0x03; } } /* Update fan */ for (i = 0; i < ARRAY_SIZE(data->fan); i++) { if (!(data->has_fan & (1 << i))) continue; data->fan[i] = w83795_read(client, W83795_REG_FAN(i)) << 4; data->fan[i] |= w83795_read(client, W83795_REG_VRLSB) >> 4; } /* Update temperature */ for (i = 0; i < ARRAY_SIZE(data->temp); i++) { data->temp[i][TEMP_READ] = w83795_read(client, W83795_REG_TEMP[i][TEMP_READ]); data->temp_read_vrlsb[i] = w83795_read(client, W83795_REG_VRLSB); } /* Update dts temperature */ if (data->enable_dts) { for (i = 0; i < ARRAY_SIZE(data->dts); i++) { if (!(data->has_dts & (1 << i))) continue; data->dts[i] = w83795_read(client, W83795_REG_DTS(i)); data->dts_read_vrlsb[i] = w83795_read(client, W83795_REG_VRLSB); } } /* 
Update pwm output */ for (i = 0; i < data->has_pwm; i++) { data->pwm[i][PWM_OUTPUT] = w83795_read(client, W83795_REG_PWM(i, PWM_OUTPUT)); } /* * Update intrusion and alarms * It is important to read intrusion first, because reading from * register SMI STS6 clears the interrupt status temporarily. */ tmp = w83795_read(client, W83795_REG_ALARM_CTRL); /* Switch to interrupt status for intrusion if needed */ if (tmp & ALARM_CTRL_RTSACS) w83795_write(client, W83795_REG_ALARM_CTRL, tmp & ~ALARM_CTRL_RTSACS); intrusion = w83795_read(client, W83795_REG_ALARM(5)) & (1 << 6); /* Switch to real-time alarms */ w83795_write(client, W83795_REG_ALARM_CTRL, tmp | ALARM_CTRL_RTSACS); for (i = 0; i < ARRAY_SIZE(data->alarms); i++) data->alarms[i] = w83795_read(client, W83795_REG_ALARM(i)); data->alarms[5] |= intrusion; /* Restore original configuration if needed */ if (!(tmp & ALARM_CTRL_RTSACS)) w83795_write(client, W83795_REG_ALARM_CTRL, tmp & ~ALARM_CTRL_RTSACS); data->last_updated = jiffies; data->valid = true; END: mutex_unlock(&data->update_lock); return data; } /* * Sysfs attributes */ #define ALARM_STATUS 0 #define BEEP_ENABLE 1 static ssize_t show_alarm_beep(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_device(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index >> 3; int bit = sensor_attr->index & 0x07; u8 val; if (nr == ALARM_STATUS) val = (data->alarms[index] >> bit) & 1; else /* BEEP_ENABLE */ val = (data->beeps[index] >> bit) & 1; return sprintf(buf, "%u\n", val); } static ssize_t store_beep(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index >> 3; int shift = sensor_attr->index & 0x07; u8 beep_bit = 1 << shift; unsigned long val; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; if (val != 0 && val != 1) return -EINVAL; mutex_lock(&data->update_lock); data->beeps[index] = w83795_read(client, W83795_REG_BEEP(index)); data->beeps[index] &= ~beep_bit; data->beeps[index] |= val << shift; w83795_write(client, W83795_REG_BEEP(index), data->beeps[index]); mutex_unlock(&data->update_lock); return count; } /* Write 0 to clear chassis alarm */ static ssize_t store_chassis_clear(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); unsigned long val; if (kstrtoul(buf, 10, &val) < 0 || val != 0) return -EINVAL; mutex_lock(&data->update_lock); val = w83795_read(client, W83795_REG_CLR_CHASSIS); val |= 0x80; w83795_write(client, W83795_REG_CLR_CHASSIS, val); /* Clear status and force cache refresh */ w83795_read(client, W83795_REG_ALARM(5)); data->valid = false; mutex_unlock(&data->update_lock); return count; } #define FAN_INPUT 0 #define FAN_MIN 1 static ssize_t show_fan(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; struct w83795_data *data = w83795_update_device(dev); u16 val; if (nr == FAN_INPUT) val = data->fan[index] & 0x0fff; else val = data->fan_min[index] & 0x0fff; return sprintf(buf, "%lu\n", fan_from_reg(val)); } static ssize_t 
store_fan_min(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); unsigned long val; if (kstrtoul(buf, 10, &val)) return -EINVAL; val = fan_to_reg(val); mutex_lock(&data->update_lock); data->fan_min[index] = val; w83795_write(client, W83795_REG_FAN_MIN_HL(index), (val >> 4) & 0xff); val &= 0x0f; if (index & 1) { val <<= 4; val |= w83795_read(client, W83795_REG_FAN_MIN_LSB(index)) & 0x0f; } else { val |= w83795_read(client, W83795_REG_FAN_MIN_LSB(index)) & 0xf0; } w83795_write(client, W83795_REG_FAN_MIN_LSB(index), val & 0xff); mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data; struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned int val; data = nr == PWM_OUTPUT ? w83795_update_device(dev) : w83795_update_pwm_config(dev); switch (nr) { case PWM_STOP_TIME: val = time_from_reg(data->pwm[index][nr]); break; case PWM_FREQ: val = pwm_freq_from_reg(data->pwm[index][nr], data->clkin); break; default: val = data->pwm[index][nr]; break; } return sprintf(buf, "%u\n", val); } static ssize_t store_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned long val; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; mutex_lock(&data->update_lock); switch (nr) { case PWM_STOP_TIME: val = time_to_reg(val); break; case PWM_FREQ: val = pwm_freq_to_reg(val, data->clkin); break; default: val = clamp_val(val, 0, 0xff); break; } w83795_write(client, W83795_REG_PWM(index, nr), val); data->pwm[index][nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); struct w83795_data *data = w83795_update_pwm_config(dev); int index = sensor_attr->index; u8 tmp; /* Speed cruise mode */ if (data->pwm_fcms[0] & (1 << index)) { tmp = 2; goto out; } /* Thermal cruise or SmartFan IV mode */ for (tmp = 0; tmp < 6; tmp++) { if (data->pwm_tfmr[tmp] & (1 << index)) { tmp = 3; goto out; } } /* Manual mode */ tmp = 1; out: return sprintf(buf, "%u\n", tmp); } static ssize_t store_pwm_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; unsigned long val; int i; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; if (val < 1 || val > 2) return -EINVAL; #ifndef CONFIG_SENSORS_W83795_FANCTRL if (val > 1) { dev_warn(dev, "Automatic fan speed control support disabled\n"); dev_warn(dev, "Build with CONFIG_SENSORS_W83795_FANCTRL=y if you want it\n"); return -EOPNOTSUPP; } #endif mutex_lock(&data->update_lock); switch (val) { case 1: /* Clear speed cruise mode bits */ data->pwm_fcms[0] &= ~(1 << index); 
w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); /* Clear thermal cruise mode bits */ for (i = 0; i < 6; i++) { data->pwm_tfmr[i] &= ~(1 << index); w83795_write(client, W83795_REG_TFMR(i), data->pwm_tfmr[i]); } break; case 2: data->pwm_fcms[0] |= (1 << index); w83795_write(client, W83795_REG_FCMS1, data->pwm_fcms[0]); break; } mutex_unlock(&data->update_lock); return count; } static ssize_t show_pwm_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_pwm_config(dev); int index = to_sensor_dev_attr_2(attr)->index; unsigned int mode; if (data->pwm_fomc & (1 << index)) mode = 0; /* DC */ else mode = 1; /* PWM */ return sprintf(buf, "%u\n", mode); } /* * Check whether a given temperature source can ever be useful. * Returns the number of selectable temperature channels which are * enabled. */ static int w83795_tss_useful(const struct w83795_data *data, int tsrc) { int useful = 0, i; for (i = 0; i < 4; i++) { if (tss_map[i][tsrc] == TSS_MAP_RESERVED) continue; if (tss_map[i][tsrc] < 6) /* Analog */ useful += (data->has_temp >> tss_map[i][tsrc]) & 1; else /* Digital */ useful += (data->has_dts >> (tss_map[i][tsrc] - 6)) & 1; } return useful; } static ssize_t show_temp_src(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); struct w83795_data *data = w83795_update_pwm_config(dev); int index = sensor_attr->index; u8 tmp = data->temp_src[index / 2]; if (index & 1) tmp >>= 4; /* Pick high nibble */ else tmp &= 0x0f; /* Pick low nibble */ /* Look-up the actual temperature channel number */ if (tmp >= 4 || tss_map[tmp][index] == TSS_MAP_RESERVED) return -EINVAL; /* Shouldn't happen */ return sprintf(buf, "%u\n", (unsigned int)tss_map[tmp][index] + 1); } static ssize_t store_temp_src(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; int tmp; unsigned long channel; u8 val = index / 2; if (kstrtoul(buf, 10, &channel) < 0 || channel < 1 || channel > 14) return -EINVAL; /* Check if request can be fulfilled */ for (tmp = 0; tmp < 4; tmp++) { if (tss_map[tmp][index] == channel - 1) break; } if (tmp == 4) /* No match */ return -EINVAL; mutex_lock(&data->update_lock); if (index & 1) { tmp <<= 4; data->temp_src[val] &= 0x0f; } else { data->temp_src[val] &= 0xf0; } data->temp_src[val] |= tmp; w83795_write(client, W83795_REG_TSS(val), data->temp_src[val]); mutex_unlock(&data->update_lock); return count; } #define TEMP_PWM_ENABLE 0 #define TEMP_PWM_FAN_MAP 1 static ssize_t show_temp_pwm_enable(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; u8 tmp = 0xff; switch (nr) { case TEMP_PWM_ENABLE: tmp = (data->pwm_fcms[1] >> index) & 1; if (tmp) tmp = 4; else tmp = 3; break; case TEMP_PWM_FAN_MAP: tmp = data->pwm_tfmr[index]; break; } return sprintf(buf, "%u\n", tmp); } static ssize_t store_temp_pwm_enable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = w83795_update_pwm_config(dev); struct 
sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned long tmp; if (kstrtoul(buf, 10, &tmp) < 0) return -EINVAL; switch (nr) { case TEMP_PWM_ENABLE: if (tmp != 3 && tmp != 4) return -EINVAL; tmp -= 3; mutex_lock(&data->update_lock); data->pwm_fcms[1] &= ~(1 << index); data->pwm_fcms[1] |= tmp << index; w83795_write(client, W83795_REG_FCMS2, data->pwm_fcms[1]); mutex_unlock(&data->update_lock); break; case TEMP_PWM_FAN_MAP: mutex_lock(&data->update_lock); tmp = clamp_val(tmp, 0, 0xff); w83795_write(client, W83795_REG_TFMR(index), tmp); data->pwm_tfmr[index] = tmp; mutex_unlock(&data->update_lock); break; } return count; } #define FANIN_TARGET 0 #define FANIN_TOL 1 static ssize_t show_fanin(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; u16 tmp = 0; switch (nr) { case FANIN_TARGET: tmp = fan_from_reg(data->target_speed[index]); break; case FANIN_TOL: tmp = data->tol_speed; break; } return sprintf(buf, "%u\n", tmp); } static ssize_t store_fanin(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned long val; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; mutex_lock(&data->update_lock); switch (nr) { case FANIN_TARGET: val = fan_to_reg(clamp_val(val, 0, 0xfff)); w83795_write(client, W83795_REG_FTSH(index), val >> 4); w83795_write(client, W83795_REG_FTSL(index), (val << 4) & 0xf0); data->target_speed[index] = val; break; case FANIN_TOL: val = clamp_val(val, 0, 0x3f); w83795_write(client, W83795_REG_TFTS, val); data->tol_speed = val; break; } mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; long tmp = temp_from_reg(data->pwm_temp[index][nr]); return sprintf(buf, "%ld\n", tmp); } static ssize_t store_temp_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned long val; u8 tmp; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; val /= 1000; mutex_lock(&data->update_lock); switch (nr) { case TEMP_PWM_TTTI: val = clamp_val(val, 0, 0x7f); w83795_write(client, W83795_REG_TTTI(index), val); break; case TEMP_PWM_CTFS: val = clamp_val(val, 0, 0x7f); w83795_write(client, W83795_REG_CTFS(index), val); break; case TEMP_PWM_HCT: val = clamp_val(val, 0, 0x0f); tmp = w83795_read(client, W83795_REG_HT(index)); tmp &= 0x0f; tmp |= (val << 4) & 0xf0; w83795_write(client, W83795_REG_HT(index), tmp); break; case TEMP_PWM_HOT: val = clamp_val(val, 0, 0x0f); tmp = w83795_read(client, W83795_REG_HT(index)); tmp &= 0xf0; tmp |= val & 0x0f; w83795_write(client, W83795_REG_HT(index), tmp); break; } 
data->pwm_temp[index][nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_sf4_pwm(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; return sprintf(buf, "%u\n", data->sf4_reg[index][SF4_PWM][nr]); } static ssize_t store_sf4_pwm(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned long val; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; mutex_lock(&data->update_lock); w83795_write(client, W83795_REG_SF4_PWM(index, nr), val); data->sf4_reg[index][SF4_PWM][nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_sf4_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = w83795_update_pwm_config(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; return sprintf(buf, "%u\n", (data->sf4_reg[index][SF4_TEMP][nr]) * 1000); } static ssize_t store_sf4_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; unsigned long val; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; val /= 1000; mutex_lock(&data->update_lock); w83795_write(client, W83795_REG_SF4_TEMP(index, nr), val); data->sf4_reg[index][SF4_TEMP][nr] = val; mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; struct w83795_data *data = w83795_update_device(dev); long temp = temp_from_reg(data->temp[index][nr]); if (nr == TEMP_READ) temp += (data->temp_read_vrlsb[index] >> 6) * 250; return sprintf(buf, "%ld\n", temp); } static ssize_t store_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); long tmp; if (kstrtol(buf, 10, &tmp) < 0) return -EINVAL; mutex_lock(&data->update_lock); data->temp[index][nr] = temp_to_reg(tmp, -128, 127); w83795_write(client, W83795_REG_TEMP[index][nr], data->temp[index][nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_dts_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = dev_get_drvdata(dev); int tmp; if (data->enable_dts & 2) tmp = 5; else tmp = 6; return sprintf(buf, "%d\n", tmp); } static ssize_t show_dts(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; struct w83795_data *data = w83795_update_device(dev); long temp 
= temp_from_reg(data->dts[index]); temp += (data->dts_read_vrlsb[index] >> 6) * 250; return sprintf(buf, "%ld\n", temp); } static ssize_t show_dts_ext(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; struct w83795_data *data = dev_get_drvdata(dev); long temp = temp_from_reg(data->dts_ext[nr]); return sprintf(buf, "%ld\n", temp); } static ssize_t store_dts_ext(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); long tmp; if (kstrtol(buf, 10, &tmp) < 0) return -EINVAL; mutex_lock(&data->update_lock); data->dts_ext[nr] = temp_to_reg(tmp, -128, 127); w83795_write(client, W83795_REG_DTS_EXT(nr), data->dts_ext[nr]); mutex_unlock(&data->update_lock); return count; } static ssize_t show_temp_mode(struct device *dev, struct device_attribute *attr, char *buf) { struct w83795_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; int tmp; if (data->temp_mode & (1 << index)) tmp = 3; /* Thermal diode */ else tmp = 4; /* Thermistor */ return sprintf(buf, "%d\n", tmp); } /* Only for temp1-4 (temp5-6 can only be thermistor) */ static ssize_t store_temp_mode(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int index = sensor_attr->index; int reg_shift; unsigned long val; u8 tmp; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; if ((val != 4) && (val != 3)) return -EINVAL; mutex_lock(&data->update_lock); if (val == 3) { /* Thermal diode */ val = 0x01; data->temp_mode |= 1 << index; } else if (val == 4) { /* Thermistor */ val = 0x03; data->temp_mode &= ~(1 << index); } reg_shift = 2 * index; tmp = w83795_read(client, W83795_REG_TEMP_CTRL2); tmp &= ~(0x03 << reg_shift); tmp |= val << reg_shift; w83795_write(client, W83795_REG_TEMP_CTRL2, tmp); mutex_unlock(&data->update_lock); return count; } /* show/store VIN */ static ssize_t show_in(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; struct w83795_data *data = w83795_update_device(dev); u16 val = data->in[index][nr]; u8 lsb_idx; switch (nr) { case IN_READ: /* calculate this value again by sensors as sensors3.conf */ if ((index >= 17) && !((data->has_gain >> (index - 17)) & 1)) val *= 8; break; case IN_MAX: case IN_LOW: lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX]; val <<= 2; val |= (data->in_lsb[lsb_idx][nr] >> IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]) & 0x03; if ((index >= 17) && !((data->has_gain >> (index - 17)) & 1)) val *= 8; break; } val = in_from_reg(index, val); return sprintf(buf, "%d\n", val); } static ssize_t store_in(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; int index = sensor_attr->index; struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); unsigned long val; u8 tmp; 
u8 lsb_idx; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; val = in_to_reg(index, val); if ((index >= 17) && !((data->has_gain >> (index - 17)) & 1)) val /= 8; val = clamp_val(val, 0, 0x3FF); mutex_lock(&data->update_lock); lsb_idx = IN_LSB_SHIFT_IDX[index][IN_LSB_IDX]; tmp = w83795_read(client, IN_LSB_REG(lsb_idx, nr)); tmp &= ~(0x03 << IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]); tmp |= (val & 0x03) << IN_LSB_SHIFT_IDX[index][IN_LSB_SHIFT]; w83795_write(client, IN_LSB_REG(lsb_idx, nr), tmp); data->in_lsb[lsb_idx][nr] = tmp; tmp = (val >> 2) & 0xff; w83795_write(client, W83795_REG_IN[index][nr], tmp); data->in[index][nr] = tmp; mutex_unlock(&data->update_lock); return count; } #ifdef CONFIG_SENSORS_W83795_FANCTRL static ssize_t show_sf_setup(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; struct w83795_data *data = w83795_update_pwm_config(dev); u16 val = data->setup_pwm[nr]; switch (nr) { case SETUP_PWM_UPTIME: case SETUP_PWM_DOWNTIME: val = time_from_reg(val); break; } return sprintf(buf, "%d\n", val); } static ssize_t store_sf_setup(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute_2 *sensor_attr = to_sensor_dev_attr_2(attr); int nr = sensor_attr->nr; struct i2c_client *client = to_i2c_client(dev); struct w83795_data *data = i2c_get_clientdata(client); unsigned long val; if (kstrtoul(buf, 10, &val) < 0) return -EINVAL; switch (nr) { case SETUP_PWM_DEFAULT: val = clamp_val(val, 0, 0xff); break; case SETUP_PWM_UPTIME: case SETUP_PWM_DOWNTIME: val = time_to_reg(val); if (val == 0) return -EINVAL; break; } mutex_lock(&data->update_lock); data->setup_pwm[nr] = val; w83795_write(client, W83795_REG_SETUP_PWM(nr), val); mutex_unlock(&data->update_lock); return count; } #endif #define NOT_USED -1 /* * Don't change the attribute order, _max, _min and _beep are accessed by index * somewhere else in the code */ #define SENSOR_ATTR_IN(index) { \ SENSOR_ATTR_2(in##index##_input, S_IRUGO, show_in, NULL, \ IN_READ, index), \ SENSOR_ATTR_2(in##index##_max, S_IRUGO | S_IWUSR, show_in, \ store_in, IN_MAX, index), \ SENSOR_ATTR_2(in##index##_min, S_IRUGO | S_IWUSR, show_in, \ store_in, IN_LOW, index), \ SENSOR_ATTR_2(in##index##_alarm, S_IRUGO, show_alarm_beep, \ NULL, ALARM_STATUS, index + ((index > 14) ? 1 : 0)), \ SENSOR_ATTR_2(in##index##_beep, S_IWUSR | S_IRUGO, \ show_alarm_beep, store_beep, BEEP_ENABLE, \ index + ((index > 14) ? 
1 : 0)) } /* * Don't change the attribute order, _beep is accessed by index * somewhere else in the code */ #define SENSOR_ATTR_FAN(index) { \ SENSOR_ATTR_2(fan##index##_input, S_IRUGO, show_fan, \ NULL, FAN_INPUT, index - 1), \ SENSOR_ATTR_2(fan##index##_min, S_IWUSR | S_IRUGO, \ show_fan, store_fan_min, FAN_MIN, index - 1), \ SENSOR_ATTR_2(fan##index##_alarm, S_IRUGO, show_alarm_beep, \ NULL, ALARM_STATUS, index + 31), \ SENSOR_ATTR_2(fan##index##_beep, S_IWUSR | S_IRUGO, \ show_alarm_beep, store_beep, BEEP_ENABLE, index + 31) } #define SENSOR_ATTR_PWM(index) { \ SENSOR_ATTR_2(pwm##index, S_IWUSR | S_IRUGO, show_pwm, \ store_pwm, PWM_OUTPUT, index - 1), \ SENSOR_ATTR_2(pwm##index##_enable, S_IWUSR | S_IRUGO, \ show_pwm_enable, store_pwm_enable, NOT_USED, index - 1), \ SENSOR_ATTR_2(pwm##index##_mode, S_IRUGO, \ show_pwm_mode, NULL, NOT_USED, index - 1), \ SENSOR_ATTR_2(pwm##index##_freq, S_IWUSR | S_IRUGO, \ show_pwm, store_pwm, PWM_FREQ, index - 1), \ SENSOR_ATTR_2(pwm##index##_nonstop, S_IWUSR | S_IRUGO, \ show_pwm, store_pwm, PWM_NONSTOP, index - 1), \ SENSOR_ATTR_2(pwm##index##_start, S_IWUSR | S_IRUGO, \ show_pwm, store_pwm, PWM_START, index - 1), \ SENSOR_ATTR_2(pwm##index##_stop_time, S_IWUSR | S_IRUGO, \ show_pwm, store_pwm, PWM_STOP_TIME, index - 1), \ SENSOR_ATTR_2(fan##index##_target, S_IWUSR | S_IRUGO, \ show_fanin, store_fanin, FANIN_TARGET, index - 1) } /* * Don't change the attribute order, _beep is accessed by index * somewhere else in the code */ #define SENSOR_ATTR_DTS(index) { \ SENSOR_ATTR_2(temp##index##_type, S_IRUGO , \ show_dts_mode, NULL, NOT_USED, index - 7), \ SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_dts, \ NULL, NOT_USED, index - 7), \ SENSOR_ATTR_2(temp##index##_crit, S_IRUGO | S_IWUSR, show_dts_ext, \ store_dts_ext, DTS_CRIT, NOT_USED), \ SENSOR_ATTR_2(temp##index##_crit_hyst, S_IRUGO | S_IWUSR, \ show_dts_ext, store_dts_ext, DTS_CRIT_HYST, NOT_USED), \ SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_dts_ext, \ store_dts_ext, DTS_WARN, NOT_USED), \ SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \ show_dts_ext, store_dts_ext, DTS_WARN_HYST, NOT_USED), \ SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \ show_alarm_beep, NULL, ALARM_STATUS, index + 17), \ SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ show_alarm_beep, store_beep, BEEP_ENABLE, index + 17) } /* * Don't change the attribute order, _beep is accessed by index * somewhere else in the code */ #define SENSOR_ATTR_TEMP(index) { \ SENSOR_ATTR_2(temp##index##_type, S_IRUGO | (index < 5 ? S_IWUSR : 0), \ show_temp_mode, store_temp_mode, NOT_USED, index - 1), \ SENSOR_ATTR_2(temp##index##_input, S_IRUGO, show_temp, \ NULL, TEMP_READ, index - 1), \ SENSOR_ATTR_2(temp##index##_crit, S_IRUGO | S_IWUSR, show_temp, \ store_temp, TEMP_CRIT, index - 1), \ SENSOR_ATTR_2(temp##index##_crit_hyst, S_IRUGO | S_IWUSR, \ show_temp, store_temp, TEMP_CRIT_HYST, index - 1), \ SENSOR_ATTR_2(temp##index##_max, S_IRUGO | S_IWUSR, show_temp, \ store_temp, TEMP_WARN, index - 1), \ SENSOR_ATTR_2(temp##index##_max_hyst, S_IRUGO | S_IWUSR, \ show_temp, store_temp, TEMP_WARN_HYST, index - 1), \ SENSOR_ATTR_2(temp##index##_alarm, S_IRUGO, \ show_alarm_beep, NULL, ALARM_STATUS, \ index + (index > 4 ? 11 : 17)), \ SENSOR_ATTR_2(temp##index##_beep, S_IWUSR | S_IRUGO, \ show_alarm_beep, store_beep, BEEP_ENABLE, \ index + (index > 4 ? 
11 : 17)), \ SENSOR_ATTR_2(temp##index##_pwm_enable, S_IWUSR | S_IRUGO, \ show_temp_pwm_enable, store_temp_pwm_enable, \ TEMP_PWM_ENABLE, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_channels_pwm, S_IWUSR | S_IRUGO, \ show_temp_pwm_enable, store_temp_pwm_enable, \ TEMP_PWM_FAN_MAP, index - 1), \ SENSOR_ATTR_2(thermal_cruise##index, S_IWUSR | S_IRUGO, \ show_temp_pwm, store_temp_pwm, TEMP_PWM_TTTI, index - 1), \ SENSOR_ATTR_2(temp##index##_warn, S_IWUSR | S_IRUGO, \ show_temp_pwm, store_temp_pwm, TEMP_PWM_CTFS, index - 1), \ SENSOR_ATTR_2(temp##index##_warn_hyst, S_IWUSR | S_IRUGO, \ show_temp_pwm, store_temp_pwm, TEMP_PWM_HCT, index - 1), \ SENSOR_ATTR_2(temp##index##_operation_hyst, S_IWUSR | S_IRUGO, \ show_temp_pwm, store_temp_pwm, TEMP_PWM_HOT, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point1_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 0, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point2_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 1, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point3_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 2, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point4_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 3, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point5_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 4, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point6_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 5, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point7_pwm, S_IRUGO | S_IWUSR, \ show_sf4_pwm, store_sf4_pwm, 6, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point1_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 0, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point2_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 1, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point3_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 2, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point4_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 3, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point5_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 4, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point6_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 5, index - 1), \ SENSOR_ATTR_2(temp##index##_auto_point7_temp, S_IRUGO | S_IWUSR,\ show_sf4_temp, store_sf4_temp, 6, index - 1) } static struct sensor_device_attribute_2 w83795_in[][5] = { SENSOR_ATTR_IN(0), SENSOR_ATTR_IN(1), SENSOR_ATTR_IN(2), SENSOR_ATTR_IN(3), SENSOR_ATTR_IN(4), SENSOR_ATTR_IN(5), SENSOR_ATTR_IN(6), SENSOR_ATTR_IN(7), SENSOR_ATTR_IN(8), SENSOR_ATTR_IN(9), SENSOR_ATTR_IN(10), SENSOR_ATTR_IN(11), SENSOR_ATTR_IN(12), SENSOR_ATTR_IN(13), SENSOR_ATTR_IN(14), SENSOR_ATTR_IN(15), SENSOR_ATTR_IN(16), SENSOR_ATTR_IN(17), SENSOR_ATTR_IN(18), SENSOR_ATTR_IN(19), SENSOR_ATTR_IN(20), }; static const struct sensor_device_attribute_2 w83795_fan[][4] = { SENSOR_ATTR_FAN(1), SENSOR_ATTR_FAN(2), SENSOR_ATTR_FAN(3), SENSOR_ATTR_FAN(4), SENSOR_ATTR_FAN(5), SENSOR_ATTR_FAN(6), SENSOR_ATTR_FAN(7), SENSOR_ATTR_FAN(8), SENSOR_ATTR_FAN(9), SENSOR_ATTR_FAN(10), SENSOR_ATTR_FAN(11), SENSOR_ATTR_FAN(12), SENSOR_ATTR_FAN(13), SENSOR_ATTR_FAN(14), }; static const struct sensor_device_attribute_2 w83795_temp[][28] = { SENSOR_ATTR_TEMP(1), SENSOR_ATTR_TEMP(2), SENSOR_ATTR_TEMP(3), SENSOR_ATTR_TEMP(4), SENSOR_ATTR_TEMP(5), SENSOR_ATTR_TEMP(6), }; static const struct sensor_device_attribute_2 w83795_dts[][8] = { SENSOR_ATTR_DTS(7), SENSOR_ATTR_DTS(8), SENSOR_ATTR_DTS(9), SENSOR_ATTR_DTS(10), SENSOR_ATTR_DTS(11), 
SENSOR_ATTR_DTS(12), SENSOR_ATTR_DTS(13), SENSOR_ATTR_DTS(14), }; static const struct sensor_device_attribute_2 w83795_pwm[][8] = { SENSOR_ATTR_PWM(1), SENSOR_ATTR_PWM(2), SENSOR_ATTR_PWM(3), SENSOR_ATTR_PWM(4), SENSOR_ATTR_PWM(5), SENSOR_ATTR_PWM(6), SENSOR_ATTR_PWM(7), SENSOR_ATTR_PWM(8), }; static const struct sensor_device_attribute_2 w83795_tss[6] = { SENSOR_ATTR_2(temp1_source_sel, S_IWUSR | S_IRUGO, show_temp_src, store_temp_src, NOT_USED, 0), SENSOR_ATTR_2(temp2_source_sel, S_IWUSR | S_IRUGO, show_temp_src, store_temp_src, NOT_USED, 1), SENSOR_ATTR_2(temp3_source_sel, S_IWUSR | S_IRUGO, show_temp_src, store_temp_src, NOT_USED, 2), SENSOR_ATTR_2(temp4_source_sel, S_IWUSR | S_IRUGO, show_temp_src, store_temp_src, NOT_USED, 3), SENSOR_ATTR_2(temp5_source_sel, S_IWUSR | S_IRUGO, show_temp_src, store_temp_src, NOT_USED, 4), SENSOR_ATTR_2(temp6_source_sel, S_IWUSR | S_IRUGO, show_temp_src, store_temp_src, NOT_USED, 5), }; static const struct sensor_device_attribute_2 sda_single_files[] = { SENSOR_ATTR_2(intrusion0_alarm, S_IWUSR | S_IRUGO, show_alarm_beep, store_chassis_clear, ALARM_STATUS, 46), #ifdef CONFIG_SENSORS_W83795_FANCTRL SENSOR_ATTR_2(speed_cruise_tolerance, S_IWUSR | S_IRUGO, show_fanin, store_fanin, FANIN_TOL, NOT_USED), SENSOR_ATTR_2(pwm_default, S_IWUSR | S_IRUGO, show_sf_setup, store_sf_setup, SETUP_PWM_DEFAULT, NOT_USED), SENSOR_ATTR_2(pwm_uptime, S_IWUSR | S_IRUGO, show_sf_setup, store_sf_setup, SETUP_PWM_UPTIME, NOT_USED), SENSOR_ATTR_2(pwm_downtime, S_IWUSR | S_IRUGO, show_sf_setup, store_sf_setup, SETUP_PWM_DOWNTIME, NOT_USED), #endif }; static const struct sensor_device_attribute_2 sda_beep_files[] = { SENSOR_ATTR_2(intrusion0_beep, S_IWUSR | S_IRUGO, show_alarm_beep, store_beep, BEEP_ENABLE, 46), SENSOR_ATTR_2(beep_enable, S_IWUSR | S_IRUGO, show_alarm_beep, store_beep, BEEP_ENABLE, 47), }; /* * Driver interface */ static void w83795_init_client(struct i2c_client *client) { struct w83795_data *data = i2c_get_clientdata(client); static const u16 clkin[4] = { /* in kHz */ 14318, 24000, 33333, 48000 }; u8 config; if (reset) w83795_write(client, W83795_REG_CONFIG, 0x80); /* Start monitoring if needed */ config = w83795_read(client, W83795_REG_CONFIG); if (!(config & W83795_REG_CONFIG_START)) { dev_info(&client->dev, "Enabling monitoring operations\n"); w83795_write(client, W83795_REG_CONFIG, config | W83795_REG_CONFIG_START); } data->clkin = clkin[(config >> 3) & 0x3]; dev_dbg(&client->dev, "clkin = %u kHz\n", data->clkin); } static int w83795_get_device_id(struct i2c_client *client) { int device_id; device_id = i2c_smbus_read_byte_data(client, W83795_REG_DEVICEID); /* * Special case for rev. 
A chips; can't be checked first because later * revisions emulate this for compatibility */ if (device_id < 0 || (device_id & 0xf0) != 0x50) { int alt_id; alt_id = i2c_smbus_read_byte_data(client, W83795_REG_DEVICEID_A); if (alt_id == 0x50) device_id = alt_id; } return device_id; } /* Return 0 if detection is successful, -ENODEV otherwise */ static int w83795_detect(struct i2c_client *client, struct i2c_board_info *info) { int bank, vendor_id, device_id, expected, i2c_addr, config; struct i2c_adapter *adapter = client->adapter; unsigned short address = client->addr; const char *chip_name; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -ENODEV; bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL); if (bank < 0 || (bank & 0x7c)) { dev_dbg(&adapter->dev, "w83795: Detection failed at addr 0x%02hx, check %s\n", address, "bank"); return -ENODEV; } /* Check Nuvoton vendor ID */ vendor_id = i2c_smbus_read_byte_data(client, W83795_REG_VENDORID); expected = bank & 0x80 ? 0x5c : 0xa3; if (vendor_id != expected) { dev_dbg(&adapter->dev, "w83795: Detection failed at addr 0x%02hx, check %s\n", address, "vendor id"); return -ENODEV; } /* Check device ID */ device_id = w83795_get_device_id(client) | (i2c_smbus_read_byte_data(client, W83795_REG_CHIPID) << 8); if ((device_id >> 4) != 0x795) { dev_dbg(&adapter->dev, "w83795: Detection failed at addr 0x%02hx, check %s\n", address, "device id\n"); return -ENODEV; } /* * If Nuvoton chip, address of chip and W83795_REG_I2C_ADDR * should match */ if ((bank & 0x07) == 0) { i2c_addr = i2c_smbus_read_byte_data(client, W83795_REG_I2C_ADDR); if ((i2c_addr & 0x7f) != address) { dev_dbg(&adapter->dev, "w83795: Detection failed at addr 0x%02hx, " "check %s\n", address, "i2c addr"); return -ENODEV; } } /* * Check 795 chip type: 795G or 795ADG * Usually we don't write to chips during detection, but here we don't * quite have the choice; hopefully it's OK, we are about to return * success anyway */ if ((bank & 0x07) != 0) i2c_smbus_write_byte_data(client, W83795_REG_BANKSEL, bank & ~0x07); config = i2c_smbus_read_byte_data(client, W83795_REG_CONFIG); if (config & W83795_REG_CONFIG_CONFIG48) chip_name = "w83795adg"; else chip_name = "w83795g"; strscpy(info->type, chip_name, I2C_NAME_SIZE); dev_info(&adapter->dev, "Found %s rev. 
%c at 0x%02hx\n", chip_name, 'A' + (device_id & 0xf), address); return 0; } #ifdef CONFIG_SENSORS_W83795_FANCTRL #define NUM_PWM_ATTRIBUTES ARRAY_SIZE(w83795_pwm[0]) #define NUM_TEMP_ATTRIBUTES ARRAY_SIZE(w83795_temp[0]) #else #define NUM_PWM_ATTRIBUTES 4 #define NUM_TEMP_ATTRIBUTES 8 #endif static int w83795_handle_files(struct device *dev, int (*fn)(struct device *, const struct device_attribute *)) { struct w83795_data *data = dev_get_drvdata(dev); int err, i, j; for (i = 0; i < ARRAY_SIZE(w83795_in); i++) { if (!(data->has_in & (1 << i))) continue; for (j = 0; j < ARRAY_SIZE(w83795_in[0]); j++) { if (j == 4 && !data->enable_beep) continue; err = fn(dev, &w83795_in[i][j].dev_attr); if (err) return err; } } for (i = 0; i < ARRAY_SIZE(w83795_fan); i++) { if (!(data->has_fan & (1 << i))) continue; for (j = 0; j < ARRAY_SIZE(w83795_fan[0]); j++) { if (j == 3 && !data->enable_beep) continue; err = fn(dev, &w83795_fan[i][j].dev_attr); if (err) return err; } } for (i = 0; i < ARRAY_SIZE(w83795_tss); i++) { j = w83795_tss_useful(data, i); if (!j) continue; err = fn(dev, &w83795_tss[i].dev_attr); if (err) return err; } for (i = 0; i < ARRAY_SIZE(sda_single_files); i++) { err = fn(dev, &sda_single_files[i].dev_attr); if (err) return err; } if (data->enable_beep) { for (i = 0; i < ARRAY_SIZE(sda_beep_files); i++) { err = fn(dev, &sda_beep_files[i].dev_attr); if (err) return err; } } for (i = 0; i < data->has_pwm; i++) { for (j = 0; j < NUM_PWM_ATTRIBUTES; j++) { err = fn(dev, &w83795_pwm[i][j].dev_attr); if (err) return err; } } for (i = 0; i < ARRAY_SIZE(w83795_temp); i++) { if (!(data->has_temp & (1 << i))) continue; for (j = 0; j < NUM_TEMP_ATTRIBUTES; j++) { if (j == 7 && !data->enable_beep) continue; err = fn(dev, &w83795_temp[i][j].dev_attr); if (err) return err; } } if (data->enable_dts) { for (i = 0; i < ARRAY_SIZE(w83795_dts); i++) { if (!(data->has_dts & (1 << i))) continue; for (j = 0; j < ARRAY_SIZE(w83795_dts[0]); j++) { if (j == 7 && !data->enable_beep) continue; err = fn(dev, &w83795_dts[i][j].dev_attr); if (err) return err; } } } return 0; } /* We need a wrapper that fits in w83795_handle_files */ static int device_remove_file_wrapper(struct device *dev, const struct device_attribute *attr) { device_remove_file(dev, attr); return 0; } static void w83795_check_dynamic_in_limits(struct i2c_client *client) { struct w83795_data *data = i2c_get_clientdata(client); u8 vid_ctl; int i, err_max, err_min; vid_ctl = w83795_read(client, W83795_REG_VID_CTRL); /* Return immediately if VRM isn't configured */ if ((vid_ctl & 0x07) == 0x00 || (vid_ctl & 0x07) == 0x07) return; data->has_dyn_in = (vid_ctl >> 3) & 0x07; for (i = 0; i < 2; i++) { if (!(data->has_dyn_in & (1 << i))) continue; /* Voltage limits in dynamic mode, switch to read-only */ err_max = sysfs_chmod_file(&client->dev.kobj, &w83795_in[i][2].dev_attr.attr, S_IRUGO); err_min = sysfs_chmod_file(&client->dev.kobj, &w83795_in[i][3].dev_attr.attr, S_IRUGO); if (err_max || err_min) dev_warn(&client->dev, "Failed to set in%d limits read-only (%d, %d)\n", i, err_max, err_min); else dev_info(&client->dev, "in%d limits set dynamically from VID\n", i); } } /* Check pins that can be used for either temperature or voltage monitoring */ static void w83795_apply_temp_config(struct w83795_data *data, u8 config, int temp_chan, int in_chan) { /* config is a 2-bit value */ switch (config) { case 0x2: /* Voltage monitoring */ data->has_in |= 1 << in_chan; break; case 0x1: /* Thermal diode */ if (temp_chan >= 4) break; data->temp_mode |= 1 << temp_chan; 
fallthrough; case 0x3: /* Thermistor */ data->has_temp |= 1 << temp_chan; break; } } static int w83795_probe(struct i2c_client *client) { int i; u8 tmp; struct device *dev = &client->dev; struct w83795_data *data; int err; data = devm_kzalloc(dev, sizeof(struct w83795_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); data->chip_type = (uintptr_t)i2c_get_match_data(client); data->bank = i2c_smbus_read_byte_data(client, W83795_REG_BANKSEL); mutex_init(&data->update_lock); /* Initialize the chip */ w83795_init_client(client); /* Check which voltages and fans are present */ data->has_in = w83795_read(client, W83795_REG_VOLT_CTRL1) | (w83795_read(client, W83795_REG_VOLT_CTRL2) << 8); data->has_fan = w83795_read(client, W83795_REG_FANIN_CTRL1) | (w83795_read(client, W83795_REG_FANIN_CTRL2) << 8); /* Check which analog temperatures and extra voltages are present */ tmp = w83795_read(client, W83795_REG_TEMP_CTRL1); if (tmp & 0x20) data->enable_dts = 1; w83795_apply_temp_config(data, (tmp >> 2) & 0x3, 5, 16); w83795_apply_temp_config(data, tmp & 0x3, 4, 15); tmp = w83795_read(client, W83795_REG_TEMP_CTRL2); w83795_apply_temp_config(data, tmp >> 6, 3, 20); w83795_apply_temp_config(data, (tmp >> 4) & 0x3, 2, 19); w83795_apply_temp_config(data, (tmp >> 2) & 0x3, 1, 18); w83795_apply_temp_config(data, tmp & 0x3, 0, 17); /* Check DTS enable status */ if (data->enable_dts) { if (1 & w83795_read(client, W83795_REG_DTSC)) data->enable_dts |= 2; data->has_dts = w83795_read(client, W83795_REG_DTSE); } /* Report PECI Tbase values */ if (data->enable_dts == 1) { for (i = 0; i < 8; i++) { if (!(data->has_dts & (1 << i))) continue; tmp = w83795_read(client, W83795_REG_PECI_TBASE(i)); dev_info(&client->dev, "PECI agent %d Tbase temperature: %u\n", i + 1, (unsigned int)tmp & 0x7f); } } data->has_gain = w83795_read(client, W83795_REG_VMIGB_CTRL) & 0x0f; /* pwm and smart fan */ if (data->chip_type == w83795g) data->has_pwm = 8; else data->has_pwm = 2; /* Check if BEEP pin is available */ if (data->chip_type == w83795g) { /* The W83795G has a dedicated BEEP pin */ data->enable_beep = 1; } else { /* * The W83795ADG has a shared pin for OVT# and BEEP, so you * can't have both */ tmp = w83795_read(client, W83795_REG_OVT_CFG); if ((tmp & OVT_CFG_SEL) == 0) data->enable_beep = 1; } err = w83795_handle_files(dev, device_create_file); if (err) goto exit_remove; if (data->chip_type == w83795g) w83795_check_dynamic_in_limits(client); data->hwmon_dev = hwmon_device_register(dev); if (IS_ERR(data->hwmon_dev)) { err = PTR_ERR(data->hwmon_dev); goto exit_remove; } return 0; exit_remove: w83795_handle_files(dev, device_remove_file_wrapper); return err; } static void w83795_remove(struct i2c_client *client) { struct w83795_data *data = i2c_get_clientdata(client); hwmon_device_unregister(data->hwmon_dev); w83795_handle_files(&client->dev, device_remove_file_wrapper); } static const struct i2c_device_id w83795_id[] = { { "w83795g", w83795g }, { "w83795adg", w83795adg }, { } }; MODULE_DEVICE_TABLE(i2c, w83795_id); static struct i2c_driver w83795_driver = { .driver = { .name = "w83795", }, .probe = w83795_probe, .remove = w83795_remove, .id_table = w83795_id, .class = I2C_CLASS_HWMON, .detect = w83795_detect, .address_list = normal_i2c, }; module_i2c_driver(w83795_driver); MODULE_AUTHOR("Wei Song, Jean Delvare <[email protected]>"); MODULE_DESCRIPTION("W83795G/ADG hardware monitoring driver"); MODULE_LICENSE("GPL");
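/*
 * Illustrative sketch, not part of the driver above: it only demonstrates
 * how a 16-bit register address of the kind used by w83795_read() and
 * w83795_write() splits into a bank-select byte (high byte) and an in-bank
 * offset (low byte), and why caching the current bank saves bus traffic.
 * This is a standalone user-space program; fake_bus, demo_set_bank and
 * demo_access are hypothetical names that do not exist in the driver.
 */
#include <stdio.h>
#include <stdint.h>

struct fake_bus {
	uint8_t current_bank;		/* plays the role of data->bank & 0x07 */
	unsigned int bank_writes;	/* counts simulated bank-select writes */
};

/* Select a bank only if it differs from the cached one */
static void demo_set_bank(struct fake_bus *bus, uint8_t bank)
{
	if ((bus->current_bank & 0x07) == bank)
		return;			/* already selected, skip the extra write */
	bus->current_bank = bank;
	bus->bank_writes++;
}

/* Split a 16-bit register address the same way the driver does */
static void demo_access(struct fake_bus *bus, uint16_t reg)
{
	uint8_t bank = reg >> 8;	/* high byte: bank number */
	uint8_t offset = reg & 0xff;	/* low byte: register within the bank */

	demo_set_bank(bus, bank);
	printf("reg 0x%03x -> bank %u, offset 0x%02x\n",
	       (unsigned int)reg, (unsigned int)bank, (unsigned int)offset);
}

int main(void)
{
	struct fake_bus bus = { .current_bank = 0 };

	/* Two accesses within the same bank cost only one bank select */
	demo_access(&bus, 0x100);
	demo_access(&bus, 0x1b0);
	demo_access(&bus, 0x202);
	printf("bank-select writes: %u\n", bus.bank_writes);
	return 0;
}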
// SPDX-License-Identifier: GPL-2.0 /* * arch/sh/kernel/cpu/sh4a/clock-sh7724.c * * SH7724 clock framework support * * Copyright (C) 2009 Magnus Damm */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/sh_clk.h> #include <asm/clock.h> #include <cpu/sh7724.h> /* SH7724 registers */ #define FRQCRA 0xa4150000 #define FRQCRB 0xa4150004 #define VCLKCR 0xa4150048 #define FCLKACR 0xa4150008 #define FCLKBCR 0xa415000c #define IRDACLKCR 0xa4150018 #define PLLCR 0xa4150024 #define MSTPCR0 0xa4150030 #define MSTPCR1 0xa4150034 #define MSTPCR2 0xa4150038 #define SPUCLKCR 0xa415003c #define FLLFRQ 0xa4150050 #define LSTATS 0xa4150060 /* Fixed 32 KHz root clock for RTC and Power Management purposes */ static struct clk r_clk = { .rate = 32768, }; /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. */ static struct clk extal_clk = { .rate = 33333333, }; /* The fll multiplies the 32khz r_clk, may be used instead of extal */ static unsigned long fll_recalc(struct clk *clk) { unsigned long mult = 0; unsigned long div = 1; if (__raw_readl(PLLCR) & 0x1000) mult = __raw_readl(FLLFRQ) & 0x3ff; if (__raw_readl(FLLFRQ) & 0x4000) div = 2; return (clk->parent->rate * mult) / div; } static struct sh_clk_ops fll_clk_ops = { .recalc = fll_recalc, }; static struct clk fll_clk = { .ops = &fll_clk_ops, .parent = &r_clk, .flags = CLK_ENABLE_ON_INIT, }; static unsigned long pll_recalc(struct clk *clk) { unsigned long mult = 1; if (__raw_readl(PLLCR) & 0x4000) mult = (((__raw_readl(FRQCRA) >> 24) & 0x3f) + 1) * 2; return clk->parent->rate * mult; } static struct sh_clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .flags = CLK_ENABLE_ON_INIT, }; /* A fixed divide-by-3 block use by the div6 clocks */ static unsigned long div3_recalc(struct clk *clk) { return clk->parent->rate / 3; } static struct sh_clk_ops div3_clk_ops = { .recalc = div3_recalc, }; static struct clk div3_clk = { .ops = &div3_clk_ops, .parent = &pll_clk, }; /* External input clock (pin name: FSIMCKA/FSIMCKB/DV_CLKI ) */ struct clk sh7724_fsimcka_clk = { }; struct clk sh7724_fsimckb_clk = { }; struct clk sh7724_dv_clki = { }; static struct clk *main_clks[] = { &r_clk, &extal_clk, &fll_clk, &pll_clk, &div3_clk, &sh7724_fsimcka_clk, &sh7724_fsimckb_clk, &sh7724_dv_clki, }; static void div4_kick(struct clk *clk) { unsigned long value; /* set KICK bit in FRQCRA to update hardware setting */ value = __raw_readl(FRQCRA); value |= (1 << 31); __raw_writel(value, FRQCRA); } static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = divisors, .nr_divisors = ARRAY_SIZE(divisors), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, .kick = div4_kick, }; enum { DIV4_I, DIV4_SH, DIV4_B, DIV4_P, DIV4_M1, DIV4_NR }; #define DIV4(_reg, _bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, _reg, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_I] = DIV4(FRQCRA, 20, 0x2f7d, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(FRQCRA, 12, 0x2f7c, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4(FRQCRA, 8, 0x2f7c, CLK_ENABLE_ON_INIT), [DIV4_P] = DIV4(FRQCRA, 0, 0x2f7c, 0), [DIV4_M1] = DIV4(FRQCRB, 4, 0x2f7c, CLK_ENABLE_ON_INIT), }; enum { DIV6_V, DIV6_I, DIV6_S, DIV6_FA, DIV6_FB, DIV6_NR }; /* Indices are important - they are the actual src selecting values */ static struct clk *common_parent[] = { 
[0] = &div3_clk, [1] = NULL, }; static struct clk *vclkcr_parent[8] = { [0] = &div3_clk, [2] = &sh7724_dv_clki, [4] = &extal_clk, }; static struct clk *fclkacr_parent[] = { [0] = &div3_clk, [1] = NULL, [2] = &sh7724_fsimcka_clk, [3] = NULL, }; static struct clk *fclkbcr_parent[] = { [0] = &div3_clk, [1] = NULL, [2] = &sh7724_fsimckb_clk, [3] = NULL, }; static struct clk div6_clks[DIV6_NR] = { [DIV6_V] = SH_CLK_DIV6_EXT(VCLKCR, 0, vclkcr_parent, ARRAY_SIZE(vclkcr_parent), 12, 3), [DIV6_I] = SH_CLK_DIV6_EXT(IRDACLKCR, 0, common_parent, ARRAY_SIZE(common_parent), 6, 1), [DIV6_S] = SH_CLK_DIV6_EXT(SPUCLKCR, CLK_ENABLE_ON_INIT, common_parent, ARRAY_SIZE(common_parent), 6, 1), [DIV6_FA] = SH_CLK_DIV6_EXT(FCLKACR, 0, fclkacr_parent, ARRAY_SIZE(fclkacr_parent), 6, 2), [DIV6_FB] = SH_CLK_DIV6_EXT(FCLKBCR, 0, fclkbcr_parent, ARRAY_SIZE(fclkbcr_parent), 6, 2), }; static struct clk mstp_clks[HWBLK_NR] = { [HWBLK_TLB] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 31, CLK_ENABLE_ON_INIT), [HWBLK_IC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 30, CLK_ENABLE_ON_INIT), [HWBLK_OC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 29, CLK_ENABLE_ON_INIT), [HWBLK_RSMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 28, CLK_ENABLE_ON_INIT), [HWBLK_ILMEM] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 27, CLK_ENABLE_ON_INIT), [HWBLK_L2C] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 26, CLK_ENABLE_ON_INIT), [HWBLK_FPU] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 24, CLK_ENABLE_ON_INIT), [HWBLK_INTC] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 22, CLK_ENABLE_ON_INIT), [HWBLK_DMAC0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 21, 0), [HWBLK_SHYWAY] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR0, 20, CLK_ENABLE_ON_INIT), [HWBLK_HUDI] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 19, 0), [HWBLK_UBC] = SH_CLK_MSTP32(&div4_clks[DIV4_I], MSTPCR0, 17, 0), [HWBLK_TMU0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 15, 0), [HWBLK_CMT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 14, 0), [HWBLK_RWDT] = SH_CLK_MSTP32(&r_clk, MSTPCR0, 13, 0), [HWBLK_DMAC1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 12, 0), [HWBLK_TMU1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 10, 0), [HWBLK_SCIF0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 9, 0), [HWBLK_SCIF1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 8, 0), [HWBLK_SCIF2] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 7, 0), [HWBLK_SCIF3] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 6, 0), [HWBLK_SCIF4] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 5, 0), [HWBLK_SCIF5] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 4, 0), [HWBLK_MSIOF0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 2, 0), [HWBLK_MSIOF1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR0, 1, 0), [HWBLK_KEYSC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 12, 0), [HWBLK_RTC] = SH_CLK_MSTP32(&r_clk, MSTPCR1, 11, 0), [HWBLK_IIC0] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 9, 0), [HWBLK_IIC1] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 8, 0), [HWBLK_MMC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 29, 0), [HWBLK_ETHER] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 28, 0), [HWBLK_ATAPI] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 26, 0), [HWBLK_TPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 25, 0), [HWBLK_IRDA] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 24, 0), [HWBLK_TSIF] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 22, 0), [HWBLK_USB1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 21, 0), [HWBLK_USB0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 20, 0), [HWBLK_2DG] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 19, 0), [HWBLK_SDHI0] = 
SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 18, 0), [HWBLK_SDHI1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 17, 0), [HWBLK_VEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 15, 0), [HWBLK_CEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 13, 0), [HWBLK_BEU1] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 12, 0), [HWBLK_2DDMAC] = SH_CLK_MSTP32(&div4_clks[DIV4_SH], MSTPCR2, 10, 0), [HWBLK_SPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 9, 0), [HWBLK_JPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 6, 0), [HWBLK_VOU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 5, 0), [HWBLK_BEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 4, 0), [HWBLK_CEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 3, 0), [HWBLK_VEU0] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 2, 0), [HWBLK_VPU] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 1, 0), [HWBLK_LCDC] = SH_CLK_MSTP32(&div4_clks[DIV4_B], MSTPCR2, 0, 0), }; static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("rclk", &r_clk), CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("fll_clk", &fll_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), CLKDEV_CON_ID("div3_clk", &div3_clk), /* DIV4 clocks */ CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("bus_clk", &div4_clks[DIV4_B]), CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("vpu_clk", &div4_clks[DIV4_M1]), /* DIV6 clocks */ CLKDEV_CON_ID("video_clk", &div6_clks[DIV6_V]), CLKDEV_CON_ID("fsia_clk", &div6_clks[DIV6_FA]), CLKDEV_CON_ID("fsib_clk", &div6_clks[DIV6_FB]), CLKDEV_CON_ID("irda_clk", &div6_clks[DIV6_I]), CLKDEV_CON_ID("spu_clk", &div6_clks[DIV6_S]), /* MSTP clocks */ CLKDEV_CON_ID("tlb0", &mstp_clks[HWBLK_TLB]), CLKDEV_CON_ID("ic0", &mstp_clks[HWBLK_IC]), CLKDEV_CON_ID("oc0", &mstp_clks[HWBLK_OC]), CLKDEV_CON_ID("rs0", &mstp_clks[HWBLK_RSMEM]), CLKDEV_CON_ID("ilmem0", &mstp_clks[HWBLK_ILMEM]), CLKDEV_CON_ID("l2c0", &mstp_clks[HWBLK_L2C]), CLKDEV_CON_ID("fpu0", &mstp_clks[HWBLK_FPU]), CLKDEV_CON_ID("intc0", &mstp_clks[HWBLK_INTC]), CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[HWBLK_DMAC0]), CLKDEV_CON_ID("sh0", &mstp_clks[HWBLK_SHYWAY]), CLKDEV_CON_ID("hudi0", &mstp_clks[HWBLK_HUDI]), CLKDEV_CON_ID("ubc0", &mstp_clks[HWBLK_UBC]), CLKDEV_ICK_ID("fck", "sh-tmu.0", &mstp_clks[HWBLK_TMU0]), CLKDEV_ICK_ID("fck", "sh-tmu.1", &mstp_clks[HWBLK_TMU1]), CLKDEV_ICK_ID("fck", "sh-cmt-32.0", &mstp_clks[HWBLK_CMT]), CLKDEV_DEV_ID("sh-wdt.0", &mstp_clks[HWBLK_RWDT]), CLKDEV_DEV_ID("sh-dma-engine.1", &mstp_clks[HWBLK_DMAC1]), CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[HWBLK_SCIF0]), CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[HWBLK_SCIF1]), CLKDEV_DEV_ID("sh-sci.2", &mstp_clks[HWBLK_SCIF2]), CLKDEV_DEV_ID("sh-sci.3", &mstp_clks[HWBLK_SCIF3]), CLKDEV_DEV_ID("sh-sci.4", &mstp_clks[HWBLK_SCIF4]), CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[HWBLK_SCIF5]), CLKDEV_DEV_ID("spi_sh_msiof.0", &mstp_clks[HWBLK_MSIOF0]), CLKDEV_DEV_ID("spi_sh_msiof.1", &mstp_clks[HWBLK_MSIOF1]), CLKDEV_DEV_ID("sh_keysc.0", &mstp_clks[HWBLK_KEYSC]), CLKDEV_CON_ID("rtc0", &mstp_clks[HWBLK_RTC]), CLKDEV_DEV_ID("i2c-sh_mobile.0", &mstp_clks[HWBLK_IIC0]), CLKDEV_DEV_ID("i2c-sh_mobile.1", &mstp_clks[HWBLK_IIC1]), CLKDEV_DEV_ID("sh_mmcif.0", &mstp_clks[HWBLK_MMC]), CLKDEV_DEV_ID("sh7724-ether.0", &mstp_clks[HWBLK_ETHER]), CLKDEV_CON_ID("atapi0", &mstp_clks[HWBLK_ATAPI]), CLKDEV_CON_ID("tpu0", &mstp_clks[HWBLK_TPU]), CLKDEV_CON_ID("irda0", &mstp_clks[HWBLK_IRDA]), CLKDEV_CON_ID("tsif0", &mstp_clks[HWBLK_TSIF]), CLKDEV_DEV_ID("renesas_usbhs.1", &mstp_clks[HWBLK_USB1]), 
CLKDEV_DEV_ID("renesas_usbhs.0", &mstp_clks[HWBLK_USB0]), CLKDEV_CON_ID("usb1", &mstp_clks[HWBLK_USB1]), CLKDEV_CON_ID("usb0", &mstp_clks[HWBLK_USB0]), CLKDEV_CON_ID("2dg0", &mstp_clks[HWBLK_2DG]), CLKDEV_DEV_ID("sh_mobile_sdhi.0", &mstp_clks[HWBLK_SDHI0]), CLKDEV_DEV_ID("sh_mobile_sdhi.1", &mstp_clks[HWBLK_SDHI1]), CLKDEV_CON_ID("veu1", &mstp_clks[HWBLK_VEU1]), CLKDEV_DEV_ID("renesas-ceu.1", &mstp_clks[HWBLK_CEU1]), CLKDEV_CON_ID("beu1", &mstp_clks[HWBLK_BEU1]), CLKDEV_CON_ID("2ddmac0", &mstp_clks[HWBLK_2DDMAC]), CLKDEV_DEV_ID("sh_fsi.0", &mstp_clks[HWBLK_SPU]), CLKDEV_CON_ID("jpu0", &mstp_clks[HWBLK_JPU]), CLKDEV_DEV_ID("sh-vou", &mstp_clks[HWBLK_VOU]), CLKDEV_CON_ID("beu0", &mstp_clks[HWBLK_BEU0]), CLKDEV_DEV_ID("renesas-ceu.0", &mstp_clks[HWBLK_CEU0]), CLKDEV_CON_ID("veu0", &mstp_clks[HWBLK_VEU0]), CLKDEV_CON_ID("vpu0", &mstp_clks[HWBLK_VPU]), CLKDEV_DEV_ID("sh_mobile_lcdc_fb.0", &mstp_clks[HWBLK_LCDC]), }; int __init arch_clk_init(void) { int k, ret = 0; /* autodetect extal or fll configuration */ if (__raw_readl(PLLCR) & 0x1000) pll_clk.parent = &fll_clk; else pll_clk.parent = &extal_clk; for (k = 0; !ret && (k < ARRAY_SIZE(main_clks)); k++) ret = clk_register(main_clks[k]); clkdev_add_table(lookups, ARRAY_SIZE(lookups)); if (!ret) ret = sh_clk_div4_register(div4_clks, DIV4_NR, &div4_table); if (!ret) ret = sh_clk_div6_reparent_register(div6_clks, DIV6_NR); if (!ret) ret = sh_clk_mstp_register(mstp_clks, HWBLK_NR); return ret; }
// SPDX-License-Identifier: GPL-2.0 #include <test_progs.h> #define nr_iters 2 void serial_test_bpf_obj_id(void) { const __u64 array_magic_value = 0xfaceb00c; const __u32 array_key = 0; const char *file = "./test_obj_id.bpf.o"; const char *expected_prog_name = "test_obj_id"; const char *expected_map_name = "test_map_id"; const __u64 nsec_per_sec = 1000000000; struct bpf_object *objs[nr_iters] = {}; struct bpf_link *links[nr_iters] = {}; struct bpf_program *prog; int prog_fds[nr_iters], map_fds[nr_iters]; /* +1 to test for the info_len returned by kernel */ struct bpf_prog_info prog_infos[nr_iters + 1]; struct bpf_map_info map_infos[nr_iters + 1]; struct bpf_link_info link_infos[nr_iters + 1]; /* Each prog only uses one map. +1 to test nr_map_ids * returned by kernel. */ __u32 map_ids[nr_iters + 1]; char jited_insns[128], xlated_insns[128], zeros[128], tp_name[128]; __u32 i, next_id, info_len, nr_id_found; struct timespec real_time_ts, boot_time_ts; int err = 0; __u64 array_value; uid_t my_uid = getuid(); time_t now, load_time; err = bpf_prog_get_fd_by_id(0); ASSERT_LT(err, 0, "bpf_prog_get_fd_by_id"); ASSERT_EQ(errno, ENOENT, "bpf_prog_get_fd_by_id"); err = bpf_map_get_fd_by_id(0); ASSERT_LT(err, 0, "bpf_map_get_fd_by_id"); ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id"); err = bpf_link_get_fd_by_id(0); ASSERT_LT(err, 0, "bpf_map_get_fd_by_id"); ASSERT_EQ(errno, ENOENT, "bpf_map_get_fd_by_id"); /* Check bpf_map_get_info_by_fd() */ bzero(zeros, sizeof(zeros)); for (i = 0; i < nr_iters; i++) { now = time(NULL); err = bpf_prog_test_load(file, BPF_PROG_TYPE_RAW_TRACEPOINT, &objs[i], &prog_fds[i]); /* test_obj_id.o is a dumb prog. It should never fail * to load. */ if (!ASSERT_OK(err, "bpf_prog_test_load")) continue; /* Insert a magic value to the map */ map_fds[i] = bpf_find_map(__func__, objs[i], "test_map_id"); if (!ASSERT_GE(map_fds[i], 0, "bpf_find_map")) goto done; err = bpf_map_update_elem(map_fds[i], &array_key, &array_magic_value, 0); if (!ASSERT_OK(err, "bpf_map_update_elem")) goto done; prog = bpf_object__find_program_by_name(objs[i], "test_obj_id"); if (!ASSERT_OK_PTR(prog, "bpf_object__find_program_by_name")) goto done; links[i] = bpf_program__attach(prog); err = libbpf_get_error(links[i]); if (!ASSERT_OK(err, "bpf_program__attach")) { links[i] = NULL; goto done; } /* Check getting map info */ info_len = sizeof(struct bpf_map_info) * 2; bzero(&map_infos[i], info_len); err = bpf_map_get_info_by_fd(map_fds[i], &map_infos[i], &info_len); if (!ASSERT_OK(err, "bpf_map_get_info_by_fd") || !ASSERT_EQ(map_infos[i].type, BPF_MAP_TYPE_ARRAY, "map_type") || !ASSERT_EQ(map_infos[i].key_size, sizeof(__u32), "key_size") || !ASSERT_EQ(map_infos[i].value_size, sizeof(__u64), "value_size") || !ASSERT_EQ(map_infos[i].max_entries, 1, "max_entries") || !ASSERT_EQ(map_infos[i].map_flags, 0, "map_flags") || !ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "map_info_len") || !ASSERT_STREQ((char *)map_infos[i].name, expected_map_name, "map_name")) goto done; /* Check getting prog info */ info_len = sizeof(struct bpf_prog_info) * 2; bzero(&prog_infos[i], info_len); bzero(jited_insns, sizeof(jited_insns)); bzero(xlated_insns, sizeof(xlated_insns)); prog_infos[i].jited_prog_insns = ptr_to_u64(jited_insns); prog_infos[i].jited_prog_len = sizeof(jited_insns); prog_infos[i].xlated_prog_insns = ptr_to_u64(xlated_insns); prog_infos[i].xlated_prog_len = sizeof(xlated_insns); prog_infos[i].map_ids = ptr_to_u64(map_ids + i); prog_infos[i].nr_map_ids = 2; err = clock_gettime(CLOCK_REALTIME, &real_time_ts); if 
(!ASSERT_OK(err, "clock_gettime")) goto done; err = clock_gettime(CLOCK_BOOTTIME, &boot_time_ts); if (!ASSERT_OK(err, "clock_gettime")) goto done; err = bpf_prog_get_info_by_fd(prog_fds[i], &prog_infos[i], &info_len); load_time = (real_time_ts.tv_sec - boot_time_ts.tv_sec) + (prog_infos[i].load_time / nsec_per_sec); if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") || !ASSERT_EQ(prog_infos[i].type, BPF_PROG_TYPE_RAW_TRACEPOINT, "prog_type") || !ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len") || !ASSERT_FALSE((env.jit_enabled && !prog_infos[i].jited_prog_len), "jited_prog_len") || !ASSERT_FALSE((env.jit_enabled && !memcmp(jited_insns, zeros, sizeof(zeros))), "jited_insns") || !ASSERT_NEQ(prog_infos[i].xlated_prog_len, 0, "xlated_prog_len") || !ASSERT_NEQ(memcmp(xlated_insns, zeros, sizeof(zeros)), 0, "xlated_insns") || !ASSERT_GE(load_time, (now - 60), "load_time") || !ASSERT_LE(load_time, (now + 60), "load_time") || !ASSERT_EQ(prog_infos[i].created_by_uid, my_uid, "created_by_uid") || !ASSERT_EQ(prog_infos[i].nr_map_ids, 1, "nr_map_ids") || !ASSERT_EQ(*(int *)(long)prog_infos[i].map_ids, map_infos[i].id, "map_ids") || !ASSERT_STREQ((char *)prog_infos[i].name, expected_prog_name, "prog_name")) goto done; /* Check getting link info */ info_len = sizeof(struct bpf_link_info) * 2; bzero(&link_infos[i], info_len); link_infos[i].raw_tracepoint.tp_name = ptr_to_u64(&tp_name); link_infos[i].raw_tracepoint.tp_name_len = sizeof(tp_name); err = bpf_link_get_info_by_fd(bpf_link__fd(links[i]), &link_infos[i], &info_len); if (!ASSERT_OK(err, "bpf_link_get_info_by_fd") || !ASSERT_EQ(link_infos[i].type, BPF_LINK_TYPE_RAW_TRACEPOINT, "link_type") || !ASSERT_EQ(link_infos[i].prog_id, prog_infos[i].id, "prog_id") || !ASSERT_EQ(link_infos[i].raw_tracepoint.tp_name, ptr_to_u64(&tp_name), "&tp_name") || !ASSERT_STREQ(u64_to_ptr(link_infos[i].raw_tracepoint.tp_name), "sys_enter", "tp_name")) goto done; } /* Check bpf_prog_get_next_id() */ nr_id_found = 0; next_id = 0; while (!bpf_prog_get_next_id(next_id, &next_id)) { struct bpf_prog_info prog_info = {}; __u32 saved_map_id; int prog_fd, cmp_res; info_len = sizeof(prog_info); prog_fd = bpf_prog_get_fd_by_id(next_id); if (prog_fd < 0 && errno == ENOENT) /* The bpf_prog is in the dead row */ continue; if (!ASSERT_GE(prog_fd, 0, "bpf_prog_get_fd_by_id")) break; for (i = 0; i < nr_iters; i++) if (prog_infos[i].id == next_id) break; if (i == nr_iters) continue; nr_id_found++; /* Negative test: * prog_info.nr_map_ids = 1 * prog_info.map_ids = NULL */ prog_info.nr_map_ids = 1; err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); if (!ASSERT_ERR(err, "bpf_prog_get_info_by_fd") || !ASSERT_EQ(errno, EFAULT, "bpf_prog_get_info_by_fd")) break; bzero(&prog_info, sizeof(prog_info)); info_len = sizeof(prog_info); saved_map_id = *(int *)((long)prog_infos[i].map_ids); prog_info.map_ids = prog_infos[i].map_ids; prog_info.nr_map_ids = 2; err = bpf_prog_get_info_by_fd(prog_fd, &prog_info, &info_len); prog_infos[i].jited_prog_insns = 0; prog_infos[i].xlated_prog_insns = 0; cmp_res = memcmp(&prog_info, &prog_infos[i], info_len); ASSERT_OK(err, "bpf_prog_get_info_by_fd"); ASSERT_EQ(info_len, sizeof(struct bpf_prog_info), "prog_info_len"); ASSERT_OK(cmp_res, "memcmp"); ASSERT_EQ(*(int *)(long)prog_info.map_ids, saved_map_id, "map_id"); close(prog_fd); } ASSERT_EQ(nr_id_found, nr_iters, "prog_nr_id_found"); /* Check bpf_map_get_next_id() */ nr_id_found = 0; next_id = 0; while (!bpf_map_get_next_id(next_id, &next_id)) { struct bpf_map_info map_info = {}; int 
map_fd, cmp_res; info_len = sizeof(map_info); map_fd = bpf_map_get_fd_by_id(next_id); if (map_fd < 0 && errno == ENOENT) /* The bpf_map is in the dead row */ continue; if (!ASSERT_GE(map_fd, 0, "bpf_map_get_fd_by_id")) break; for (i = 0; i < nr_iters; i++) if (map_infos[i].id == next_id) break; if (i == nr_iters) continue; nr_id_found++; err = bpf_map_lookup_elem(map_fd, &array_key, &array_value); if (!ASSERT_OK(err, "bpf_map_lookup_elem")) goto done; err = bpf_map_get_info_by_fd(map_fd, &map_info, &info_len); cmp_res = memcmp(&map_info, &map_infos[i], info_len); ASSERT_OK(err, "bpf_map_get_info_by_fd"); ASSERT_EQ(info_len, sizeof(struct bpf_map_info), "info_len"); ASSERT_OK(cmp_res, "memcmp"); ASSERT_EQ(array_value, array_magic_value, "array_value"); close(map_fd); } ASSERT_EQ(nr_id_found, nr_iters, "map_nr_id_found"); /* Check bpf_link_get_next_id() */ nr_id_found = 0; next_id = 0; while (!bpf_link_get_next_id(next_id, &next_id)) { struct bpf_link_info link_info; int link_fd, cmp_res; info_len = sizeof(link_info); memset(&link_info, 0, info_len); link_fd = bpf_link_get_fd_by_id(next_id); if (link_fd < 0 && errno == ENOENT) /* The bpf_link is in the dead row */ continue; if (!ASSERT_GE(link_fd, 0, "bpf_link_get_fd_by_id")) break; for (i = 0; i < nr_iters; i++) if (link_infos[i].id == next_id) break; if (i == nr_iters) continue; nr_id_found++; err = bpf_link_get_info_by_fd(link_fd, &link_info, &info_len); cmp_res = memcmp(&link_info, &link_infos[i], offsetof(struct bpf_link_info, raw_tracepoint)); ASSERT_OK(err, "bpf_link_get_info_by_fd"); ASSERT_EQ(info_len, sizeof(link_info), "info_len"); ASSERT_OK(cmp_res, "memcmp"); close(link_fd); } ASSERT_EQ(nr_id_found, nr_iters, "link_nr_id_found"); done: for (i = 0; i < nr_iters; i++) { bpf_link__destroy(links[i]); bpf_object__close(objs[i]); } }
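/*
 * Editor's illustrative sketch (not part of the selftest above): the same
 * ID-based iteration pattern the test exercises, written as a standalone
 * user-space loop. It assumes libbpf and uses bpf_obj_get_info_by_fd() as the
 * long-standing info getter (recent libbpf also provides
 * bpf_prog_get_info_by_fd() with the same semantics). Needs sufficient
 * privileges (CAP_BPF/CAP_SYS_ADMIN) to see other processes' programs.
 */
#include <stdio.h>
#include <errno.h>
#include <unistd.h>
#include <bpf/bpf.h>

static void dump_loaded_progs(void)
{
        __u32 id = 0;

        while (!bpf_prog_get_next_id(id, &id)) {
                struct bpf_prog_info info = {};
                __u32 len = sizeof(info);
                int fd = bpf_prog_get_fd_by_id(id);

                if (fd < 0) {
                        if (errno == ENOENT)
                                continue;       /* program unloaded between the two calls */
                        break;
                }
                if (!bpf_obj_get_info_by_fd(fd, &info, &len))
                        printf("id %u name %s type %u\n",
                               info.id, info.name, info.type);
                close(fd);
        }
}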
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (c) 2024 José Expósito */ #include "vmlinux.h" #include "hid_bpf.h" #include "hid_bpf_helpers.h" #include <bpf/bpf_tracing.h> #define VID_RAPOO 0x24AE #define PID_M50 0x2015 #define RDESC_SIZE 186 HID_BPF_CONFIG( HID_DEVICE(BUS_USB, HID_GROUP_GENERIC, VID_RAPOO, PID_M50) ); /* * The Rapoo M50 Plus Silent mouse has 2 side buttons in addition to the left, * right and middle buttons. However, its original HID descriptor has a Usage * Maximum of 3, preventing the side buttons to work. This HID-BPF driver * changes that usage to 5. * * For reference, this is the original report descriptor: * * 0x05, 0x01, // Usage Page (Generic Desktop) 0 * 0x09, 0x02, // Usage (Mouse) 2 * 0xa1, 0x01, // Collection (Application) 4 * 0x85, 0x01, // Report ID (1) 6 * 0x09, 0x01, // Usage (Pointer) 8 * 0xa1, 0x00, // Collection (Physical) 10 * 0x05, 0x09, // Usage Page (Button) 12 * 0x19, 0x01, // Usage Minimum (1) 14 * 0x29, 0x03, // Usage Maximum (3) 16 <- change to 0x05 * 0x15, 0x00, // Logical Minimum (0) 18 * 0x25, 0x01, // Logical Maximum (1) 20 * 0x75, 0x01, // Report Size (1) 22 * 0x95, 0x05, // Report Count (5) 24 * 0x81, 0x02, // Input (Data,Var,Abs) 26 * 0x75, 0x03, // Report Size (3) 28 * 0x95, 0x01, // Report Count (1) 30 * 0x81, 0x01, // Input (Cnst,Arr,Abs) 32 * 0x05, 0x01, // Usage Page (Generic Desktop) 34 * 0x09, 0x30, // Usage (X) 36 * 0x09, 0x31, // Usage (Y) 38 * 0x16, 0x01, 0x80, // Logical Minimum (-32767) 40 * 0x26, 0xff, 0x7f, // Logical Maximum (32767) 43 * 0x75, 0x10, // Report Size (16) 46 * 0x95, 0x02, // Report Count (2) 48 * 0x81, 0x06, // Input (Data,Var,Rel) 50 * 0x09, 0x38, // Usage (Wheel) 52 * 0x15, 0x81, // Logical Minimum (-127) 54 * 0x25, 0x7f, // Logical Maximum (127) 56 * 0x75, 0x08, // Report Size (8) 58 * 0x95, 0x01, // Report Count (1) 60 * 0x81, 0x06, // Input (Data,Var,Rel) 62 * 0xc0, // End Collection 64 * 0xc0, // End Collection 65 * 0x05, 0x0c, // Usage Page (Consumer Devices) 66 * 0x09, 0x01, // Usage (Consumer Control) 68 * 0xa1, 0x01, // Collection (Application) 70 * 0x85, 0x02, // Report ID (2) 72 * 0x75, 0x10, // Report Size (16) 74 * 0x95, 0x01, // Report Count (1) 76 * 0x15, 0x01, // Logical Minimum (1) 78 * 0x26, 0x8c, 0x02, // Logical Maximum (652) 80 * 0x19, 0x01, // Usage Minimum (1) 83 * 0x2a, 0x8c, 0x02, // Usage Maximum (652) 85 * 0x81, 0x00, // Input (Data,Arr,Abs) 88 * 0xc0, // End Collection 90 * 0x05, 0x01, // Usage Page (Generic Desktop) 91 * 0x09, 0x80, // Usage (System Control) 93 * 0xa1, 0x01, // Collection (Application) 95 * 0x85, 0x03, // Report ID (3) 97 * 0x09, 0x82, // Usage (System Sleep) 99 * 0x09, 0x81, // Usage (System Power Down) 101 * 0x09, 0x83, // Usage (System Wake Up) 103 * 0x15, 0x00, // Logical Minimum (0) 105 * 0x25, 0x01, // Logical Maximum (1) 107 * 0x19, 0x01, // Usage Minimum (1) 109 * 0x29, 0x03, // Usage Maximum (3) 111 * 0x75, 0x01, // Report Size (1) 113 * 0x95, 0x03, // Report Count (3) 115 * 0x81, 0x02, // Input (Data,Var,Abs) 117 * 0x95, 0x05, // Report Count (5) 119 * 0x81, 0x01, // Input (Cnst,Arr,Abs) 121 * 0xc0, // End Collection 123 * 0x05, 0x01, // Usage Page (Generic Desktop) 124 * 0x09, 0x00, // Usage (Undefined) 126 * 0xa1, 0x01, // Collection (Application) 128 * 0x85, 0x05, // Report ID (5) 130 * 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 132 * 0x09, 0x01, // Usage (Vendor Usage 1) 135 * 0x15, 0x81, // Logical Minimum (-127) 137 * 0x25, 0x7f, // Logical Maximum (127) 139 * 0x75, 0x08, // Report Size (8) 141 * 0x95, 0x07, // Report Count (7) 
143 * 0xb1, 0x02, // Feature (Data,Var,Abs) 145 * 0xc0, // End Collection 147 * 0x06, 0x00, 0xff, // Usage Page (Vendor Defined Page 1) 148 * 0x09, 0x0e, // Usage (Vendor Usage 0x0e) 151 * 0xa1, 0x01, // Collection (Application) 153 * 0x85, 0xba, // Report ID (186) 155 * 0x95, 0x1f, // Report Count (31) 157 * 0x75, 0x08, // Report Size (8) 159 * 0x26, 0xff, 0x00, // Logical Maximum (255) 161 * 0x15, 0x00, // Logical Minimum (0) 164 * 0x09, 0x01, // Usage (Vendor Usage 1) 166 * 0x91, 0x02, // Output (Data,Var,Abs) 168 * 0x85, 0xba, // Report ID (186) 170 * 0x95, 0x1f, // Report Count (31) 172 * 0x75, 0x08, // Report Size (8) 174 * 0x26, 0xff, 0x00, // Logical Maximum (255) 176 * 0x15, 0x00, // Logical Minimum (0) 179 * 0x09, 0x01, // Usage (Vendor Usage 1) 181 * 0x81, 0x02, // Input (Data,Var,Abs) 183 * 0xc0, // End Collection 185 */ SEC(HID_BPF_RDESC_FIXUP) int BPF_PROG(hid_rdesc_fixup_rapoo_m50, struct hid_bpf_ctx *hctx) { __u8 *data = hid_bpf_get_data(hctx, 0, HID_MAX_DESCRIPTOR_SIZE); if (!data) return 0; /* EPERM check */ if (data[17] == 0x03) data[17] = 0x05; return 0; } HID_BPF_OPS(rapoo_m50) = { .hid_rdesc_fixup = (void *)hid_rdesc_fixup_rapoo_m50, }; SEC("syscall") int probe(struct hid_bpf_probe_args *ctx) { ctx->retval = ctx->rdesc_size != RDESC_SIZE; if (ctx->retval) ctx->retval = -EINVAL; return 0; } char _license[] SEC("license") = "GPL";
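/*
 * Editor's illustrative sketch (not part of the driver above): a host-side
 * view of the one-byte patch performed by hid_rdesc_fixup_rapoo_m50().
 * Offsets follow the annotated descriptor dump above; the helper name and
 * the rdesc[] buffer are hypothetical. Raising "Usage Maximum" from 3 to 5
 * lets the five button bits already declared by "Report Count (5)" map to
 * five distinct button usages, so the two side buttons become usable.
 */
static void patch_usage_maximum(unsigned char *rdesc, unsigned int size)
{
        /* rdesc[16..17] is the "Usage Maximum (3)" item: 0x29 0x03 */
        if (size == 186 && rdesc[16] == 0x29 && rdesc[17] == 0x03)
                rdesc[17] = 0x05;       /* Usage Maximum (5) */
}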
// SPDX-License-Identifier: GPL-2.0-or-later /* * Stereo and SAP detection for cx88 * * Copyright (c) 2009 Marton Balint <[email protected]> */ #include "cx88.h" #include "cx88-reg.h" #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/jiffies.h> #include <asm/div64.h> #define INT_PI ((s32)(3.141592653589 * 32768.0)) #define compat_remainder(a, b) \ ((float)(((s32)((a) * 100)) % ((s32)((b) * 100))) / 100.0) #define baseband_freq(carrier, srate, tone) ((s32)( \ (compat_remainder(carrier + tone, srate)) / srate * 2 * INT_PI)) /* * We calculate the baseband frequencies of the carrier and the pilot tones * based on the sampling rate of the audio rds fifo. */ #define FREQ_A2_CARRIER baseband_freq(54687.5, 2689.36, 0.0) #define FREQ_A2_DUAL baseband_freq(54687.5, 2689.36, 274.1) #define FREQ_A2_STEREO baseband_freq(54687.5, 2689.36, 117.5) /* * The frequencies below are from the reference driver. They probably need * further adjustments, because they are not tested at all. You may even need * to play a bit with the registers of the chip to select the proper signal * for the input of the audio rds fifo, and measure it's sampling rate to * calculate the proper baseband frequencies... */ #define FREQ_A2M_CARRIER ((s32)(2.114516 * 32768.0)) #define FREQ_A2M_DUAL ((s32)(2.754916 * 32768.0)) #define FREQ_A2M_STEREO ((s32)(2.462326 * 32768.0)) #define FREQ_EIAJ_CARRIER ((s32)(1.963495 * 32768.0)) /* 5pi/8 */ #define FREQ_EIAJ_DUAL ((s32)(2.562118 * 32768.0)) #define FREQ_EIAJ_STEREO ((s32)(2.601053 * 32768.0)) #define FREQ_BTSC_DUAL ((s32)(1.963495 * 32768.0)) /* 5pi/8 */ #define FREQ_BTSC_DUAL_REF ((s32)(1.374446 * 32768.0)) /* 7pi/16 */ #define FREQ_BTSC_SAP ((s32)(2.471532 * 32768.0)) #define FREQ_BTSC_SAP_REF ((s32)(1.730072 * 32768.0)) /* The spectrum of the signal should be empty between these frequencies. */ #define FREQ_NOISE_START ((s32)(0.100000 * 32768.0)) #define FREQ_NOISE_END ((s32)(1.200000 * 32768.0)) static unsigned int dsp_debug; module_param(dsp_debug, int, 0644); MODULE_PARM_DESC(dsp_debug, "enable audio dsp debug messages"); #define dprintk(level, fmt, arg...) do { \ if (dsp_debug >= level) \ printk(KERN_DEBUG pr_fmt("%s: dsp:" fmt), \ __func__, ##arg); \ } while (0) static s32 int_cos(u32 x) { u32 t2, t4, t6, t8; s32 ret; u16 period = x / INT_PI; if (period % 2) return -int_cos(x - INT_PI); x = x % INT_PI; if (x > INT_PI / 2) return -int_cos(INT_PI / 2 - (x % (INT_PI / 2))); /* * Now x is between 0 and INT_PI/2. * To calculate cos(x) we use it's Taylor polinom. */ t2 = x * x / 32768 / 2; t4 = t2 * x / 32768 * x / 32768 / 3 / 4; t6 = t4 * x / 32768 * x / 32768 / 5 / 6; t8 = t6 * x / 32768 * x / 32768 / 7 / 8; ret = 32768 - t2 + t4 - t6 + t8; return ret; } static u32 int_goertzel(s16 x[], u32 N, u32 freq) { /* * We use the Goertzel algorithm to determine the power of the * given frequency in the signal */ s32 s_prev = 0; s32 s_prev2 = 0; s32 coeff = 2 * int_cos(freq); u32 i; u64 tmp; u32 divisor; for (i = 0; i < N; i++) { s32 s = x[i] + ((s64)coeff * s_prev / 32768) - s_prev2; s_prev2 = s_prev; s_prev = s; } tmp = (s64)s_prev2 * s_prev2 + (s64)s_prev * s_prev - (s64)coeff * s_prev2 * s_prev / 32768; /* * XXX: N must be low enough so that N*N fits in s32. * Else we need two divisions. 
*/ divisor = N * N; do_div(tmp, divisor); return (u32)tmp; } static u32 freq_magnitude(s16 x[], u32 N, u32 freq) { u32 sum = int_goertzel(x, N, freq); return (u32)int_sqrt(sum); } static u32 noise_magnitude(s16 x[], u32 N, u32 freq_start, u32 freq_end) { int i; u32 sum = 0; u32 freq_step; int samples = 5; if (N > 192) { /* The last 192 samples are enough for noise detection */ x += (N - 192); N = 192; } freq_step = (freq_end - freq_start) / (samples - 1); for (i = 0; i < samples; i++) { sum += int_goertzel(x, N, freq_start); freq_start += freq_step; } return (u32)int_sqrt(sum / samples); } static s32 detect_a2_a2m_eiaj(struct cx88_core *core, s16 x[], u32 N) { s32 carrier, stereo, dual, noise; s32 carrier_freq, stereo_freq, dual_freq; s32 ret; switch (core->tvaudio) { case WW_BG: case WW_DK: carrier_freq = FREQ_A2_CARRIER; stereo_freq = FREQ_A2_STEREO; dual_freq = FREQ_A2_DUAL; break; case WW_M: carrier_freq = FREQ_A2M_CARRIER; stereo_freq = FREQ_A2M_STEREO; dual_freq = FREQ_A2M_DUAL; break; case WW_EIAJ: carrier_freq = FREQ_EIAJ_CARRIER; stereo_freq = FREQ_EIAJ_STEREO; dual_freq = FREQ_EIAJ_DUAL; break; default: pr_warn("unsupported audio mode %d for %s\n", core->tvaudio, __func__); return UNSET; } carrier = freq_magnitude(x, N, carrier_freq); stereo = freq_magnitude(x, N, stereo_freq); dual = freq_magnitude(x, N, dual_freq); noise = noise_magnitude(x, N, FREQ_NOISE_START, FREQ_NOISE_END); dprintk(1, "detect a2/a2m/eiaj: carrier=%d, stereo=%d, dual=%d, noise=%d\n", carrier, stereo, dual, noise); if (stereo > dual) ret = V4L2_TUNER_SUB_STEREO; else ret = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; if (core->tvaudio == WW_EIAJ) { /* EIAJ checks may need adjustments */ if ((carrier > max(stereo, dual) * 2) && (carrier < max(stereo, dual) * 6) && (carrier > 20 && carrier < 200) && (max(stereo, dual) > min(stereo, dual))) { /* * For EIAJ the carrier is always present, * so we probably don't need noise detection */ return ret; } } else { if ((carrier > max(stereo, dual) * 2) && (carrier < max(stereo, dual) * 8) && (carrier > 20 && carrier < 200) && (noise < 10) && (max(stereo, dual) > min(stereo, dual) * 2)) { return ret; } } return V4L2_TUNER_SUB_MONO; } static s32 detect_btsc(struct cx88_core *core, s16 x[], u32 N) { s32 sap_ref = freq_magnitude(x, N, FREQ_BTSC_SAP_REF); s32 sap = freq_magnitude(x, N, FREQ_BTSC_SAP); s32 dual_ref = freq_magnitude(x, N, FREQ_BTSC_DUAL_REF); s32 dual = freq_magnitude(x, N, FREQ_BTSC_DUAL); dprintk(1, "detect btsc: dual_ref=%d, dual=%d, sap_ref=%d, sap=%d\n", dual_ref, dual, sap_ref, sap); /* FIXME: Currently not supported */ return UNSET; } static s16 *read_rds_samples(struct cx88_core *core, u32 *N) { const struct sram_channel *srch = &cx88_sram_channels[SRAM_CH27]; s16 *samples; unsigned int i; unsigned int bpl = srch->fifo_size / AUD_RDS_LINES; unsigned int spl = bpl / 4; unsigned int sample_count = spl * (AUD_RDS_LINES - 1); u32 current_address = cx_read(srch->ptr1_reg); u32 offset = (current_address - srch->fifo_start + bpl); dprintk(1, "read RDS samples: current_address=%08x (offset=%08x), sample_count=%d, aud_intstat=%08x\n", current_address, current_address - srch->fifo_start, sample_count, cx_read(MO_AUD_INTSTAT)); samples = kmalloc_array(sample_count, sizeof(*samples), GFP_KERNEL); if (!samples) return NULL; *N = sample_count; for (i = 0; i < sample_count; i++) { offset = offset % (AUD_RDS_LINES * bpl); samples[i] = cx_read(srch->fifo_start + offset); offset += 4; } dprintk(2, "RDS samples dump: %*ph\n", sample_count, samples); return samples; } s32 
cx88_dsp_detect_stereo_sap(struct cx88_core *core) { s16 *samples; u32 N = 0; s32 ret = UNSET; /* If audio RDS fifo is disabled, we can't read the samples */ if (!(cx_read(MO_AUD_DMACNTRL) & 0x04)) return ret; if (!(cx_read(AUD_CTL) & EN_FMRADIO_EN_RDS)) return ret; /* Wait at least 500 ms after an audio standard change */ if (time_before(jiffies, core->last_change + msecs_to_jiffies(500))) return ret; samples = read_rds_samples(core, &N); if (!samples) return ret; switch (core->tvaudio) { case WW_BG: case WW_DK: case WW_EIAJ: case WW_M: ret = detect_a2_a2m_eiaj(core, samples, N); break; case WW_BTSC: ret = detect_btsc(core, samples, N); break; case WW_NONE: case WW_I: case WW_L: case WW_I2SPT: case WW_FM: case WW_I2SADC: break; } kfree(samples); if (ret != UNSET) dprintk(1, "stereo/sap detection result:%s%s%s\n", (ret & V4L2_TUNER_SUB_MONO) ? " mono" : "", (ret & V4L2_TUNER_SUB_STEREO) ? " stereo" : "", (ret & V4L2_TUNER_SUB_LANG2) ? " dual" : ""); return ret; } EXPORT_SYMBOL(cx88_dsp_detect_stereo_sap);
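/*
 * Editor's illustrative sketch (not part of cx88-dsp.c above): the same
 * Goertzel power computation in floating point, as a reference for the Q15
 * fixed-point int_goertzel()/int_cos() pair above. Here 'freq' is the
 * baseband tone frequency in radians per sample, and the result is
 * normalised by N*N exactly like the kernel code before freq_magnitude()
 * takes the square root. This is a user-space sketch only.
 */
#include <math.h>

static double goertzel_power(const short *x, unsigned int N, double freq)
{
        double coeff = 2.0 * cos(freq);
        double s_prev = 0.0, s_prev2 = 0.0;
        unsigned int i;

        for (i = 0; i < N; i++) {
                double s = x[i] + coeff * s_prev - s_prev2;

                s_prev2 = s_prev;
                s_prev = s;
        }
        /* squared magnitude of the tone at 'freq', normalised like the driver */
        return (s_prev2 * s_prev2 + s_prev * s_prev
                - coeff * s_prev * s_prev2) / ((double)N * N);
}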
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. */ #include <linux/bitfield.h> #include <linux/irq.h> #include <linux/irqchip.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> /* FIC Registers */ #define AL_FIC_CAUSE 0x00 #define AL_FIC_SET_CAUSE 0x08 #define AL_FIC_MASK 0x10 #define AL_FIC_CONTROL 0x28 #define CONTROL_TRIGGER_RISING BIT(3) #define CONTROL_MASK_MSI_X BIT(5) #define NR_FIC_IRQS 32 MODULE_AUTHOR("Talel Shenhar"); MODULE_DESCRIPTION("Amazon's Annapurna Labs Interrupt Controller Driver"); enum al_fic_state { AL_FIC_UNCONFIGURED = 0, AL_FIC_CONFIGURED_LEVEL, AL_FIC_CONFIGURED_RISING_EDGE, }; struct al_fic { void __iomem *base; struct irq_domain *domain; const char *name; unsigned int parent_irq; enum al_fic_state state; }; static void al_fic_set_trigger(struct al_fic *fic, struct irq_chip_generic *gc, enum al_fic_state new_state) { irq_flow_handler_t handler; u32 control = readl_relaxed(fic->base + AL_FIC_CONTROL); if (new_state == AL_FIC_CONFIGURED_LEVEL) { handler = handle_level_irq; control &= ~CONTROL_TRIGGER_RISING; } else { handler = handle_edge_irq; control |= CONTROL_TRIGGER_RISING; } gc->chip_types->handler = handler; fic->state = new_state; writel_relaxed(control, fic->base + AL_FIC_CONTROL); } static int al_fic_irq_set_type(struct irq_data *data, unsigned int flow_type) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct al_fic *fic = gc->private; enum al_fic_state new_state; int ret = 0; irq_gc_lock(gc); if (((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_LEVEL_HIGH) && ((flow_type & IRQ_TYPE_SENSE_MASK) != IRQ_TYPE_EDGE_RISING)) { pr_debug("fic doesn't support flow type %d\n", flow_type); ret = -EINVAL; goto err; } new_state = (flow_type & IRQ_TYPE_LEVEL_HIGH) ? AL_FIC_CONFIGURED_LEVEL : AL_FIC_CONFIGURED_RISING_EDGE; /* * A given FIC instance can be either all level or all edge triggered. * This is generally fixed depending on what pieces of HW it's wired up * to. * * We configure it based on the sensitivity of the first source * being setup, and reject any subsequent attempt at configuring it in a * different way. 
*/ if (fic->state == AL_FIC_UNCONFIGURED) { al_fic_set_trigger(fic, gc, new_state); } else if (fic->state != new_state) { pr_debug("fic %s state already configured to %d\n", fic->name, fic->state); ret = -EINVAL; goto err; } err: irq_gc_unlock(gc); return ret; } static void al_fic_irq_handler(struct irq_desc *desc) { struct al_fic *fic = irq_desc_get_handler_data(desc); struct irq_domain *domain = fic->domain; struct irq_chip *irqchip = irq_desc_get_chip(desc); struct irq_chip_generic *gc = irq_get_domain_generic_chip(domain, 0); unsigned long pending; u32 hwirq; chained_irq_enter(irqchip, desc); pending = readl_relaxed(fic->base + AL_FIC_CAUSE); pending &= ~gc->mask_cache; for_each_set_bit(hwirq, &pending, NR_FIC_IRQS) generic_handle_domain_irq(domain, hwirq); chained_irq_exit(irqchip, desc); } static int al_fic_irq_retrigger(struct irq_data *data) { struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data); struct al_fic *fic = gc->private; writel_relaxed(BIT(data->hwirq), fic->base + AL_FIC_SET_CAUSE); return 1; } static int al_fic_register(struct device_node *node, struct al_fic *fic) { struct irq_chip_generic *gc; int ret; fic->domain = irq_domain_add_linear(node, NR_FIC_IRQS, &irq_generic_chip_ops, fic); if (!fic->domain) { pr_err("fail to add irq domain\n"); return -ENOMEM; } ret = irq_alloc_domain_generic_chips(fic->domain, NR_FIC_IRQS, 1, fic->name, handle_level_irq, 0, 0, IRQ_GC_INIT_MASK_CACHE); if (ret) { pr_err("fail to allocate generic chip (%d)\n", ret); goto err_domain_remove; } gc = irq_get_domain_generic_chip(fic->domain, 0); gc->reg_base = fic->base; gc->chip_types->regs.mask = AL_FIC_MASK; gc->chip_types->regs.ack = AL_FIC_CAUSE; gc->chip_types->chip.irq_mask = irq_gc_mask_set_bit; gc->chip_types->chip.irq_unmask = irq_gc_mask_clr_bit; gc->chip_types->chip.irq_ack = irq_gc_ack_clr_bit; gc->chip_types->chip.irq_set_type = al_fic_irq_set_type; gc->chip_types->chip.irq_retrigger = al_fic_irq_retrigger; gc->chip_types->chip.flags = IRQCHIP_SKIP_SET_WAKE; gc->private = fic; irq_set_chained_handler_and_data(fic->parent_irq, al_fic_irq_handler, fic); return 0; err_domain_remove: irq_domain_remove(fic->domain); return ret; } /* * al_fic_wire_init() - initialize and configure fic in wire mode * @of_node: optional pointer to interrupt controller's device tree node. * @base: mmio to fic register * @name: name of the fic * @parent_irq: interrupt of parent * * This API will configure the fic hardware to to work in wire mode. * In wire mode, fic hardware is generating a wire ("wired") interrupt. * Interrupt can be generated based on positive edge or level - configuration is * to be determined based on connected hardware to this fic. 
*/ static struct al_fic *al_fic_wire_init(struct device_node *node, void __iomem *base, const char *name, unsigned int parent_irq) { struct al_fic *fic; int ret; u32 control = CONTROL_MASK_MSI_X; fic = kzalloc(sizeof(*fic), GFP_KERNEL); if (!fic) return ERR_PTR(-ENOMEM); fic->base = base; fic->parent_irq = parent_irq; fic->name = name; /* mask out all interrupts */ writel_relaxed(0xFFFFFFFF, fic->base + AL_FIC_MASK); /* clear any pending interrupt */ writel_relaxed(0, fic->base + AL_FIC_CAUSE); writel_relaxed(control, fic->base + AL_FIC_CONTROL); ret = al_fic_register(node, fic); if (ret) { pr_err("fail to register irqchip\n"); goto err_free; } pr_debug("%s initialized successfully in Legacy mode (parent-irq=%u)\n", fic->name, parent_irq); return fic; err_free: kfree(fic); return ERR_PTR(ret); } static int __init al_fic_init_dt(struct device_node *node, struct device_node *parent) { int ret; void __iomem *base; unsigned int parent_irq; struct al_fic *fic; if (!parent) { pr_err("%s: unsupported - device require a parent\n", node->name); return -EINVAL; } base = of_iomap(node, 0); if (!base) { pr_err("%s: fail to map memory\n", node->name); return -ENOMEM; } parent_irq = irq_of_parse_and_map(node, 0); if (!parent_irq) { pr_err("%s: fail to map irq\n", node->name); ret = -EINVAL; goto err_unmap; } fic = al_fic_wire_init(node, base, node->name, parent_irq); if (IS_ERR(fic)) { pr_err("%s: fail to initialize irqchip (%lu)\n", node->name, PTR_ERR(fic)); ret = PTR_ERR(fic); goto err_irq_dispose; } return 0; err_irq_dispose: irq_dispose_mapping(parent_irq); err_unmap: iounmap(base); return ret; } IRQCHIP_DECLARE(al_fic, "amazon,al-fic", al_fic_init_dt);
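/*
 * Editor's illustrative sketch (not part of irq-al-fic.c above): a consumer
 * of one of the 32 FIC lines. The trigger flag passed to request_irq() is
 * what eventually reaches al_fic_irq_set_type(); because a FIC instance is
 * configured as a whole, the first consumer fixes it to level-high or
 * rising-edge and a later request with the other type is rejected with
 * -EINVAL. The device name and handler below are hypothetical.
 */
#include <linux/interrupt.h>

static irqreturn_t my_dev_isr(int irq, void *dev_id)
{
        /* acknowledge and handle the device interrupt here */
        return IRQ_HANDLED;
}

static int my_dev_request_fic_line(int irq, void *dev)
{
        /*
         * IRQF_TRIGGER_HIGH matches AL_FIC_CONFIGURED_LEVEL above;
         * use IRQF_TRIGGER_RISING for an edge-configured FIC instead.
         */
        return request_irq(irq, my_dev_isr, IRQF_TRIGGER_HIGH, "my-dev", dev);
}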
/* SPDX-License-Identifier: GPL-2.0 */ /* * V4L2 Capture ISI subdev for i.MX8QXP/QM platform * * ISI is a Image Sensor Interface of i.MX8QXP/QM platform, which * used to process image from camera sensor to memory or DC * Copyright 2019-2020 NXP */ #ifndef __MXC_ISI_CORE_H__ #define __MXC_ISI_CORE_H__ #include <linux/list.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/videodev2.h> #include <media/media-device.h> #include <media/media-entity.h> #include <media/v4l2-async.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-dev.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include <media/videobuf2-core.h> #include <media/videobuf2-v4l2.h> struct clk_bulk_data; struct dentry; struct device; struct media_intf_devnode; struct regmap; struct v4l2_m2m_dev; /* Pipeline pads */ #define MXC_ISI_PIPE_PAD_SINK 0 #define MXC_ISI_PIPE_PAD_SOURCE 1 #define MXC_ISI_PIPE_PADS_NUM 2 #define MXC_ISI_MIN_WIDTH 1U #define MXC_ISI_MIN_HEIGHT 1U #define MXC_ISI_MAX_WIDTH_UNCHAINED 2048U #define MXC_ISI_MAX_WIDTH_CHAINED 4096U #define MXC_ISI_MAX_HEIGHT 8191U #define MXC_ISI_DEF_WIDTH 1920U #define MXC_ISI_DEF_HEIGHT 1080U #define MXC_ISI_DEF_MBUS_CODE_SINK MEDIA_BUS_FMT_UYVY8_1X16 #define MXC_ISI_DEF_MBUS_CODE_SOURCE MEDIA_BUS_FMT_YUV8_1X24 #define MXC_ISI_DEF_PIXEL_FORMAT V4L2_PIX_FMT_YUYV #define MXC_ISI_DEF_COLOR_SPACE V4L2_COLORSPACE_SRGB #define MXC_ISI_DEF_YCBCR_ENC V4L2_YCBCR_ENC_601 #define MXC_ISI_DEF_QUANTIZATION V4L2_QUANTIZATION_LIM_RANGE #define MXC_ISI_DEF_XFER_FUNC V4L2_XFER_FUNC_SRGB #define MXC_ISI_DRIVER_NAME "mxc-isi" #define MXC_ISI_CAPTURE "mxc-isi-cap" #define MXC_ISI_M2M "mxc-isi-m2m" #define MXC_MAX_PLANES 3 struct mxc_isi_dev; struct mxc_isi_m2m_ctx; enum mxc_isi_buf_id { MXC_ISI_BUF1 = 0x0, MXC_ISI_BUF2, }; enum mxc_isi_encoding { MXC_ISI_ENC_RAW, MXC_ISI_ENC_RGB, MXC_ISI_ENC_YUV, }; enum mxc_isi_input_id { /* Inputs from the crossbar switch range from 0 to 15 */ MXC_ISI_INPUT_MEM = 16, }; enum mxc_isi_video_type { MXC_ISI_VIDEO_CAP = BIT(0), MXC_ISI_VIDEO_M2M_OUT = BIT(1), MXC_ISI_VIDEO_M2M_CAP = BIT(2), }; struct mxc_isi_format_info { u32 mbus_code; u32 fourcc; enum mxc_isi_video_type type; u32 isi_in_format; u32 isi_out_format; u8 mem_planes; u8 color_planes; u8 depth[MXC_MAX_PLANES]; u8 hsub; u8 vsub; enum mxc_isi_encoding encoding; }; struct mxc_isi_bus_format_info { u32 mbus_code; u32 output; u32 pads; enum mxc_isi_encoding encoding; }; struct mxc_isi_buffer { struct vb2_v4l2_buffer v4l2_buf; struct list_head list; dma_addr_t dma_addrs[3]; enum mxc_isi_buf_id id; bool discard; }; struct mxc_isi_reg { u32 offset; u32 mask; }; struct mxc_isi_ier_reg { /* Overflow Y/U/V trigger enable*/ struct mxc_isi_reg oflw_y_buf_en; struct mxc_isi_reg oflw_u_buf_en; struct mxc_isi_reg oflw_v_buf_en; /* Excess overflow Y/U/V trigger enable*/ struct mxc_isi_reg excs_oflw_y_buf_en; struct mxc_isi_reg excs_oflw_u_buf_en; struct mxc_isi_reg excs_oflw_v_buf_en; /* Panic Y/U/V trigger enable*/ struct mxc_isi_reg panic_y_buf_en; struct mxc_isi_reg panic_v_buf_en; struct mxc_isi_reg panic_u_buf_en; }; struct mxc_isi_panic_thd { u32 mask; u32 offset; u32 threshold; }; struct mxc_isi_set_thd { struct mxc_isi_panic_thd panic_set_thd_y; struct mxc_isi_panic_thd panic_set_thd_u; struct mxc_isi_panic_thd panic_set_thd_v; }; struct mxc_gasket_ops { void (*enable)(struct mxc_isi_dev *isi, const struct v4l2_mbus_frame_desc *fd, const struct v4l2_mbus_framefmt *fmt, const unsigned int port); void (*disable)(struct mxc_isi_dev *isi, const unsigned 
int port); }; enum model { MXC_ISI_IMX8MN, MXC_ISI_IMX8MP, MXC_ISI_IMX93, }; struct mxc_isi_plat_data { enum model model; unsigned int num_ports; unsigned int num_channels; unsigned int reg_offset; const struct mxc_isi_ier_reg *ier_reg; const struct mxc_isi_set_thd *set_thd; const struct mxc_gasket_ops *gasket_ops; const struct clk_bulk_data *clks; unsigned int num_clks; bool buf_active_reverse; bool has_36bit_dma; }; struct mxc_isi_dma_buffer { size_t size; void *addr; dma_addr_t dma; }; struct mxc_isi_input { unsigned int enable_count; }; struct mxc_isi_crossbar { struct mxc_isi_dev *isi; unsigned int num_sinks; unsigned int num_sources; struct mxc_isi_input *inputs; struct v4l2_subdev sd; struct media_pad *pads; }; struct mxc_isi_video { struct mxc_isi_pipe *pipe; struct video_device vdev; struct media_pad pad; /* Protects is_streaming, and the vdev and vb2_q operations */ struct mutex lock; bool is_streaming; struct v4l2_pix_format_mplane pix; const struct mxc_isi_format_info *fmtinfo; struct { struct v4l2_ctrl_handler handler; unsigned int alpha; bool hflip; bool vflip; } ctrls; struct vb2_queue vb2_q; struct mxc_isi_buffer buf_discard[3]; struct list_head out_pending; struct list_head out_active; struct list_head out_discard; u32 frame_count; /* Protects out_pending, out_active, out_discard and frame_count */ spinlock_t buf_lock; struct mxc_isi_dma_buffer discard_buffer[MXC_MAX_PLANES]; }; typedef void(*mxc_isi_pipe_irq_t)(struct mxc_isi_pipe *, u32); struct mxc_isi_pipe { struct mxc_isi_dev *isi; u32 id; void __iomem *regs; struct media_pipeline pipe; struct v4l2_subdev sd; struct media_pad pads[MXC_ISI_PIPE_PADS_NUM]; struct mxc_isi_video video; /* * Protects use_count, irq_handler, res_available, res_acquired, * chained_res, and the CHNL_CTRL register. 
*/ struct mutex lock; unsigned int use_count; mxc_isi_pipe_irq_t irq_handler; #define MXC_ISI_CHANNEL_RES_LINE_BUF BIT(0) #define MXC_ISI_CHANNEL_RES_OUTPUT_BUF BIT(1) u8 available_res; u8 acquired_res; u8 chained_res; bool chained; }; struct mxc_isi_m2m { struct mxc_isi_dev *isi; struct mxc_isi_pipe *pipe; struct media_pad pad; struct video_device vdev; struct media_intf_devnode *intf; struct v4l2_m2m_dev *m2m_dev; /* Protects last_ctx, usage_count and chained_count */ struct mutex lock; struct mxc_isi_m2m_ctx *last_ctx; int usage_count; int chained_count; }; struct mxc_isi_dev { struct device *dev; const struct mxc_isi_plat_data *pdata; void __iomem *regs; struct clk_bulk_data *clks; struct regmap *gasket; struct mxc_isi_crossbar crossbar; struct mxc_isi_pipe *pipes; struct mxc_isi_m2m m2m; struct media_device media_dev; struct v4l2_device v4l2_dev; struct v4l2_async_notifier notifier; struct dentry *debugfs_root; }; extern const struct mxc_gasket_ops mxc_imx8_gasket_ops; extern const struct mxc_gasket_ops mxc_imx93_gasket_ops; int mxc_isi_crossbar_init(struct mxc_isi_dev *isi); void mxc_isi_crossbar_cleanup(struct mxc_isi_crossbar *xbar); int mxc_isi_crossbar_register(struct mxc_isi_crossbar *xbar); void mxc_isi_crossbar_unregister(struct mxc_isi_crossbar *xbar); const struct mxc_isi_bus_format_info * mxc_isi_bus_format_by_code(u32 code, unsigned int pad); const struct mxc_isi_bus_format_info * mxc_isi_bus_format_by_index(unsigned int index, unsigned int pad); const struct mxc_isi_format_info * mxc_isi_format_by_fourcc(u32 fourcc, enum mxc_isi_video_type type); const struct mxc_isi_format_info * mxc_isi_format_enum(unsigned int index, enum mxc_isi_video_type type); const struct mxc_isi_format_info * mxc_isi_format_try(struct mxc_isi_pipe *pipe, struct v4l2_pix_format_mplane *pix, enum mxc_isi_video_type type); int mxc_isi_pipe_init(struct mxc_isi_dev *isi, unsigned int id); void mxc_isi_pipe_cleanup(struct mxc_isi_pipe *pipe); int mxc_isi_pipe_acquire(struct mxc_isi_pipe *pipe, mxc_isi_pipe_irq_t irq_handler); void mxc_isi_pipe_release(struct mxc_isi_pipe *pipe); int mxc_isi_pipe_enable(struct mxc_isi_pipe *pipe); void mxc_isi_pipe_disable(struct mxc_isi_pipe *pipe); int mxc_isi_video_register(struct mxc_isi_pipe *pipe, struct v4l2_device *v4l2_dev); void mxc_isi_video_unregister(struct mxc_isi_pipe *pipe); void mxc_isi_video_suspend(struct mxc_isi_pipe *pipe); int mxc_isi_video_resume(struct mxc_isi_pipe *pipe); int mxc_isi_video_queue_setup(const struct v4l2_pix_format_mplane *format, const struct mxc_isi_format_info *info, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[]); void mxc_isi_video_buffer_init(struct vb2_buffer *vb2, dma_addr_t dma_addrs[3], const struct mxc_isi_format_info *info, const struct v4l2_pix_format_mplane *pix); int mxc_isi_video_buffer_prepare(struct mxc_isi_dev *isi, struct vb2_buffer *vb2, const struct mxc_isi_format_info *info, const struct v4l2_pix_format_mplane *pix); #ifdef CONFIG_VIDEO_IMX8_ISI_M2M int mxc_isi_m2m_register(struct mxc_isi_dev *isi, struct v4l2_device *v4l2_dev); int mxc_isi_m2m_unregister(struct mxc_isi_dev *isi); #else static inline int mxc_isi_m2m_register(struct mxc_isi_dev *isi, struct v4l2_device *v4l2_dev) { return 0; } static inline int mxc_isi_m2m_unregister(struct mxc_isi_dev *isi) { return 0; } #endif int mxc_isi_channel_acquire(struct mxc_isi_pipe *pipe, mxc_isi_pipe_irq_t irq_handler, bool bypass); void mxc_isi_channel_release(struct mxc_isi_pipe *pipe); void mxc_isi_channel_get(struct mxc_isi_pipe 
*pipe); void mxc_isi_channel_put(struct mxc_isi_pipe *pipe); void mxc_isi_channel_enable(struct mxc_isi_pipe *pipe); void mxc_isi_channel_disable(struct mxc_isi_pipe *pipe); int mxc_isi_channel_chain(struct mxc_isi_pipe *pipe, bool bypass); void mxc_isi_channel_unchain(struct mxc_isi_pipe *pipe); void mxc_isi_channel_config(struct mxc_isi_pipe *pipe, enum mxc_isi_input_id input, const struct v4l2_area *in_size, const struct v4l2_area *scale, const struct v4l2_rect *crop, enum mxc_isi_encoding in_encoding, enum mxc_isi_encoding out_encoding); void mxc_isi_channel_set_input_format(struct mxc_isi_pipe *pipe, const struct mxc_isi_format_info *info, const struct v4l2_pix_format_mplane *format); void mxc_isi_channel_set_output_format(struct mxc_isi_pipe *pipe, const struct mxc_isi_format_info *info, struct v4l2_pix_format_mplane *format); void mxc_isi_channel_m2m_start(struct mxc_isi_pipe *pipe); void mxc_isi_channel_set_alpha(struct mxc_isi_pipe *pipe, u8 alpha); void mxc_isi_channel_set_flip(struct mxc_isi_pipe *pipe, bool hflip, bool vflip); void mxc_isi_channel_set_inbuf(struct mxc_isi_pipe *pipe, dma_addr_t dma_addr); void mxc_isi_channel_set_outbuf(struct mxc_isi_pipe *pipe, const dma_addr_t dma_addrs[3], enum mxc_isi_buf_id buf_id); u32 mxc_isi_channel_irq_status(struct mxc_isi_pipe *pipe, bool clear); void mxc_isi_channel_irq_clear(struct mxc_isi_pipe *pipe); #if IS_ENABLED(CONFIG_DEBUG_FS) void mxc_isi_debug_init(struct mxc_isi_dev *isi); void mxc_isi_debug_cleanup(struct mxc_isi_dev *isi); #else static inline void mxc_isi_debug_init(struct mxc_isi_dev *isi) { } static inline void mxc_isi_debug_cleanup(struct mxc_isi_dev *isi) { } #endif #endif /* __MXC_ISI_CORE_H__ */
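/*
 * Editor's illustrative sketch (not part of imx8-isi-core.h above): the
 * acquire/enable lifecycle implied by the pipe declarations in this header,
 * using only functions and types declared above (assumes the header is
 * included). The callback, the function names and the calling context are
 * hypothetical, and error handling is reduced to the minimum.
 */
static void my_pipe_irq(struct mxc_isi_pipe *pipe, u32 status)
{
        /* inspect the hardware status passed by the core, complete buffers */
}

static int my_start_pipe(struct mxc_isi_pipe *pipe)
{
        int ret;

        ret = mxc_isi_pipe_acquire(pipe, my_pipe_irq); /* installs the IRQ callback */
        if (ret)
                return ret;

        ret = mxc_isi_pipe_enable(pipe);               /* starts the ISI channel */
        if (ret)
                mxc_isi_pipe_release(pipe);
        return ret;
}

static void my_stop_pipe(struct mxc_isi_pipe *pipe)
{
        mxc_isi_pipe_disable(pipe);
        mxc_isi_pipe_release(pipe);
}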
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * arch/arm/probes/decode-thumb.h * * Copyright 2013 Linaro Ltd. * Written by: David A. Long */ #ifndef _ARM_KERNEL_PROBES_THUMB_H #define _ARM_KERNEL_PROBES_THUMB_H #include "decode.h" /* * True if current instruction is in an IT block. */ #define in_it_block(cpsr) ((cpsr & 0x06000c00) != 0x00000000) /* * Return the condition code to check for the currently executing instruction. * This is in ITSTATE<7:4> which is in CPSR<15:12> but is only valid if * in_it_block returns true. */ #define current_cond(cpsr) ((cpsr >> 12) & 0xf) enum probes_t32_action { PROBES_T32_EMULATE_NONE, PROBES_T32_SIMULATE_NOP, PROBES_T32_LDMSTM, PROBES_T32_LDRDSTRD, PROBES_T32_TABLE_BRANCH, PROBES_T32_TST, PROBES_T32_CMP, PROBES_T32_MOV, PROBES_T32_ADDSUB, PROBES_T32_LOGICAL, PROBES_T32_ADDWSUBW_PC, PROBES_T32_ADDWSUBW, PROBES_T32_MOVW, PROBES_T32_SAT, PROBES_T32_BITFIELD, PROBES_T32_SEV, PROBES_T32_WFE, PROBES_T32_MRS, PROBES_T32_BRANCH_COND, PROBES_T32_BRANCH, PROBES_T32_PLDI, PROBES_T32_LDR_LIT, PROBES_T32_LDRSTR, PROBES_T32_SIGN_EXTEND, PROBES_T32_MEDIA, PROBES_T32_REVERSE, PROBES_T32_MUL_ADD, PROBES_T32_MUL_ADD2, PROBES_T32_MUL_ADD_LONG, NUM_PROBES_T32_ACTIONS }; enum probes_t16_action { PROBES_T16_ADD_SP, PROBES_T16_CBZ, PROBES_T16_SIGN_EXTEND, PROBES_T16_PUSH, PROBES_T16_POP, PROBES_T16_SEV, PROBES_T16_WFE, PROBES_T16_IT, PROBES_T16_CMP, PROBES_T16_ADDSUB, PROBES_T16_LOGICAL, PROBES_T16_BLX, PROBES_T16_HIREGOPS, PROBES_T16_LDR_LIT, PROBES_T16_LDRHSTRH, PROBES_T16_LDRSTR, PROBES_T16_ADR, PROBES_T16_LDMSTM, PROBES_T16_BRANCH_COND, PROBES_T16_BRANCH, NUM_PROBES_T16_ACTIONS }; extern const union decode_item probes_decode_thumb32_table[]; extern const union decode_item probes_decode_thumb16_table[]; enum probes_insn __kprobes thumb16_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi, bool emulate, const union decode_action *actions, const struct decode_checker *checkers[]); enum probes_insn __kprobes thumb32_probes_decode_insn(probes_opcode_t insn, struct arch_probes_insn *asi, bool emulate, const union decode_action *actions, const struct decode_checker *checkers[]); #endif
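/*
 * Editor's worked example (not part of decode-thumb.h above): how the two
 * ITSTATE macros above behave for a concrete, hypothetical CPSR value.
 * With cpsr = 0x00000800 the IT bits held in CPSR<11:10> are non-zero, so
 * the instruction is treated as being inside an IT block, and the condition
 * code read from CPSR<15:12> is 0x0 (EQ). Pure macro arithmetic; assumes the
 * header above is included.
 */
#include <assert.h>

static void it_state_example(void)
{
        unsigned long cpsr = 0x00000800;        /* hypothetical CPSR value */

        assert(in_it_block(cpsr));              /* 0x800 & 0x06000c00 == 0x800 */
        assert(current_cond(cpsr) == 0x0);      /* ITSTATE<7:4> == 0b0000 -> EQ */
}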
/****************************************************************************** * Client-facing interface for the Xenbus driver. In other words, the * interface between the Xenbus and the device-specific code, be it the * frontend or the backend of that driver. * * Copyright (C) 2005 XenSource Ltd * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. */ #include <linux/mm.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/export.h> #include <asm/xen/hypervisor.h> #include <xen/page.h> #include <xen/interface/xen.h> #include <xen/interface/event_channel.h> #include <xen/balloon.h> #include <xen/events.h> #include <xen/grant_table.h> #include <xen/xenbus.h> #include <xen/xen.h> #include <xen/features.h> #include "xenbus.h" #define XENBUS_PAGES(_grants) (DIV_ROUND_UP(_grants, XEN_PFN_PER_PAGE)) #define XENBUS_MAX_RING_PAGES (XENBUS_PAGES(XENBUS_MAX_RING_GRANTS)) struct xenbus_map_node { struct list_head next; union { struct { struct vm_struct *area; } pv; struct { struct page *pages[XENBUS_MAX_RING_PAGES]; unsigned long addrs[XENBUS_MAX_RING_GRANTS]; void *addr; } hvm; }; grant_handle_t handles[XENBUS_MAX_RING_GRANTS]; unsigned int nr_handles; }; struct map_ring_valloc { struct xenbus_map_node *node; /* Why do we need two arrays? 
See comment of __xenbus_map_ring */ unsigned long addrs[XENBUS_MAX_RING_GRANTS]; phys_addr_t phys_addrs[XENBUS_MAX_RING_GRANTS]; struct gnttab_map_grant_ref map[XENBUS_MAX_RING_GRANTS]; struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; unsigned int idx; }; static DEFINE_SPINLOCK(xenbus_valloc_lock); static LIST_HEAD(xenbus_valloc_pages); struct xenbus_ring_ops { int (*map)(struct xenbus_device *dev, struct map_ring_valloc *info, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr); int (*unmap)(struct xenbus_device *dev, void *vaddr); }; static const struct xenbus_ring_ops *ring_ops __read_mostly; const char *xenbus_strstate(enum xenbus_state state) { static const char *const name[] = { [ XenbusStateUnknown ] = "Unknown", [ XenbusStateInitialising ] = "Initialising", [ XenbusStateInitWait ] = "InitWait", [ XenbusStateInitialised ] = "Initialised", [ XenbusStateConnected ] = "Connected", [ XenbusStateClosing ] = "Closing", [ XenbusStateClosed ] = "Closed", [XenbusStateReconfiguring] = "Reconfiguring", [XenbusStateReconfigured] = "Reconfigured", }; return (state < ARRAY_SIZE(name)) ? name[state] : "INVALID"; } EXPORT_SYMBOL_GPL(xenbus_strstate); /** * xenbus_watch_path - register a watch * @dev: xenbus device * @path: path to watch * @watch: watch to register * @will_handle: events queuing determine callback * @callback: callback to register * * Register a @watch on the given path, using the given xenbus_watch structure * for storage, @will_handle function as the callback to determine if each * event need to be queued, and the given @callback function as the callback. * On success, the given @path will be saved as @watch->node, and remains the * caller's to free. On error, @watch->node will be NULL, the device will * switch to %XenbusStateClosing, and the error will be saved in the store. * * Returns: %0 on success or -errno on error */ int xenbus_watch_path(struct xenbus_device *dev, const char *path, struct xenbus_watch *watch, bool (*will_handle)(struct xenbus_watch *, const char *, const char *), void (*callback)(struct xenbus_watch *, const char *, const char *)) { int err; watch->node = path; watch->will_handle = will_handle; watch->callback = callback; err = register_xenbus_watch(watch); if (err) { watch->node = NULL; watch->will_handle = NULL; watch->callback = NULL; xenbus_dev_fatal(dev, err, "adding watch on %s", path); } return err; } EXPORT_SYMBOL_GPL(xenbus_watch_path); /** * xenbus_watch_pathfmt - register a watch on a sprintf-formatted path * @dev: xenbus device * @watch: watch to register * @will_handle: events queuing determine callback * @callback: callback to register * @pathfmt: format of path to watch * * Register a watch on the given @path, using the given xenbus_watch * structure for storage, @will_handle function as the callback to determine if * each event need to be queued, and the given @callback function as the * callback. On success, the watched path (@path/@path2) will be saved * as @watch->node, and becomes the caller's to kfree(). * On error, watch->node will be NULL, so the caller has nothing to * free, the device will switch to %XenbusStateClosing, and the error will be * saved in the store. * * Returns: %0 on success or -errno on error */ int xenbus_watch_pathfmt(struct xenbus_device *dev, struct xenbus_watch *watch, bool (*will_handle)(struct xenbus_watch *, const char *, const char *), void (*callback)(struct xenbus_watch *, const char *, const char *), const char *pathfmt, ...) 
{ int err; va_list ap; char *path; va_start(ap, pathfmt); path = kvasprintf(GFP_NOIO | __GFP_HIGH, pathfmt, ap); va_end(ap); if (!path) { xenbus_dev_fatal(dev, -ENOMEM, "allocating path for watch"); return -ENOMEM; } err = xenbus_watch_path(dev, path, watch, will_handle, callback); if (err) kfree(path); return err; } EXPORT_SYMBOL_GPL(xenbus_watch_pathfmt); static void xenbus_switch_fatal(struct xenbus_device *, int, int, const char *, ...); static int __xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state, int depth) { /* We check whether the state is currently set to the given value, and if not, then the state is set. We don't want to unconditionally write the given state, because we don't want to fire watches unnecessarily. Furthermore, if the node has gone, we don't write to it, as the device will be tearing down, and we don't want to resurrect that directory. Note that, because of this cached value of our state, this function will not take a caller's Xenstore transaction (something it was trying to in the past) because dev->state would not get reset if the transaction was aborted. */ struct xenbus_transaction xbt; int current_state; int err, abort; if (state == dev->state) return 0; again: abort = 1; err = xenbus_transaction_start(&xbt); if (err) { xenbus_switch_fatal(dev, depth, err, "starting transaction"); return 0; } err = xenbus_scanf(xbt, dev->nodename, "state", "%d", &current_state); if (err != 1) goto abort; err = xenbus_printf(xbt, dev->nodename, "state", "%d", state); if (err) { xenbus_switch_fatal(dev, depth, err, "writing new state"); goto abort; } abort = 0; abort: err = xenbus_transaction_end(xbt, abort); if (err) { if (err == -EAGAIN && !abort) goto again; xenbus_switch_fatal(dev, depth, err, "ending transaction"); } else dev->state = state; return 0; } /** * xenbus_switch_state - save the new state of a driver * @dev: xenbus device * @state: new state * * Advertise in the store a change of the given driver to the given new_state. * On error, the device will switch to XenbusStateClosing, and the error * will be saved in the store. * * Returns: %0 on success or -errno on error */ int xenbus_switch_state(struct xenbus_device *dev, enum xenbus_state state) { return __xenbus_switch_state(dev, state, 0); } EXPORT_SYMBOL_GPL(xenbus_switch_state); int xenbus_frontend_closed(struct xenbus_device *dev) { xenbus_switch_state(dev, XenbusStateClosed); complete(&dev->down); return 0; } EXPORT_SYMBOL_GPL(xenbus_frontend_closed); static void xenbus_va_dev_error(struct xenbus_device *dev, int err, const char *fmt, va_list ap) { unsigned int len; char *printf_buffer; char *path_buffer; #define PRINTF_BUFFER_SIZE 4096 printf_buffer = kmalloc(PRINTF_BUFFER_SIZE, GFP_KERNEL); if (!printf_buffer) return; len = sprintf(printf_buffer, "%i ", -err); vsnprintf(printf_buffer + len, PRINTF_BUFFER_SIZE - len, fmt, ap); dev_err(&dev->dev, "%s\n", printf_buffer); path_buffer = kasprintf(GFP_KERNEL, "error/%s", dev->nodename); if (path_buffer) xenbus_write(XBT_NIL, path_buffer, "error", printf_buffer); kfree(printf_buffer); kfree(path_buffer); } /** * xenbus_dev_error - place an error message into the store * @dev: xenbus device * @err: error to report * @fmt: error message format * * Report the given negative errno into the store, along with the given * formatted message. */ void xenbus_dev_error(struct xenbus_device *dev, int err, const char *fmt, ...) 
{ va_list ap; va_start(ap, fmt); xenbus_va_dev_error(dev, err, fmt, ap); va_end(ap); } EXPORT_SYMBOL_GPL(xenbus_dev_error); /** * xenbus_dev_fatal - put an error messages into the store and then shutdown * @dev: xenbus device * @err: error to report * @fmt: error message format * * Equivalent to xenbus_dev_error(dev, err, fmt, args), followed by * xenbus_switch_state(dev, XenbusStateClosing) to schedule an orderly * closedown of this driver and its peer. */ void xenbus_dev_fatal(struct xenbus_device *dev, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); xenbus_va_dev_error(dev, err, fmt, ap); va_end(ap); xenbus_switch_state(dev, XenbusStateClosing); } EXPORT_SYMBOL_GPL(xenbus_dev_fatal); /* * Equivalent to xenbus_dev_fatal(dev, err, fmt, args), but helps * avoiding recursion within xenbus_switch_state. */ static void xenbus_switch_fatal(struct xenbus_device *dev, int depth, int err, const char *fmt, ...) { va_list ap; va_start(ap, fmt); xenbus_va_dev_error(dev, err, fmt, ap); va_end(ap); if (!depth) __xenbus_switch_state(dev, XenbusStateClosing, 1); } /* * xenbus_setup_ring * @dev: xenbus device * @vaddr: pointer to starting virtual address of the ring * @nr_pages: number of pages to be granted * @grefs: grant reference array to be filled in * * Allocate physically contiguous pages for a shared ring buffer and grant it * to the peer of the given device. The ring buffer is initially filled with * zeroes. The virtual address of the ring is stored at @vaddr and the * grant references are stored in the @grefs array. In case of error @vaddr * will be set to NULL and @grefs will be filled with INVALID_GRANT_REF. */ int xenbus_setup_ring(struct xenbus_device *dev, gfp_t gfp, void **vaddr, unsigned int nr_pages, grant_ref_t *grefs) { unsigned long ring_size = nr_pages * XEN_PAGE_SIZE; grant_ref_t gref_head; unsigned int i; void *addr; int ret; addr = *vaddr = alloc_pages_exact(ring_size, gfp | __GFP_ZERO); if (!*vaddr) { ret = -ENOMEM; goto err; } ret = gnttab_alloc_grant_references(nr_pages, &gref_head); if (ret) { xenbus_dev_fatal(dev, ret, "granting access to %u ring pages", nr_pages); goto err; } for (i = 0; i < nr_pages; i++) { unsigned long gfn; if (is_vmalloc_addr(*vaddr)) gfn = pfn_to_gfn(vmalloc_to_pfn(addr)); else gfn = virt_to_gfn(addr); grefs[i] = gnttab_claim_grant_reference(&gref_head); gnttab_grant_foreign_access_ref(grefs[i], dev->otherend_id, gfn, 0); addr += XEN_PAGE_SIZE; } return 0; err: if (*vaddr) free_pages_exact(*vaddr, ring_size); for (i = 0; i < nr_pages; i++) grefs[i] = INVALID_GRANT_REF; *vaddr = NULL; return ret; } EXPORT_SYMBOL_GPL(xenbus_setup_ring); /* * xenbus_teardown_ring * @vaddr: starting virtual address of the ring * @nr_pages: number of pages * @grefs: grant reference array * * Remove grants for the shared ring buffer and free the associated memory. * On return the grant reference array is filled with INVALID_GRANT_REF. */ void xenbus_teardown_ring(void **vaddr, unsigned int nr_pages, grant_ref_t *grefs) { unsigned int i; for (i = 0; i < nr_pages; i++) { if (grefs[i] != INVALID_GRANT_REF) { gnttab_end_foreign_access(grefs[i], NULL); grefs[i] = INVALID_GRANT_REF; } } if (*vaddr) free_pages_exact(*vaddr, nr_pages * XEN_PAGE_SIZE); *vaddr = NULL; } EXPORT_SYMBOL_GPL(xenbus_teardown_ring); /* * Allocate an event channel for the given xenbus_device, assigning the newly * created local port to *port. Return 0 on success, or -errno on error. On * error, the device will switch to XenbusStateClosing, and the error will be * saved in the store. 
*/ int xenbus_alloc_evtchn(struct xenbus_device *dev, evtchn_port_t *port) { struct evtchn_alloc_unbound alloc_unbound; int err; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = dev->otherend_id; err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (err) xenbus_dev_fatal(dev, err, "allocating event channel"); else *port = alloc_unbound.port; return err; } EXPORT_SYMBOL_GPL(xenbus_alloc_evtchn); /* * Free an existing event channel. Returns 0 on success or -errno on error. */ int xenbus_free_evtchn(struct xenbus_device *dev, evtchn_port_t port) { struct evtchn_close close; int err; close.port = port; err = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close); if (err) xenbus_dev_error(dev, err, "freeing event channel %u", port); return err; } EXPORT_SYMBOL_GPL(xenbus_free_evtchn); /** * xenbus_map_ring_valloc - allocate & map pages of VA space * @dev: xenbus device * @gnt_refs: grant reference array * @nr_grefs: number of grant references * @vaddr: pointer to address to be filled out by mapping * * Map @nr_grefs pages of memory into this domain from another * domain's grant table. xenbus_map_ring_valloc allocates @nr_grefs * pages of virtual address space, maps the pages to that address, and sets * *vaddr to that address. If an error is returned, device will switch to * XenbusStateClosing and the error message will be saved in XenStore. * * Returns: %0 on success or -errno on error */ int xenbus_map_ring_valloc(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) { int err; struct map_ring_valloc *info; *vaddr = NULL; if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; info = kzalloc(sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->node = kzalloc(sizeof(*info->node), GFP_KERNEL); if (!info->node) err = -ENOMEM; else err = ring_ops->map(dev, info, gnt_refs, nr_grefs, vaddr); kfree(info->node); kfree(info); return err; } EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc); /* N.B. sizeof(phys_addr_t) doesn't always equal to sizeof(unsigned * long), e.g. 32-on-64. Caller is responsible for preparing the * right array to feed into this function */ static int __xenbus_map_ring(struct xenbus_device *dev, grant_ref_t *gnt_refs, unsigned int nr_grefs, grant_handle_t *handles, struct map_ring_valloc *info, unsigned int flags, bool *leaked) { int i, j; if (nr_grefs > XENBUS_MAX_RING_GRANTS) return -EINVAL; for (i = 0; i < nr_grefs; i++) { gnttab_set_map_op(&info->map[i], info->phys_addrs[i], flags, gnt_refs[i], dev->otherend_id); handles[i] = INVALID_GRANT_HANDLE; } gnttab_batch_map(info->map, i); for (i = 0; i < nr_grefs; i++) { if (info->map[i].status != GNTST_okay) { xenbus_dev_fatal(dev, info->map[i].status, "mapping in shared page %d from domain %d", gnt_refs[i], dev->otherend_id); goto fail; } else handles[i] = info->map[i].handle; } return 0; fail: for (i = j = 0; i < nr_grefs; i++) { if (handles[i] != INVALID_GRANT_HANDLE) { gnttab_set_unmap_op(&info->unmap[j], info->phys_addrs[i], GNTMAP_host_map, handles[i]); j++; } } BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, info->unmap, j)); *leaked = false; for (i = 0; i < j; i++) { if (info->unmap[i].status != GNTST_okay) { *leaked = true; break; } } return -ENOENT; } /** * xenbus_unmap_ring - unmap memory from another domain * @dev: xenbus device * @handles: grant handle array * @nr_handles: number of handles in the array * @vaddrs: addresses to unmap * * Unmap memory in this domain that was imported from another domain. 
* * Returns: %0 on success or GNTST_* on error * (see xen/include/interface/grant_table.h). */ static int xenbus_unmap_ring(struct xenbus_device *dev, grant_handle_t *handles, unsigned int nr_handles, unsigned long *vaddrs) { struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; int i; int err; if (nr_handles > XENBUS_MAX_RING_GRANTS) return -EINVAL; for (i = 0; i < nr_handles; i++) gnttab_set_unmap_op(&unmap[i], vaddrs[i], GNTMAP_host_map, handles[i]); BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i)); err = GNTST_okay; for (i = 0; i < nr_handles; i++) { if (unmap[i].status != GNTST_okay) { xenbus_dev_error(dev, unmap[i].status, "unmapping page at handle %d error %d", handles[i], unmap[i].status); err = unmap[i].status; break; } } return err; } static void xenbus_map_ring_setup_grant_hvm(unsigned long gfn, unsigned int goffset, unsigned int len, void *data) { struct map_ring_valloc *info = data; unsigned long vaddr = (unsigned long)gfn_to_virt(gfn); info->phys_addrs[info->idx] = vaddr; info->addrs[info->idx] = vaddr; info->idx++; } static int xenbus_map_ring_hvm(struct xenbus_device *dev, struct map_ring_valloc *info, grant_ref_t *gnt_ref, unsigned int nr_grefs, void **vaddr) { struct xenbus_map_node *node = info->node; int err; void *addr; bool leaked = false; unsigned int nr_pages = XENBUS_PAGES(nr_grefs); err = xen_alloc_unpopulated_pages(nr_pages, node->hvm.pages); if (err) goto out_err; gnttab_foreach_grant(node->hvm.pages, nr_grefs, xenbus_map_ring_setup_grant_hvm, info); err = __xenbus_map_ring(dev, gnt_ref, nr_grefs, node->handles, info, GNTMAP_host_map, &leaked); node->nr_handles = nr_grefs; if (err) goto out_free_ballooned_pages; addr = vmap(node->hvm.pages, nr_pages, VM_MAP | VM_IOREMAP, PAGE_KERNEL); if (!addr) { err = -ENOMEM; goto out_xenbus_unmap_ring; } node->hvm.addr = addr; spin_lock(&xenbus_valloc_lock); list_add(&node->next, &xenbus_valloc_pages); spin_unlock(&xenbus_valloc_lock); *vaddr = addr; info->node = NULL; return 0; out_xenbus_unmap_ring: if (!leaked) xenbus_unmap_ring(dev, node->handles, nr_grefs, info->addrs); else pr_alert("leaking %p size %u page(s)", addr, nr_pages); out_free_ballooned_pages: if (!leaked) xen_free_unpopulated_pages(nr_pages, node->hvm.pages); out_err: return err; } /** * xenbus_unmap_ring_vfree - unmap a page of memory from another domain * @dev: xenbus device * @vaddr: addr to unmap * * Based on Rusty Russell's skeleton driver's unmap_page. * Unmap a page of memory in this domain that was imported from another domain. * Use xenbus_unmap_ring_vfree if you mapped in your memory with * xenbus_map_ring_valloc (it will free the virtual address space). * * Returns: %0 on success or GNTST_* on error * (see xen/include/interface/grant_table.h). 
*/ int xenbus_unmap_ring_vfree(struct xenbus_device *dev, void *vaddr) { return ring_ops->unmap(dev, vaddr); } EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree); #ifdef CONFIG_XEN_PV static int map_ring_apply(pte_t *pte, unsigned long addr, void *data) { struct map_ring_valloc *info = data; info->phys_addrs[info->idx++] = arbitrary_virt_to_machine(pte).maddr; return 0; } static int xenbus_map_ring_pv(struct xenbus_device *dev, struct map_ring_valloc *info, grant_ref_t *gnt_refs, unsigned int nr_grefs, void **vaddr) { struct xenbus_map_node *node = info->node; struct vm_struct *area; bool leaked = false; int err = -ENOMEM; area = get_vm_area(XEN_PAGE_SIZE * nr_grefs, VM_IOREMAP); if (!area) return -ENOMEM; if (apply_to_page_range(&init_mm, (unsigned long)area->addr, XEN_PAGE_SIZE * nr_grefs, map_ring_apply, info)) goto failed; err = __xenbus_map_ring(dev, gnt_refs, nr_grefs, node->handles, info, GNTMAP_host_map | GNTMAP_contains_pte, &leaked); if (err) goto failed; node->nr_handles = nr_grefs; node->pv.area = area; spin_lock(&xenbus_valloc_lock); list_add(&node->next, &xenbus_valloc_pages); spin_unlock(&xenbus_valloc_lock); *vaddr = area->addr; info->node = NULL; return 0; failed: if (!leaked) free_vm_area(area); else pr_alert("leaking VM area %p size %u page(s)", area, nr_grefs); return err; } static int xenbus_unmap_ring_pv(struct xenbus_device *dev, void *vaddr) { struct xenbus_map_node *node; struct gnttab_unmap_grant_ref unmap[XENBUS_MAX_RING_GRANTS]; unsigned int level; int i; bool leaked = false; int err; spin_lock(&xenbus_valloc_lock); list_for_each_entry(node, &xenbus_valloc_pages, next) { if (node->pv.area->addr == vaddr) { list_del(&node->next); goto found; } } node = NULL; found: spin_unlock(&xenbus_valloc_lock); if (!node) { xenbus_dev_error(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } for (i = 0; i < node->nr_handles; i++) { unsigned long addr; memset(&unmap[i], 0, sizeof(unmap[i])); addr = (unsigned long)vaddr + (XEN_PAGE_SIZE * i); unmap[i].host_addr = arbitrary_virt_to_machine( lookup_address(addr, &level)).maddr; unmap[i].dev_bus_addr = 0; unmap[i].handle = node->handles[i]; } BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, i)); err = GNTST_okay; leaked = false; for (i = 0; i < node->nr_handles; i++) { if (unmap[i].status != GNTST_okay) { leaked = true; xenbus_dev_error(dev, unmap[i].status, "unmapping page at handle %d error %d", node->handles[i], unmap[i].status); err = unmap[i].status; break; } } if (!leaked) free_vm_area(node->pv.area); else pr_alert("leaking VM area %p size %u page(s)", node->pv.area, node->nr_handles); kfree(node); return err; } static const struct xenbus_ring_ops ring_ops_pv = { .map = xenbus_map_ring_pv, .unmap = xenbus_unmap_ring_pv, }; #endif struct unmap_ring_hvm { unsigned int idx; unsigned long addrs[XENBUS_MAX_RING_GRANTS]; }; static void xenbus_unmap_ring_setup_grant_hvm(unsigned long gfn, unsigned int goffset, unsigned int len, void *data) { struct unmap_ring_hvm *info = data; info->addrs[info->idx] = (unsigned long)gfn_to_virt(gfn); info->idx++; } static int xenbus_unmap_ring_hvm(struct xenbus_device *dev, void *vaddr) { int rv; struct xenbus_map_node *node; void *addr; struct unmap_ring_hvm info = { .idx = 0, }; unsigned int nr_pages; spin_lock(&xenbus_valloc_lock); list_for_each_entry(node, &xenbus_valloc_pages, next) { addr = node->hvm.addr; if (addr == vaddr) { list_del(&node->next); goto found; } } node = addr = NULL; found: spin_unlock(&xenbus_valloc_lock); if (!node) { 
xenbus_dev_error(dev, -ENOENT, "can't find mapped virtual address %p", vaddr); return GNTST_bad_virt_addr; } nr_pages = XENBUS_PAGES(node->nr_handles); gnttab_foreach_grant(node->hvm.pages, node->nr_handles, xenbus_unmap_ring_setup_grant_hvm, &info); rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, info.addrs); if (!rv) { vunmap(vaddr); xen_free_unpopulated_pages(nr_pages, node->hvm.pages); } else WARN(1, "Leaking %p, size %u page(s)\n", vaddr, nr_pages); kfree(node); return rv; } /** * xenbus_read_driver_state - read state from a store path * @path: path for driver * * Returns: the state of the driver rooted at the given store path, or * XenbusStateUnknown if no state can be read. */ enum xenbus_state xenbus_read_driver_state(const char *path) { enum xenbus_state result; int err = xenbus_gather(XBT_NIL, path, "state", "%d", &result, NULL); if (err) result = XenbusStateUnknown; return result; } EXPORT_SYMBOL_GPL(xenbus_read_driver_state); static const struct xenbus_ring_ops ring_ops_hvm = { .map = xenbus_map_ring_hvm, .unmap = xenbus_unmap_ring_hvm, }; void __init xenbus_ring_ops_init(void) { #ifdef CONFIG_XEN_PV if (!xen_feature(XENFEAT_auto_translated_physmap)) ring_ops = &ring_ops_pv; else #endif ring_ops = &ring_ops_hvm; }
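/*
 * Illustrative sketch (not part of the file above): how a hypothetical
 * frontend might combine the ring and event-channel helpers above during
 * connect. The names example_front, example_front_init, ->ring, ->gref and
 * ->evtchn are invented for illustration only; the calls and their error
 * behaviour follow the helpers as documented above. Assumes the same headers
 * as this file (<xen/xenbus.h>, <xen/grant_table.h>, <xen/events.h>).
 */
struct example_front {
        void *ring;             /* shared page, granted to the backend */
        grant_ref_t gref;
        evtchn_port_t evtchn;
};

static int example_front_init(struct xenbus_device *dev,
                              struct example_front *front)
{
        int err;

        /*
         * One zeroed, granted page. On failure *ring is NULL and gref is
         * INVALID_GRANT_REF, so the teardown path below stays safe.
         */
        err = xenbus_setup_ring(dev, GFP_KERNEL, &front->ring, 1, &front->gref);
        if (err)
                return err;

        /* Unbound event channel for the backend to bind to. */
        err = xenbus_alloc_evtchn(dev, &front->evtchn);
        if (err) {
                xenbus_teardown_ring(&front->ring, 1, &front->gref);
                return err;
        }

        /*
         * gref and evtchn would now be written to the frontend's xenstore
         * directory inside a transaction, after which the device would
         * typically switch to XenbusStateInitialised.
         */
        return 0;
}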
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for EMS Dr. Thomas Wuensche CPC-USB/ARM7 * * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche */ #include <linux/ethtool.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> MODULE_AUTHOR("Sebastian Haas <[email protected]>"); MODULE_DESCRIPTION("CAN driver for EMS Dr. Thomas Wuensche CAN/USB interfaces"); MODULE_LICENSE("GPL v2"); /* Control-Values for CPC_Control() Command Subject Selection */ #define CONTR_CAN_MESSAGE 0x04 #define CONTR_CAN_STATE 0x0C #define CONTR_BUS_ERROR 0x1C /* Control Command Actions */ #define CONTR_CONT_OFF 0 #define CONTR_CONT_ON 1 #define CONTR_ONCE 2 /* Messages from CPC to PC */ #define CPC_MSG_TYPE_CAN_FRAME 1 /* CAN data frame */ #define CPC_MSG_TYPE_RTR_FRAME 8 /* CAN remote frame */ #define CPC_MSG_TYPE_CAN_PARAMS 12 /* Actual CAN parameters */ #define CPC_MSG_TYPE_CAN_STATE 14 /* CAN state message */ #define CPC_MSG_TYPE_EXT_CAN_FRAME 16 /* Extended CAN data frame */ #define CPC_MSG_TYPE_EXT_RTR_FRAME 17 /* Extended remote frame */ #define CPC_MSG_TYPE_CONTROL 19 /* change interface behavior */ #define CPC_MSG_TYPE_CONFIRM 20 /* command processed confirmation */ #define CPC_MSG_TYPE_OVERRUN 21 /* overrun events */ #define CPC_MSG_TYPE_CAN_FRAME_ERROR 23 /* detected bus errors */ #define CPC_MSG_TYPE_ERR_COUNTER 25 /* RX/TX error counter */ /* Messages from the PC to the CPC interface */ #define CPC_CMD_TYPE_CAN_FRAME 1 /* CAN data frame */ #define CPC_CMD_TYPE_CONTROL 3 /* control of interface behavior */ #define CPC_CMD_TYPE_CAN_PARAMS 6 /* set CAN parameters */ #define CPC_CMD_TYPE_RTR_FRAME 13 /* CAN remote frame */ #define CPC_CMD_TYPE_CAN_STATE 14 /* CAN state message */ #define CPC_CMD_TYPE_EXT_CAN_FRAME 15 /* Extended CAN data frame */ #define CPC_CMD_TYPE_EXT_RTR_FRAME 16 /* Extended CAN remote frame */ #define CPC_CMD_TYPE_CAN_EXIT 200 /* exit the CAN */ #define CPC_CMD_TYPE_INQ_ERR_COUNTER 25 /* request the CAN error counters */ #define CPC_CMD_TYPE_CLEAR_MSG_QUEUE 8 /* clear CPC_MSG queue */ #define CPC_CMD_TYPE_CLEAR_CMD_QUEUE 28 /* clear CPC_CMD queue */ #define CPC_CC_TYPE_SJA1000 2 /* Philips basic CAN controller */ #define CPC_CAN_ECODE_ERRFRAME 0x01 /* Ecode type */ /* Overrun types */ #define CPC_OVR_EVENT_CAN 0x01 #define CPC_OVR_EVENT_CANSTATE 0x02 #define CPC_OVR_EVENT_BUSERROR 0x04 /* * If the CAN controller lost a message we indicate it with the highest bit * set in the count field. */ #define CPC_OVR_HW 0x80 /* Size of the "struct ems_cpc_msg" without the union */ #define CPC_MSG_HEADER_LEN 11 #define CPC_CAN_MSG_MIN_SIZE 5 /* Define these values to match your devices */ #define USB_CPCUSB_VENDOR_ID 0x12D6 #define USB_CPCUSB_ARM7_PRODUCT_ID 0x0444 /* Mode register NXP LPC2119/SJA1000 CAN Controller */ #define SJA1000_MOD_NORMAL 0x00 #define SJA1000_MOD_RM 0x01 /* ECC register NXP LPC2119/SJA1000 CAN Controller */ #define SJA1000_ECC_SEG 0x1F #define SJA1000_ECC_DIR 0x20 #define SJA1000_ECC_ERR 0x06 #define SJA1000_ECC_BIT 0x00 #define SJA1000_ECC_FORM 0x40 #define SJA1000_ECC_STUFF 0x80 #define SJA1000_ECC_MASK 0xc0 /* Status register content */ #define SJA1000_SR_BS 0x80 #define SJA1000_SR_ES 0x40 #define SJA1000_DEFAULT_OUTPUT_CONTROL 0xDA /* * The device actually uses a 16MHz clock to generate the CAN clock * but it expects SJA1000 bit settings based on 8MHz (is internally * converted). 
*/ #define EMS_USB_ARM7_CLOCK 8000000 #define CPC_TX_QUEUE_TRIGGER_LOW 25 #define CPC_TX_QUEUE_TRIGGER_HIGH 35 /* * CAN-Message representation in a CPC_MSG. Message object type is * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME. */ struct cpc_can_msg { __le32 id; u8 length; u8 msg[8]; }; /* Representation of the CAN parameters for the SJA1000 controller */ struct cpc_sja1000_params { u8 mode; u8 acc_code0; u8 acc_code1; u8 acc_code2; u8 acc_code3; u8 acc_mask0; u8 acc_mask1; u8 acc_mask2; u8 acc_mask3; u8 btr0; u8 btr1; u8 outp_contr; }; /* CAN params message representation */ struct cpc_can_params { u8 cc_type; /* Will support M16C CAN controller in the future */ union { struct cpc_sja1000_params sja1000; } cc_params; }; /* Structure for confirmed message handling */ struct cpc_confirm { u8 error; /* error code */ }; /* Structure for overrun conditions */ struct cpc_overrun { u8 event; u8 count; }; /* SJA1000 CAN errors (compatible to NXP LPC2119) */ struct cpc_sja1000_can_error { u8 ecc; u8 rxerr; u8 txerr; }; /* structure for CAN error conditions */ struct cpc_can_error { u8 ecode; struct { u8 cc_type; /* Other controllers may also provide error code capture regs */ union { struct cpc_sja1000_can_error sja1000; } regs; } cc; }; /* * Structure containing RX/TX error counter. This structure is used to request * the values of the CAN controllers TX and RX error counter. */ struct cpc_can_err_counter { u8 rx; u8 tx; }; /* Main message type used between library and application */ struct __packed ems_cpc_msg { u8 type; /* type of message */ u8 length; /* length of data within union 'msg' */ u8 msgid; /* confirmation handle */ __le32 ts_sec; /* timestamp in seconds */ __le32 ts_nsec; /* timestamp in nano seconds */ union __packed { u8 generic[64]; struct cpc_can_msg can_msg; struct cpc_can_params can_params; struct cpc_confirm confirmation; struct cpc_overrun overrun; struct cpc_can_error error; struct cpc_can_err_counter err_counter; u8 can_state; } msg; }; /* * Table of devices that work with this driver * NOTE: This driver supports only CPC-USB/ARM7 (LPC2119) yet. 
*/ static struct usb_device_id ems_usb_table[] = { {USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_ARM7_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ems_usb_table); #define RX_BUFFER_SIZE 64 #define CPC_HEADER_SIZE 4 #define INTR_IN_BUFFER_SIZE 4 #define MAX_RX_URBS 10 #define MAX_TX_URBS 10 struct ems_usb; struct ems_tx_urb_context { struct ems_usb *dev; u32 echo_index; }; struct ems_usb { struct can_priv can; /* must be the first member */ struct sk_buff *echo_skb[MAX_TX_URBS]; struct usb_device *udev; struct net_device *netdev; atomic_t active_tx_urbs; struct usb_anchor tx_submitted; struct ems_tx_urb_context tx_contexts[MAX_TX_URBS]; struct usb_anchor rx_submitted; struct urb *intr_urb; u8 *tx_msg_buffer; u8 *intr_in_buffer; unsigned int free_slots; /* remember number of available slots */ struct ems_cpc_msg active_params; /* active controller parameters */ void *rxbuf[MAX_RX_URBS]; dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; static void ems_usb_read_interrupt_callback(struct urb *urb) { struct ems_usb *dev = urb->context; struct net_device *netdev = dev->netdev; int err; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: dev->free_slots = dev->intr_in_buffer[1]; if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH && netif_queue_stopped(netdev)) netif_wake_queue(netdev); break; case -ECONNRESET: /* unlink */ case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: netdev_info(netdev, "Rx interrupt aborted %d\n", urb->status); break; } err = usb_submit_urb(urb, GFP_ATOMIC); if (err == -ENODEV) netif_device_detach(netdev); else if (err) netdev_err(netdev, "failed resubmitting intr urb: %d\n", err); } static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) { struct can_frame *cf; struct sk_buff *skb; int i; struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_skb(dev->netdev, &cf); if (skb == NULL) return; cf->can_id = le32_to_cpu(msg->msg.can_msg.id); cf->len = can_cc_dlc2len(msg->msg.can_msg.length & 0xF); if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) cf->can_id |= CAN_EFF_FLAG; if (msg->type == CPC_MSG_TYPE_RTR_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { cf->can_id |= CAN_RTR_FLAG; } else { for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.can_msg.msg[i]; stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_err_skb(dev->netdev, &cf); if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; if (state & SJA1000_SR_BS) { dev->can.state = CAN_STATE_BUS_OFF; if (skb) cf->can_id |= CAN_ERR_BUSOFF; dev->can.can_stats.bus_off++; can_bus_off(dev->netdev); } else if (state & SJA1000_SR_ES) { dev->can.state = CAN_STATE_ERROR_WARNING; dev->can.can_stats.error_warning++; } else { dev->can.state = CAN_STATE_ERROR_ACTIVE; dev->can.can_stats.error_passive++; } } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) { u8 ecc = msg->msg.error.cc.regs.sja1000.ecc; u8 txerr = msg->msg.error.cc.regs.sja1000.txerr; u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr; /* bus error interrupt */ dev->can.can_stats.bus_error++; if (skb) { cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SJA1000_ECC_MASK) { case SJA1000_ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case SJA1000_ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case 
SJA1000_ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[3] = ecc & SJA1000_ECC_SEG; break; } } /* Error occurred during transmission? */ if ((ecc & SJA1000_ECC_DIR) == 0) { stats->tx_errors++; if (skb) cf->data[2] |= CAN_ERR_PROT_TX; } else { stats->rx_errors++; } if (skb && (dev->can.state == CAN_STATE_ERROR_WARNING || dev->can.state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } stats->rx_over_errors++; stats->rx_errors++; } if (skb) netif_rx(skb); } /* * callback for bulk IN urb */ static void ems_usb_read_bulk_callback(struct urb *urb) { struct ems_usb *dev = urb->context; struct net_device *netdev; int retval; netdev = dev->netdev; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: /* success */ break; case -ENOENT: return; default: netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } if (urb->actual_length > CPC_HEADER_SIZE) { struct ems_cpc_msg *msg; u8 *ibuf = urb->transfer_buffer; u8 msg_count, start; msg_count = ibuf[0] & ~0x80; start = CPC_HEADER_SIZE; while (msg_count) { msg = (struct ems_cpc_msg *)&ibuf[start]; switch (msg->type) { case CPC_MSG_TYPE_CAN_STATE: /* Process CAN state changes */ ems_usb_rx_err(dev, msg); break; case CPC_MSG_TYPE_CAN_FRAME: case CPC_MSG_TYPE_EXT_CAN_FRAME: case CPC_MSG_TYPE_RTR_FRAME: case CPC_MSG_TYPE_EXT_RTR_FRAME: ems_usb_rx_can_msg(dev, msg); break; case CPC_MSG_TYPE_CAN_FRAME_ERROR: /* Process errorframe */ ems_usb_rx_err(dev, msg); break; case CPC_MSG_TYPE_OVERRUN: /* Message lost while receiving */ ems_usb_rx_err(dev, msg); break; } start += CPC_MSG_HEADER_LEN + msg->length; msg_count--; if (start > urb->transfer_buffer_length) { netdev_err(netdev, "format error\n"); break; } } } resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), urb->transfer_buffer, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval == -ENODEV) netif_device_detach(netdev); else if (retval) netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", retval); } /* * callback for bulk IN urb */ static void ems_usb_write_bulk_callback(struct urb *urb) { struct ems_tx_urb_context *context = urb->context; struct ems_usb *dev; struct net_device *netdev; BUG_ON(!context); dev = context->dev; netdev = dev->netdev; /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); atomic_dec(&dev->active_tx_urbs); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); netif_trans_update(netdev); /* transmission complete interrupt */ netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); /* Release context */ context->echo_index = MAX_TX_URBS; } /* * Send the given CPC command synchronously */ static int ems_usb_command_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) { int actual_length; /* Copy payload */ memcpy(&dev->tx_msg_buffer[CPC_HEADER_SIZE], msg, msg->length + CPC_MSG_HEADER_LEN); /* Clear header */ memset(&dev->tx_msg_buffer[0], 0, CPC_HEADER_SIZE); return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), &dev->tx_msg_buffer[0], msg->length + CPC_MSG_HEADER_LEN + CPC_HEADER_SIZE, &actual_length, 1000); } /* * 
Change CAN controllers' mode register */ static int ems_usb_write_mode(struct ems_usb *dev, u8 mode) { dev->active_params.msg.can_params.cc_params.sja1000.mode = mode; return ems_usb_command_msg(dev, &dev->active_params); } /* * Send a CPC_Control command to change behaviour when interface receives a CAN * message, bus error or CAN state changed notifications. */ static int ems_usb_control_cmd(struct ems_usb *dev, u8 val) { struct ems_cpc_msg cmd; cmd.type = CPC_CMD_TYPE_CONTROL; cmd.length = CPC_MSG_HEADER_LEN + 1; cmd.msgid = 0; cmd.msg.generic[0] = val; return ems_usb_command_msg(dev, &cmd); } /* * Start interface */ static int ems_usb_start(struct ems_usb *dev) { struct net_device *netdev = dev->netdev; int err, i; dev->intr_in_buffer[0] = 0; dev->free_slots = 50; /* initial size */ for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), buf, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); usb_free_urb(urb); break; } dev->rxbuf[i] = buf; dev->rxbuf_dma[i] = buf_dma; /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* Did we submit any URBs */ if (i == 0) { netdev_warn(netdev, "couldn't setup read URBs\n"); return err; } /* Warn if we've couldn't transmit all the URBs */ if (i < MAX_RX_URBS) netdev_warn(netdev, "rx performance may be slow\n"); /* Setup and start interrupt URB */ usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 1), dev->intr_in_buffer, INTR_IN_BUFFER_SIZE, ems_usb_read_interrupt_callback, dev, 1); err = usb_submit_urb(dev->intr_urb, GFP_KERNEL); if (err) { netdev_warn(netdev, "intr URB submit failed: %d\n", err); return err; } /* CPC-USB will transfer received message to host */ err = ems_usb_control_cmd(dev, CONTR_CAN_MESSAGE | CONTR_CONT_ON); if (err) goto failed; /* CPC-USB will transfer CAN state changes to host */ err = ems_usb_control_cmd(dev, CONTR_CAN_STATE | CONTR_CONT_ON); if (err) goto failed; /* CPC-USB will transfer bus errors to host */ err = ems_usb_control_cmd(dev, CONTR_BUS_ERROR | CONTR_CONT_ON); if (err) goto failed; err = ems_usb_write_mode(dev, SJA1000_MOD_NORMAL); if (err) goto failed; dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed: netdev_warn(netdev, "couldn't submit control: %d\n", err); return err; } static void unlink_all_urbs(struct ems_usb *dev) { int i; usb_unlink_urb(dev->intr_urb); usb_kill_anchored_urbs(&dev->rx_submitted); for (i = 0; i < MAX_RX_URBS; ++i) usb_free_coherent(dev->udev, RX_BUFFER_SIZE, dev->rxbuf[i], dev->rxbuf_dma[i]); usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = MAX_TX_URBS; } static int ems_usb_open(struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); int err; err = ems_usb_write_mode(dev, SJA1000_MOD_RM); if (err) return err; /* common open */ err = open_candev(netdev); if (err) return 
err; /* finally start device */ err = ems_usb_start(dev); if (err) { if (err == -ENODEV) netif_device_detach(dev->netdev); netdev_warn(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); struct ems_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; struct ems_cpc_msg *msg; struct urb *urb; u8 *buf; int i, err; size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN + sizeof(struct cpc_can_msg); if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto nomem; buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); goto nomem; } msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE]; msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); msg->msg.can_msg.length = cf->len; if (cf->can_id & CAN_RTR_FLAG) { msg->type = cf->can_id & CAN_EFF_FLAG ? CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME; msg->length = CPC_CAN_MSG_MIN_SIZE; } else { msg->type = cf->can_id & CAN_EFF_FLAG ? CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME; for (i = 0; i < cf->len; i++) msg->msg.can_msg.msg[i] = cf->data[i]; msg->length = CPC_CAN_MSG_MIN_SIZE + cf->len; } for (i = 0; i < MAX_TX_URBS; i++) { if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &dev->tx_contexts[i]; break; } } /* * May never happen! When this happens we'd more URBs in flight as * allowed (MAX_TX_URBS). */ if (!context) { usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); usb_free_urb(urb); netdev_warn(netdev, "couldn't find free context\n"); return NETDEV_TX_BUSY; } context->dev = dev; context->echo_index = i; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, size, ems_usb_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&dev->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); atomic_dec(&dev->active_tx_urbs); if (err == -ENODEV) { netif_device_detach(netdev); } else { netdev_warn(netdev, "failed tx_urb %d\n", err); stats->tx_dropped++; } } else { netif_trans_update(netdev); /* Slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { netif_stop_queue(netdev); } } /* * Release our reference to this URB, the USB core will eventually free * it entirely. 
*/ usb_free_urb(urb); return NETDEV_TX_OK; nomem: dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } static int ems_usb_close(struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); /* Stop polling */ unlink_all_urbs(dev); netif_stop_queue(netdev); /* Set CAN controller to reset mode */ if (ems_usb_write_mode(dev, SJA1000_MOD_RM)) netdev_warn(netdev, "couldn't stop device"); close_candev(netdev); return 0; } static const struct net_device_ops ems_usb_netdev_ops = { .ndo_open = ems_usb_open, .ndo_stop = ems_usb_close, .ndo_start_xmit = ems_usb_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ems_usb_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const ems_usb_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode) { struct ems_usb *dev = netdev_priv(netdev); switch (mode) { case CAN_MODE_START: if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL)) netdev_warn(netdev, "couldn't start device"); if (netif_queue_stopped(netdev)) netif_wake_queue(netdev); break; default: return -EOPNOTSUPP; } return 0; } static int ems_usb_set_bittiming(struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); struct can_bittiming *bt = &dev->can.bittiming; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; netdev_info(netdev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0; dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1; return ems_usb_command_msg(dev, &dev->active_params); } static void init_params_sja1000(struct ems_cpc_msg *msg) { struct cpc_sja1000_params *sja1000 = &msg->msg.can_params.cc_params.sja1000; msg->type = CPC_CMD_TYPE_CAN_PARAMS; msg->length = sizeof(struct cpc_can_params); msg->msgid = 0; msg->msg.can_params.cc_type = CPC_CC_TYPE_SJA1000; /* Acceptance filter open */ sja1000->acc_code0 = 0x00; sja1000->acc_code1 = 0x00; sja1000->acc_code2 = 0x00; sja1000->acc_code3 = 0x00; /* Acceptance filter open */ sja1000->acc_mask0 = 0xFF; sja1000->acc_mask1 = 0xFF; sja1000->acc_mask2 = 0xFF; sja1000->acc_mask3 = 0xFF; sja1000->btr0 = 0; sja1000->btr1 = 0; sja1000->outp_contr = SJA1000_DEFAULT_OUTPUT_CONTROL; sja1000->mode = SJA1000_MOD_RM; } /* * probe function for new CPC-USB devices */ static int ems_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct net_device *netdev; struct ems_usb *dev; int i, err = -ENOMEM; netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n"); return -ENOMEM; } dev = netdev_priv(netdev); dev->udev = interface_to_usbdev(intf); dev->netdev = netdev; dev->can.state = CAN_STATE_STOPPED; dev->can.clock.freq = EMS_USB_ARM7_CLOCK; dev->can.bittiming_const = &ems_usb_bittiming_const; dev->can.do_set_bittiming = ems_usb_set_bittiming; dev->can.do_set_mode = ems_usb_set_mode; dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; netdev->netdev_ops = &ems_usb_netdev_ops; netdev->ethtool_ops = &ems_usb_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&dev->rx_submitted); 
init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = MAX_TX_URBS; dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->intr_urb) goto cleanup_candev; dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); if (!dev->intr_in_buffer) goto cleanup_intr_urb; dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + sizeof(struct ems_cpc_msg), GFP_KERNEL); if (!dev->tx_msg_buffer) goto cleanup_intr_in_buffer; usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); init_params_sja1000(&dev->active_params); err = ems_usb_command_msg(dev, &dev->active_params); if (err) { netdev_err(netdev, "couldn't initialize controller: %d\n", err); goto cleanup_tx_msg_buffer; } err = register_candev(netdev); if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); goto cleanup_tx_msg_buffer; } return 0; cleanup_tx_msg_buffer: kfree(dev->tx_msg_buffer); cleanup_intr_in_buffer: kfree(dev->intr_in_buffer); cleanup_intr_urb: usb_free_urb(dev->intr_urb); cleanup_candev: free_candev(netdev); return err; } /* * called by the usb core when the device is removed from the system */ static void ems_usb_disconnect(struct usb_interface *intf) { struct ems_usb *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (dev) { unregister_netdev(dev->netdev); unlink_all_urbs(dev); usb_free_urb(dev->intr_urb); kfree(dev->intr_in_buffer); kfree(dev->tx_msg_buffer); free_candev(dev->netdev); } } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver ems_usb_driver = { .name = KBUILD_MODNAME, .probe = ems_usb_probe, .disconnect = ems_usb_disconnect, .id_table = ems_usb_table, }; module_usb_driver(ems_usb_driver);
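/*
 * Illustrative sketch (not part of the driver above): the BTR0/BTR1 packing
 * performed in ems_usb_set_bittiming(), pulled out as a standalone helper so
 * the bit layout is easier to see. The helper name and the worked numbers are
 * assumptions, not the driver's: one plausible 250 kbit/s setup on the 8 MHz
 * reference clock is brp=2, prop_seg=6, phase_seg1=7, phase_seg2=2, sjw=1,
 * which packs to BTR0=0x01, BTR1=0x1c.
 */
static inline void example_sja1000_pack_btr(const struct can_bittiming *bt,
                                            bool triple_sampling,
                                            u8 *btr0, u8 *btr1)
{
        /* BTR0: BRP in bits [5:0], SJW in bits [7:6], both stored minus 1. */
        *btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6);

        /* BTR1: TSEG1 in bits [3:0], TSEG2 in bits [6:4], SAM in bit 7. */
        *btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) |
                (((bt->phase_seg2 - 1) & 0x7) << 4);
        if (triple_sampling)
                *btr1 |= 0x80;
}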
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) International Business Machines Corp., 2006 * * Author: Artem Bityutskiy (Битюцкий Артём), Joern Engel */ /* * This is a small driver which implements fake MTD devices on top of UBI * volumes. This sounds strange, but it is in fact quite useful to make * MTD-oriented software (including all the legacy software) work on top of * UBI. * * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The * eraseblock size is equivalent to the logical eraseblock size of the volume. */ #include <linux/err.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mtd/ubi.h> #include <linux/mtd/mtd.h> #include "ubi-media.h" #define err_msg(fmt, ...) \ pr_err("gluebi (pid %d): %s: " fmt "\n", \ current->pid, __func__, ##__VA_ARGS__) /** * struct gluebi_device - a gluebi device description data structure. * @mtd: emulated MTD device description object * @refcnt: gluebi device reference count * @desc: UBI volume descriptor * @ubi_num: UBI device number this gluebi device works on * @vol_id: ID of UBI volume this gluebi device works on * @list: link in a list of gluebi devices */ struct gluebi_device { struct mtd_info mtd; int refcnt; struct ubi_volume_desc *desc; int ubi_num; int vol_id; struct list_head list; }; /* List of all gluebi devices */ static LIST_HEAD(gluebi_devices); static DEFINE_MUTEX(devices_mutex); /** * find_gluebi_nolock - find a gluebi device. * @ubi_num: UBI device number * @vol_id: volume ID * * This function seraches for gluebi device corresponding to UBI device * @ubi_num and UBI volume @vol_id. Returns the gluebi device description * object in case of success and %NULL in case of failure. The caller has to * have the &devices_mutex locked. */ static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id) { struct gluebi_device *gluebi; list_for_each_entry(gluebi, &gluebi_devices, list) if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id) return gluebi; return NULL; } /** * gluebi_get_device - get MTD device reference. * @mtd: the MTD device description object * * This function is called every time the MTD device is being opened and * implements the MTD get_device() operation. Returns zero in case of success * and a negative error code in case of failure. */ static int gluebi_get_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; int ubi_mode = UBI_READONLY; if (mtd->flags & MTD_WRITEABLE) ubi_mode = UBI_READWRITE; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); if (gluebi->refcnt > 0) { /* * The MTD device is already referenced and this is just one * more reference. MTD allows many users to open the same * volume simultaneously and do not distinguish between * readers/writers/exclusive/meta openers as UBI does. So we do * not open the UBI volume again - just increase the reference * counter and return. */ gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } /* * This is the first reference to this UBI volume via the MTD device * interface. Open the corresponding volume in read-write mode. 
*/ gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id, ubi_mode); if (IS_ERR(gluebi->desc)) { mutex_unlock(&devices_mutex); return PTR_ERR(gluebi->desc); } gluebi->refcnt += 1; mutex_unlock(&devices_mutex); return 0; } /** * gluebi_put_device - put MTD device reference. * @mtd: the MTD device description object * * This function is called every time the MTD device is being put. Returns * zero in case of success and a negative error code in case of failure. */ static void gluebi_put_device(struct mtd_info *mtd) { struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); mutex_lock(&devices_mutex); gluebi->refcnt -= 1; if (gluebi->refcnt == 0) ubi_close_volume(gluebi->desc); mutex_unlock(&devices_mutex); } /** * gluebi_read - read operation of emulated MTD devices. * @mtd: MTD device description object * @from: absolute offset from where to read * @len: how many bytes to read * @retlen: count of read bytes is returned here * @buf: buffer to store the read data * * This function returns zero in case of success and a negative error code in * case of failure. */ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, unsigned char *buf) { int err = 0, lnum, offs, bytes_left; struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(from, mtd->erasesize, &offs); bytes_left = len; while (bytes_left) { size_t to_read = mtd->erasesize - offs; if (to_read > bytes_left) to_read = bytes_left; err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); if (err) break; lnum += 1; offs = 0; bytes_left -= to_read; buf += to_read; } *retlen = len - bytes_left; return err; } /** * gluebi_write - write operation of emulated MTD devices. * @mtd: MTD device description object * @to: absolute offset where to write * @len: how many bytes to write * @retlen: count of written bytes is returned here * @buf: buffer with data to write * * This function returns zero in case of success and a negative error code in * case of failure. */ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf) { int err = 0, lnum, offs, bytes_left; struct gluebi_device *gluebi; gluebi = container_of(mtd, struct gluebi_device, mtd); lnum = div_u64_rem(to, mtd->erasesize, &offs); if (len % mtd->writesize || offs % mtd->writesize) return -EINVAL; bytes_left = len; while (bytes_left) { size_t to_write = mtd->erasesize - offs; if (to_write > bytes_left) to_write = bytes_left; err = ubi_leb_write(gluebi->desc, lnum, buf, offs, to_write); if (err) break; lnum += 1; offs = 0; bytes_left -= to_write; buf += to_write; } *retlen = len - bytes_left; return err; } /** * gluebi_erase - erase operation of emulated MTD devices. * @mtd: the MTD device description object * @instr: the erase operation description * * This function calls the erase callback when finishes. Returns zero in case * of success and a negative error code in case of failure. 
 */
static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        int err, i, lnum, count;
        struct gluebi_device *gluebi;

        if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd))
                return -EINVAL;

        lnum = mtd_div_by_eb(instr->addr, mtd);
        count = mtd_div_by_eb(instr->len, mtd);
        gluebi = container_of(mtd, struct gluebi_device, mtd);

        for (i = 0; i < count - 1; i++) {
                err = ubi_leb_unmap(gluebi->desc, lnum + i);
                if (err)
                        goto out_err;
        }

        /*
         * MTD erase operations are synchronous, so we have to make sure the
         * physical eraseblock is wiped out.
         *
         * Thus, perform leb_erase instead of leb_unmap operation - leb_erase
         * will wait for the end of operations
         */
        err = ubi_leb_erase(gluebi->desc, lnum + i);
        if (err)
                goto out_err;

        return 0;

out_err:
        instr->fail_addr = (long long)lnum * mtd->erasesize;
        return err;
}

/**
 * gluebi_create - create a gluebi device for an UBI volume.
 * @di: UBI device description object
 * @vi: UBI volume description object
 *
 * This function is called when a new UBI volume is created in order to create
 * the corresponding fake MTD device. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int gluebi_create(struct ubi_device_info *di,
                         struct ubi_volume_info *vi)
{
        struct gluebi_device *gluebi, *g;
        struct mtd_info *mtd;

        gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL);
        if (!gluebi)
                return -ENOMEM;

        mtd = &gluebi->mtd;
        mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL);
        if (!mtd->name) {
                kfree(gluebi);
                return -ENOMEM;
        }

        gluebi->vol_id = vi->vol_id;
        gluebi->ubi_num = vi->ubi_num;
        mtd->type = MTD_UBIVOLUME;
        if (!di->ro_mode)
                mtd->flags = MTD_WRITEABLE;
        mtd->owner = THIS_MODULE;
        mtd->writesize = di->min_io_size;
        mtd->erasesize = vi->usable_leb_size;
        mtd->_read = gluebi_read;
        mtd->_write = gluebi_write;
        mtd->_erase = gluebi_erase;
        mtd->_get_device = gluebi_get_device;
        mtd->_put_device = gluebi_put_device;

        /*
         * In case of a dynamic volume, MTD device size is just volume size. In
         * case of a static volume the size is equivalent to the amount of data
         * bytes.
         */
        if (vi->vol_type == UBI_DYNAMIC_VOLUME)
                mtd->size = (unsigned long long)vi->usable_leb_size * vi->size;
        else
                mtd->size = vi->used_bytes;

        /* Just a sanity check - make sure this gluebi device does not exist */
        mutex_lock(&devices_mutex);
        g = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (g)
                err_msg("gluebi MTD device %d from UBI device %d volume %d already exists",
                        g->mtd.index, vi->ubi_num, vi->vol_id);
        mutex_unlock(&devices_mutex);

        if (mtd_device_register(mtd, NULL, 0)) {
                err_msg("cannot add MTD device");
                kfree(mtd->name);
                kfree(gluebi);
                return -ENFILE;
        }

        mutex_lock(&devices_mutex);
        list_add_tail(&gluebi->list, &gluebi_devices);
        mutex_unlock(&devices_mutex);
        return 0;
}

/**
 * gluebi_remove - remove a gluebi device.
 * @vi: UBI volume description object
 *
 * This function is called when an UBI volume is removed and it removes the
 * corresponding fake MTD device. Returns zero in case of success and a
 * negative error code in case of failure.
 */
static int gluebi_remove(struct ubi_volume_info *vi)
{
        int err = 0;
        struct mtd_info *mtd;
        struct gluebi_device *gluebi;

        mutex_lock(&devices_mutex);
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                err_msg("got remove notification for unknown UBI device %d volume %d",
                        vi->ubi_num, vi->vol_id);
                err = -ENOENT;
        } else if (gluebi->refcnt)
                err = -EBUSY;
        else
                list_del(&gluebi->list);
        mutex_unlock(&devices_mutex);
        if (err)
                return err;

        mtd = &gluebi->mtd;
        err = mtd_device_unregister(mtd);
        if (err) {
                err_msg("cannot remove fake MTD device %d, UBI device %d, volume %d, error %d",
                        mtd->index, gluebi->ubi_num, gluebi->vol_id, err);
                mutex_lock(&devices_mutex);
                list_add_tail(&gluebi->list, &gluebi_devices);
                mutex_unlock(&devices_mutex);
                return err;
        }

        kfree(mtd->name);
        kfree(gluebi);
        return 0;
}

/**
 * gluebi_updated - UBI volume was updated notifier.
 * @vi: volume info structure
 *
 * This function is called every time an UBI volume is updated. It does nothing
 * if the volume @vi is dynamic, and changes MTD device size if the
 * volume is static. This is needed because static volumes cannot be read past
 * data they contain. This function returns zero in case of success and a
 * negative error code in case of error.
 */
static int gluebi_updated(struct ubi_volume_info *vi)
{
        struct gluebi_device *gluebi;

        mutex_lock(&devices_mutex);
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                mutex_unlock(&devices_mutex);
                err_msg("got update notification for unknown UBI device %d volume %d",
                        vi->ubi_num, vi->vol_id);
                return -ENOENT;
        }

        if (vi->vol_type == UBI_STATIC_VOLUME)
                gluebi->mtd.size = vi->used_bytes;
        mutex_unlock(&devices_mutex);
        return 0;
}

/**
 * gluebi_resized - UBI volume was re-sized notifier.
 * @vi: volume info structure
 *
 * This function is called every time an UBI volume is re-sized. It changes the
 * corresponding fake MTD device size. This function returns zero in case of
 * success and a negative error code in case of error.
 */
static int gluebi_resized(struct ubi_volume_info *vi)
{
        struct gluebi_device *gluebi;

        mutex_lock(&devices_mutex);
        gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id);
        if (!gluebi) {
                mutex_unlock(&devices_mutex);
                err_msg("got update notification for unknown UBI device %d volume %d",
                        vi->ubi_num, vi->vol_id);
                return -ENOENT;
        }
        gluebi->mtd.size = vi->used_bytes;
        mutex_unlock(&devices_mutex);
        return 0;
}

/**
 * gluebi_notify - UBI notification handler.
* @nb: registered notifier block * @l: notification type * @ns_ptr: pointer to the &struct ubi_notification object */ static int gluebi_notify(struct notifier_block *nb, unsigned long l, void *ns_ptr) { struct ubi_notification *nt = ns_ptr; switch (l) { case UBI_VOLUME_ADDED: gluebi_create(&nt->di, &nt->vi); break; case UBI_VOLUME_REMOVED: gluebi_remove(&nt->vi); break; case UBI_VOLUME_RESIZED: gluebi_resized(&nt->vi); break; case UBI_VOLUME_UPDATED: gluebi_updated(&nt->vi); break; default: break; } return NOTIFY_OK; } static struct notifier_block gluebi_notifier = { .notifier_call = gluebi_notify, }; static int __init ubi_gluebi_init(void) { return ubi_register_volume_notifier(&gluebi_notifier, 0); } static void __exit ubi_gluebi_exit(void) { struct gluebi_device *gluebi, *g; list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) { int err; struct mtd_info *mtd = &gluebi->mtd; err = mtd_device_unregister(mtd); if (err) err_msg("error %d while removing gluebi MTD device %d, UBI device %d, volume %d - ignoring", err, mtd->index, gluebi->ubi_num, gluebi->vol_id); kfree(mtd->name); kfree(gluebi); } ubi_unregister_volume_notifier(&gluebi_notifier); } module_init(ubi_gluebi_init); module_exit(ubi_gluebi_exit); MODULE_DESCRIPTION("MTD emulation layer over UBI volumes"); MODULE_AUTHOR("Artem Bityutskiy, Joern Engel"); MODULE_LICENSE("GPL");
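/*
 * Illustrative sketch (not part of the driver above): the MTD-offset to
 * (logical eraseblock, offset) mapping that gluebi_read()/gluebi_write()
 * perform with div_u64_rem(). The helper name is invented; as a worked
 * example, with a usable LEB size of 129024 bytes (126 KiB) an MTD offset of
 * 300000 bytes would map to lnum=2, offs=41952. Relies on <linux/math64.h>
 * and <linux/mtd/mtd.h>, which are already included in this file.
 */
static inline u64 example_gluebi_map_offset(struct mtd_info *mtd, loff_t from,
                                            u32 *offs)
{
        /*
         * mtd->erasesize is set to the UBI volume's usable LEB size in
         * gluebi_create(), so one emulated MTD eraseblock equals one LEB.
         */
        return div_u64_rem(from, mtd->erasesize, offs);
}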
// SPDX-License-Identifier: GPL-2.0 /* * PSC clock descriptions for TI DA830/OMAP-L137/AM17XX * * Copyright (C) 2018 David Lechner <[email protected]> */ #include <linux/clk-provider.h> #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/types.h> #include "psc.h" LPSC_CLKDEV1(aemif_clkdev, NULL, "ti-aemif"); LPSC_CLKDEV1(spi0_clkdev, NULL, "spi_davinci.0"); LPSC_CLKDEV1(mmcsd_clkdev, NULL, "da830-mmc.0"); LPSC_CLKDEV1(uart0_clkdev, NULL, "serial8250.0"); static const struct davinci_lpsc_clk_info da830_psc0_info[] = { LPSC(0, 0, tpcc, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(1, 0, tptc0, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(2, 0, tptc1, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(3, 0, aemif, pll0_sysclk3, aemif_clkdev, LPSC_ALWAYS_ENABLED), LPSC(4, 0, spi0, pll0_sysclk2, spi0_clkdev, 0), LPSC(5, 0, mmcsd, pll0_sysclk2, mmcsd_clkdev, 0), LPSC(6, 0, aintc, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED), LPSC(7, 0, arm_rom, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(8, 0, secu_mgr, pll0_sysclk4, NULL, LPSC_ALWAYS_ENABLED), LPSC(9, 0, uart0, pll0_sysclk2, uart0_clkdev, 0), LPSC(10, 0, scr0_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(11, 0, scr1_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(12, 0, scr2_ss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(13, 0, pruss, pll0_sysclk2, NULL, LPSC_ALWAYS_ENABLED), LPSC(14, 0, arm, pll0_sysclk6, NULL, LPSC_ALWAYS_ENABLED), { } }; static int da830_psc0_init(struct device *dev, void __iomem *base) { return davinci_psc_register_clocks(dev, da830_psc0_info, 16, base); } static struct clk_bulk_data da830_psc0_parent_clks[] = { { .id = "pll0_sysclk2" }, { .id = "pll0_sysclk3" }, { .id = "pll0_sysclk4" }, { .id = "pll0_sysclk6" }, }; const struct davinci_psc_init_data da830_psc0_init_data = { .parent_clks = da830_psc0_parent_clks, .num_parent_clks = ARRAY_SIZE(da830_psc0_parent_clks), .psc_init = &da830_psc0_init, }; LPSC_CLKDEV3(usb0_clkdev, "fck", "da830-usb-phy-clks", NULL, "musb-da8xx", NULL, "cppi41-dmaengine"); LPSC_CLKDEV1(usb1_clkdev, NULL, "ohci-da8xx"); /* REVISIT: gpio-davinci.c should be modified to drop con_id */ LPSC_CLKDEV1(gpio_clkdev, "gpio", NULL); LPSC_CLKDEV2(emac_clkdev, NULL, "davinci_emac.1", "fck", "davinci_mdio.0"); LPSC_CLKDEV1(mcasp0_clkdev, NULL, "davinci-mcasp.0"); LPSC_CLKDEV1(mcasp1_clkdev, NULL, "davinci-mcasp.1"); LPSC_CLKDEV1(mcasp2_clkdev, NULL, "davinci-mcasp.2"); LPSC_CLKDEV1(spi1_clkdev, NULL, "spi_davinci.1"); LPSC_CLKDEV1(i2c1_clkdev, NULL, "i2c_davinci.2"); LPSC_CLKDEV1(uart1_clkdev, NULL, "serial8250.1"); LPSC_CLKDEV1(uart2_clkdev, NULL, "serial8250.2"); LPSC_CLKDEV1(lcdc_clkdev, "fck", "da8xx_lcdc.0"); LPSC_CLKDEV2(pwm_clkdev, "fck", "ehrpwm.0", "fck", "ehrpwm.1"); LPSC_CLKDEV3(ecap_clkdev, "fck", "ecap.0", "fck", "ecap.1", "fck", "ecap.2"); LPSC_CLKDEV2(eqep_clkdev, NULL, "eqep.0", NULL, "eqep.1"); static const struct davinci_lpsc_clk_info da830_psc1_info[] = { LPSC(1, 0, usb0, pll0_sysclk2, usb0_clkdev, 0), LPSC(2, 0, usb1, pll0_sysclk4, usb1_clkdev, 0), LPSC(3, 0, gpio, pll0_sysclk4, gpio_clkdev, 0), LPSC(5, 0, emac, pll0_sysclk4, emac_clkdev, 0), LPSC(6, 0, emif3, pll0_sysclk5, NULL, LPSC_ALWAYS_ENABLED), LPSC(7, 0, mcasp0, pll0_sysclk2, mcasp0_clkdev, 0), LPSC(8, 0, mcasp1, pll0_sysclk2, mcasp1_clkdev, 0), LPSC(9, 0, mcasp2, pll0_sysclk2, mcasp2_clkdev, 0), LPSC(10, 0, spi1, pll0_sysclk2, spi1_clkdev, 0), LPSC(11, 0, i2c1, pll0_sysclk4, i2c1_clkdev, 0), LPSC(12, 0, uart1, pll0_sysclk2, uart1_clkdev, 0), LPSC(13, 0, uart2, 
pll0_sysclk2, uart2_clkdev, 0), LPSC(16, 0, lcdc, pll0_sysclk2, lcdc_clkdev, 0), LPSC(17, 0, pwm, pll0_sysclk2, pwm_clkdev, 0), LPSC(20, 0, ecap, pll0_sysclk2, ecap_clkdev, 0), LPSC(21, 0, eqep, pll0_sysclk2, eqep_clkdev, 0), { } }; static int da830_psc1_init(struct device *dev, void __iomem *base) { return davinci_psc_register_clocks(dev, da830_psc1_info, 32, base); } static struct clk_bulk_data da830_psc1_parent_clks[] = { { .id = "pll0_sysclk2" }, { .id = "pll0_sysclk4" }, { .id = "pll0_sysclk5" }, }; const struct davinci_psc_init_data da830_psc1_init_data = { .parent_clks = da830_psc1_parent_clks, .num_parent_clks = ARRAY_SIZE(da830_psc1_parent_clks), .psc_init = &da830_psc1_init, };
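/*
 * Illustrative sketch (not part of the file above): what the LPSC_CLKDEV*
 * con_id/dev_id pairs mean for a clock consumer. The lookups registered above
 * are what clk_get()/devm_clk_get() match against, so the spi0 LPSC
 * (registered via spi0_clkdev with con_id NULL and dev_id "spi_davinci.0")
 * would be found by a probe like the hypothetical one below. The function and
 * variable names are invented; assumes <linux/platform_device.h> and
 * <linux/err.h> in addition to the headers included above.
 */
static int example_spi0_consumer_probe(struct platform_device *pdev)
{
        struct clk *clk;

        /*
         * dev_name(&pdev->dev) must be "spi_davinci.0" for this lookup to
         * match; con_id is NULL, mirroring LPSC_CLKDEV1(spi0_clkdev, NULL, ...).
         */
        clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        return clk_prepare_enable(clk);
}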
// SPDX-License-Identifier: GPL-2.0 /* * Combined GPIO and pin controller support for Renesas RZ/A1 (r7s72100) SoC * * Copyright (C) 2017 Jacopo Mondi */ /* * This pin controller/gpio combined driver supports Renesas devices of RZ/A1 * family. * This includes SoCs which are sub- or super- sets of this particular line, * as RZ/A1H (r7s721000), RZ/A1M (r7s721010) and RZ/A1L (r7s721020). */ #include <linux/bitops.h> #include <linux/err.h> #include <linux/gpio/driver.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/pinctrl/pinctrl.h> #include <linux/pinctrl/pinmux.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/slab.h> #include "../core.h" #include "../devicetree.h" #include "../pinconf.h" #include "../pinmux.h" #define DRIVER_NAME "pinctrl-rza1" #define RZA1_P_REG 0x0000 #define RZA1_PPR_REG 0x0200 #define RZA1_PM_REG 0x0300 #define RZA1_PMC_REG 0x0400 #define RZA1_PFC_REG 0x0500 #define RZA1_PFCE_REG 0x0600 #define RZA1_PFCEA_REG 0x0a00 #define RZA1_PIBC_REG 0x4000 #define RZA1_PBDC_REG 0x4100 #define RZA1_PIPC_REG 0x4200 #define RZA1_ADDR(mem, reg, port) ((mem) + (reg) + ((port) * 4)) #define RZA1_NPORTS 12 #define RZA1_PINS_PER_PORT 16 #define RZA1_NPINS (RZA1_PINS_PER_PORT * RZA1_NPORTS) #define RZA1_PIN_ID_TO_PORT(id) ((id) / RZA1_PINS_PER_PORT) #define RZA1_PIN_ID_TO_PIN(id) ((id) % RZA1_PINS_PER_PORT) /* * Use 16 lower bits [15:0] for pin identifier * Use 16 higher bits [31:16] for pin mux function */ #define MUX_PIN_ID_MASK GENMASK(15, 0) #define MUX_FUNC_MASK GENMASK(31, 16) #define MUX_FUNC_OFFS 16 #define MUX_FUNC(pinconf) \ ((pinconf & MUX_FUNC_MASK) >> MUX_FUNC_OFFS) #define MUX_FUNC_PFC_MASK BIT(0) #define MUX_FUNC_PFCE_MASK BIT(1) #define MUX_FUNC_PFCEA_MASK BIT(2) /* Pin mux flags */ #define MUX_FLAGS_BIDIR BIT(0) #define MUX_FLAGS_SWIO_INPUT BIT(1) #define MUX_FLAGS_SWIO_OUTPUT BIT(2) /* ---------------------------------------------------------------------------- * RZ/A1 pinmux flags */ /* * rza1_bidir_pin - describe a single pin that needs bidir flag applied. */ struct rza1_bidir_pin { u8 pin: 4; u8 func: 4; }; /* * rza1_bidir_entry - describe a list of pins that needs bidir flag applied. * Each struct rza1_bidir_entry describes a port. */ struct rza1_bidir_entry { const unsigned int npins; const struct rza1_bidir_pin *pins; }; /* * rza1_swio_pin - describe a single pin that needs swio flag applied. 
*/ struct rza1_swio_pin { u16 pin: 4; u16 port: 4; u16 func: 4; u16 input: 1; }; /* * rza1_swio_entry - describe a list of pins that needs swio flag applied */ struct rza1_swio_entry { const unsigned int npins; const struct rza1_swio_pin *pins; }; /* * rza1_pinmux_conf - group together bidir and swio pinmux flag tables */ struct rza1_pinmux_conf { const struct rza1_bidir_entry *bidir_entries; const struct rza1_swio_entry *swio_entries; }; /* ---------------------------------------------------------------------------- * RZ/A1H (r7s72100) pinmux flags */ static const struct rza1_bidir_pin rza1h_bidir_pins_p1[] = { { .pin = 0, .func = 1 }, { .pin = 1, .func = 1 }, { .pin = 2, .func = 1 }, { .pin = 3, .func = 1 }, { .pin = 4, .func = 1 }, { .pin = 5, .func = 1 }, { .pin = 6, .func = 1 }, { .pin = 7, .func = 1 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p2[] = { { .pin = 0, .func = 1 }, { .pin = 1, .func = 1 }, { .pin = 2, .func = 1 }, { .pin = 3, .func = 1 }, { .pin = 4, .func = 1 }, { .pin = 0, .func = 4 }, { .pin = 1, .func = 4 }, { .pin = 2, .func = 4 }, { .pin = 3, .func = 4 }, { .pin = 5, .func = 1 }, { .pin = 6, .func = 1 }, { .pin = 7, .func = 1 }, { .pin = 8, .func = 1 }, { .pin = 9, .func = 1 }, { .pin = 10, .func = 1 }, { .pin = 11, .func = 1 }, { .pin = 12, .func = 1 }, { .pin = 13, .func = 1 }, { .pin = 14, .func = 1 }, { .pin = 15, .func = 1 }, { .pin = 12, .func = 4 }, { .pin = 13, .func = 4 }, { .pin = 14, .func = 4 }, { .pin = 15, .func = 4 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p3[] = { { .pin = 3, .func = 2 }, { .pin = 10, .func = 7 }, { .pin = 11, .func = 7 }, { .pin = 13, .func = 7 }, { .pin = 14, .func = 7 }, { .pin = 15, .func = 7 }, { .pin = 10, .func = 8 }, { .pin = 11, .func = 8 }, { .pin = 13, .func = 8 }, { .pin = 14, .func = 8 }, { .pin = 15, .func = 8 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p4[] = { { .pin = 0, .func = 8 }, { .pin = 1, .func = 8 }, { .pin = 2, .func = 8 }, { .pin = 3, .func = 8 }, { .pin = 10, .func = 3 }, { .pin = 11, .func = 3 }, { .pin = 13, .func = 3 }, { .pin = 14, .func = 3 }, { .pin = 15, .func = 3 }, { .pin = 10, .func = 4 }, { .pin = 11, .func = 4 }, { .pin = 13, .func = 4 }, { .pin = 14, .func = 4 }, { .pin = 15, .func = 4 }, { .pin = 12, .func = 5 }, { .pin = 13, .func = 5 }, { .pin = 14, .func = 5 }, { .pin = 15, .func = 5 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p6[] = { { .pin = 0, .func = 1 }, { .pin = 1, .func = 1 }, { .pin = 2, .func = 1 }, { .pin = 3, .func = 1 }, { .pin = 4, .func = 1 }, { .pin = 5, .func = 1 }, { .pin = 6, .func = 1 }, { .pin = 7, .func = 1 }, { .pin = 8, .func = 1 }, { .pin = 9, .func = 1 }, { .pin = 10, .func = 1 }, { .pin = 11, .func = 1 }, { .pin = 12, .func = 1 }, { .pin = 13, .func = 1 }, { .pin = 14, .func = 1 }, { .pin = 15, .func = 1 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p7[] = { { .pin = 13, .func = 3 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p8[] = { { .pin = 8, .func = 3 }, { .pin = 9, .func = 3 }, { .pin = 10, .func = 3 }, { .pin = 11, .func = 3 }, { .pin = 14, .func = 2 }, { .pin = 15, .func = 2 }, { .pin = 14, .func = 3 }, { .pin = 15, .func = 3 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p9[] = { { .pin = 0, .func = 2 }, { .pin = 1, .func = 2 }, { .pin = 4, .func = 2 }, { .pin = 5, .func = 2 }, { .pin = 6, .func = 2 }, { .pin = 7, .func = 2 }, }; static const struct rza1_bidir_pin rza1h_bidir_pins_p11[] = { { .pin = 6, .func = 2 }, { .pin = 7, .func = 2 }, { .pin = 9, .func = 2 }, 
{ .pin = 6, .func = 4 }, { .pin = 7, .func = 4 }, { .pin = 9, .func = 4 }, { .pin = 10, .func = 2 }, { .pin = 11, .func = 2 }, { .pin = 10, .func = 4 }, { .pin = 11, .func = 4 }, { .pin = 12, .func = 4 }, { .pin = 13, .func = 4 }, { .pin = 14, .func = 4 }, { .pin = 15, .func = 4 }, }; static const struct rza1_swio_pin rza1h_swio_pins[] = { { .port = 2, .pin = 7, .func = 4, .input = 0 }, { .port = 2, .pin = 11, .func = 4, .input = 0 }, { .port = 3, .pin = 7, .func = 3, .input = 0 }, { .port = 3, .pin = 7, .func = 8, .input = 0 }, { .port = 4, .pin = 7, .func = 5, .input = 0 }, { .port = 4, .pin = 7, .func = 11, .input = 0 }, { .port = 4, .pin = 15, .func = 6, .input = 0 }, { .port = 5, .pin = 0, .func = 1, .input = 1 }, { .port = 5, .pin = 1, .func = 1, .input = 1 }, { .port = 5, .pin = 2, .func = 1, .input = 1 }, { .port = 5, .pin = 3, .func = 1, .input = 1 }, { .port = 5, .pin = 4, .func = 1, .input = 1 }, { .port = 5, .pin = 5, .func = 1, .input = 1 }, { .port = 5, .pin = 6, .func = 1, .input = 1 }, { .port = 5, .pin = 7, .func = 1, .input = 1 }, { .port = 7, .pin = 4, .func = 6, .input = 0 }, { .port = 7, .pin = 11, .func = 2, .input = 0 }, { .port = 8, .pin = 10, .func = 8, .input = 0 }, { .port = 10, .pin = 15, .func = 2, .input = 0 }, }; static const struct rza1_bidir_entry rza1h_bidir_entries[RZA1_NPORTS] = { [1] = { ARRAY_SIZE(rza1h_bidir_pins_p1), rza1h_bidir_pins_p1 }, [2] = { ARRAY_SIZE(rza1h_bidir_pins_p2), rza1h_bidir_pins_p2 }, [3] = { ARRAY_SIZE(rza1h_bidir_pins_p3), rza1h_bidir_pins_p3 }, [4] = { ARRAY_SIZE(rza1h_bidir_pins_p4), rza1h_bidir_pins_p4 }, [6] = { ARRAY_SIZE(rza1h_bidir_pins_p6), rza1h_bidir_pins_p6 }, [7] = { ARRAY_SIZE(rza1h_bidir_pins_p7), rza1h_bidir_pins_p7 }, [8] = { ARRAY_SIZE(rza1h_bidir_pins_p8), rza1h_bidir_pins_p8 }, [9] = { ARRAY_SIZE(rza1h_bidir_pins_p9), rza1h_bidir_pins_p9 }, [11] = { ARRAY_SIZE(rza1h_bidir_pins_p11), rza1h_bidir_pins_p11 }, }; static const struct rza1_swio_entry rza1h_swio_entries[] = { [0] = { ARRAY_SIZE(rza1h_swio_pins), rza1h_swio_pins }, }; /* RZ/A1H (r7s72100x) pinmux flags table */ static const struct rza1_pinmux_conf rza1h_pmx_conf = { .bidir_entries = rza1h_bidir_entries, .swio_entries = rza1h_swio_entries, }; /* ---------------------------------------------------------------------------- * RZ/A1L (r7s72102) pinmux flags */ static const struct rza1_bidir_pin rza1l_bidir_pins_p1[] = { { .pin = 0, .func = 1 }, { .pin = 1, .func = 1 }, { .pin = 2, .func = 1 }, { .pin = 3, .func = 1 }, { .pin = 4, .func = 1 }, { .pin = 5, .func = 1 }, { .pin = 6, .func = 1 }, { .pin = 7, .func = 1 }, }; static const struct rza1_bidir_pin rza1l_bidir_pins_p3[] = { { .pin = 0, .func = 2 }, { .pin = 1, .func = 2 }, { .pin = 2, .func = 2 }, { .pin = 4, .func = 2 }, { .pin = 5, .func = 2 }, { .pin = 10, .func = 2 }, { .pin = 11, .func = 2 }, { .pin = 12, .func = 2 }, { .pin = 13, .func = 2 }, }; static const struct rza1_bidir_pin rza1l_bidir_pins_p4[] = { { .pin = 1, .func = 4 }, { .pin = 2, .func = 2 }, { .pin = 3, .func = 2 }, { .pin = 6, .func = 2 }, { .pin = 7, .func = 2 }, }; static const struct rza1_bidir_pin rza1l_bidir_pins_p5[] = { { .pin = 0, .func = 1 }, { .pin = 1, .func = 1 }, { .pin = 2, .func = 1 }, { .pin = 3, .func = 1 }, { .pin = 4, .func = 1 }, { .pin = 5, .func = 1 }, { .pin = 6, .func = 1 }, { .pin = 7, .func = 1 }, { .pin = 8, .func = 1 }, { .pin = 9, .func = 1 }, { .pin = 10, .func = 1 }, { .pin = 11, .func = 1 }, { .pin = 12, .func = 1 }, { .pin = 13, .func = 1 }, { .pin = 14, .func = 1 }, { .pin = 15, .func = 1 }, { .pin = 
0, .func = 2 }, { .pin = 1, .func = 2 }, { .pin = 2, .func = 2 }, { .pin = 3, .func = 2 }, }; static const struct rza1_bidir_pin rza1l_bidir_pins_p6[] = { { .pin = 0, .func = 1 }, { .pin = 1, .func = 1 }, { .pin = 2, .func = 1 }, { .pin = 3, .func = 1 }, { .pin = 4, .func = 1 }, { .pin = 5, .func = 1 }, { .pin = 6, .func = 1 }, { .pin = 7, .func = 1 }, { .pin = 8, .func = 1 }, { .pin = 9, .func = 1 }, { .pin = 10, .func = 1 }, { .pin = 11, .func = 1 }, { .pin = 12, .func = 1 }, { .pin = 13, .func = 1 }, { .pin = 14, .func = 1 }, { .pin = 15, .func = 1 }, }; static const struct rza1_bidir_pin rza1l_bidir_pins_p7[] = { { .pin = 2, .func = 2 }, { .pin = 3, .func = 2 }, { .pin = 5, .func = 2 }, { .pin = 6, .func = 2 }, { .pin = 7, .func = 2 }, { .pin = 2, .func = 3 }, { .pin = 3, .func = 3 }, { .pin = 5, .func = 3 }, { .pin = 6, .func = 3 }, { .pin = 7, .func = 3 }, }; static const struct rza1_bidir_pin rza1l_bidir_pins_p9[] = { { .pin = 1, .func = 2 }, { .pin = 0, .func = 3 }, { .pin = 1, .func = 3 }, { .pin = 3, .func = 3 }, { .pin = 4, .func = 3 }, { .pin = 5, .func = 3 }, }; static const struct rza1_swio_pin rza1l_swio_pins[] = { { .port = 2, .pin = 8, .func = 2, .input = 0 }, { .port = 5, .pin = 6, .func = 3, .input = 0 }, { .port = 6, .pin = 6, .func = 3, .input = 0 }, { .port = 6, .pin = 10, .func = 3, .input = 0 }, { .port = 7, .pin = 10, .func = 2, .input = 0 }, { .port = 8, .pin = 2, .func = 3, .input = 0 }, }; static const struct rza1_bidir_entry rza1l_bidir_entries[RZA1_NPORTS] = { [1] = { ARRAY_SIZE(rza1l_bidir_pins_p1), rza1l_bidir_pins_p1 }, [3] = { ARRAY_SIZE(rza1l_bidir_pins_p3), rza1l_bidir_pins_p3 }, [4] = { ARRAY_SIZE(rza1l_bidir_pins_p4), rza1l_bidir_pins_p4 }, [5] = { ARRAY_SIZE(rza1l_bidir_pins_p4), rza1l_bidir_pins_p5 }, [6] = { ARRAY_SIZE(rza1l_bidir_pins_p6), rza1l_bidir_pins_p6 }, [7] = { ARRAY_SIZE(rza1l_bidir_pins_p7), rza1l_bidir_pins_p7 }, [9] = { ARRAY_SIZE(rza1l_bidir_pins_p9), rza1l_bidir_pins_p9 }, }; static const struct rza1_swio_entry rza1l_swio_entries[] = { [0] = { ARRAY_SIZE(rza1l_swio_pins), rza1l_swio_pins }, }; /* RZ/A1L (r7s72102x) pinmux flags table */ static const struct rza1_pinmux_conf rza1l_pmx_conf = { .bidir_entries = rza1l_bidir_entries, .swio_entries = rza1l_swio_entries, }; /* ---------------------------------------------------------------------------- * RZ/A1 types */ /** * struct rza1_mux_conf - describes a pin multiplexing operation * * @id: the pin identifier from 0 to RZA1_NPINS * @port: the port where pin sits on * @pin: pin id * @mux_func: alternate function id number * @mux_flags: alternate function flags * @value: output value to set the pin to */ struct rza1_mux_conf { u16 id; u8 port; u8 pin; u8 mux_func; u8 mux_flags; u8 value; }; /** * struct rza1_port - describes a pin port * * This is mostly useful to lock register writes per-bank and not globally. 
* * @lock: protect access to HW registers * @id: port number * @base: logical address base * @pins: pins sitting on this port */ struct rza1_port { spinlock_t lock; unsigned int id; void __iomem *base; struct pinctrl_pin_desc *pins; }; /** * struct rza1_pinctrl - RZ pincontroller device * * @dev: parent device structure * @mutex: protect [pinctrl|pinmux]_generic functions * @base: logical address base * @nport: number of pin controller ports * @ports: pin controller banks * @pins: pin array for pinctrl core * @desc: pincontroller desc for pinctrl core * @pctl: pinctrl device * @data: device specific data */ struct rza1_pinctrl { struct device *dev; struct mutex mutex; void __iomem *base; unsigned int nport; struct rza1_port *ports; struct pinctrl_pin_desc *pins; struct pinctrl_desc desc; struct pinctrl_dev *pctl; const void *data; }; /* ---------------------------------------------------------------------------- * RZ/A1 pinmux flags */ static inline bool rza1_pinmux_get_bidir(unsigned int port, unsigned int pin, unsigned int func, const struct rza1_bidir_entry *table) { const struct rza1_bidir_entry *entry = &table[port]; const struct rza1_bidir_pin *bidir_pin; unsigned int i; for (i = 0; i < entry->npins; ++i) { bidir_pin = &entry->pins[i]; if (bidir_pin->pin == pin && bidir_pin->func == func) return true; } return false; } static inline int rza1_pinmux_get_swio(unsigned int port, unsigned int pin, unsigned int func, const struct rza1_swio_entry *table) { const struct rza1_swio_pin *swio_pin; unsigned int i; for (i = 0; i < table->npins; ++i) { swio_pin = &table->pins[i]; if (swio_pin->port == port && swio_pin->pin == pin && swio_pin->func == func) return swio_pin->input; } return -ENOENT; } /* * rza1_pinmux_get_flags() - return pinmux flags associated to a pin */ static unsigned int rza1_pinmux_get_flags(unsigned int port, unsigned int pin, unsigned int func, struct rza1_pinctrl *rza1_pctl) { const struct rza1_pinmux_conf *pmx_conf = rza1_pctl->data; const struct rza1_bidir_entry *bidir_entries = pmx_conf->bidir_entries; const struct rza1_swio_entry *swio_entries = pmx_conf->swio_entries; unsigned int pmx_flags = 0; int ret; if (rza1_pinmux_get_bidir(port, pin, func, bidir_entries)) pmx_flags |= MUX_FLAGS_BIDIR; ret = rza1_pinmux_get_swio(port, pin, func, swio_entries); if (ret == 0) pmx_flags |= MUX_FLAGS_SWIO_OUTPUT; else if (ret > 0) pmx_flags |= MUX_FLAGS_SWIO_INPUT; return pmx_flags; } /* ---------------------------------------------------------------------------- * RZ/A1 SoC operations */ /* * rza1_set_bit() - un-locked set/clear a single bit in pin configuration * registers */ static inline void rza1_set_bit(struct rza1_port *port, unsigned int reg, unsigned int bit, bool set) { void __iomem *mem = RZA1_ADDR(port->base, reg, port->id); u16 val = ioread16(mem); if (set) val |= BIT(bit); else val &= ~BIT(bit); iowrite16(val, mem); } static inline unsigned int rza1_get_bit(struct rza1_port *port, unsigned int reg, unsigned int bit) { void __iomem *mem = RZA1_ADDR(port->base, reg, port->id); return ioread16(mem) & BIT(bit); } /** * rza1_pin_reset() - reset a pin to default initial state * * Reset pin state disabling input buffer and bi-directional control, * and configure it as input port. * Note that pin is now configured with direction as input but with input * buffer disabled. This implies the pin value cannot be read in this state. 
* * @port: port where pin sits on * @pin: pin offset */ static void rza1_pin_reset(struct rza1_port *port, unsigned int pin) { unsigned long irqflags; spin_lock_irqsave(&port->lock, irqflags); rza1_set_bit(port, RZA1_PIBC_REG, pin, 0); rza1_set_bit(port, RZA1_PBDC_REG, pin, 0); rza1_set_bit(port, RZA1_PM_REG, pin, 1); rza1_set_bit(port, RZA1_PMC_REG, pin, 0); rza1_set_bit(port, RZA1_PIPC_REG, pin, 0); spin_unlock_irqrestore(&port->lock, irqflags); } /** * rza1_pin_set_direction() - set I/O direction on a pin in port mode * * When running in output port mode keep PBDC enabled to allow reading the * pin value from PPR. * * @port: port where pin sits on * @pin: pin offset * @input: input enable/disable flag */ static inline void rza1_pin_set_direction(struct rza1_port *port, unsigned int pin, bool input) { unsigned long irqflags; spin_lock_irqsave(&port->lock, irqflags); rza1_set_bit(port, RZA1_PIBC_REG, pin, 1); if (input) { rza1_set_bit(port, RZA1_PM_REG, pin, 1); rza1_set_bit(port, RZA1_PBDC_REG, pin, 0); } else { rza1_set_bit(port, RZA1_PM_REG, pin, 0); rza1_set_bit(port, RZA1_PBDC_REG, pin, 1); } spin_unlock_irqrestore(&port->lock, irqflags); } static inline void rza1_pin_set(struct rza1_port *port, unsigned int pin, unsigned int value) { unsigned long irqflags; spin_lock_irqsave(&port->lock, irqflags); rza1_set_bit(port, RZA1_P_REG, pin, !!value); spin_unlock_irqrestore(&port->lock, irqflags); } static inline int rza1_pin_get(struct rza1_port *port, unsigned int pin) { return rza1_get_bit(port, RZA1_PPR_REG, pin); } /** * rza1_pin_mux_single() - configure pin multiplexing on a single pin * * @rza1_pctl: RZ/A1 pin controller device * @mux_conf: pin multiplexing descriptor */ static int rza1_pin_mux_single(struct rza1_pinctrl *rza1_pctl, struct rza1_mux_conf *mux_conf) { struct rza1_port *port = &rza1_pctl->ports[mux_conf->port]; unsigned int pin = mux_conf->pin; u8 mux_func = mux_conf->mux_func; u8 mux_flags = mux_conf->mux_flags; u8 mux_flags_from_table; rza1_pin_reset(port, pin); /* SWIO pinmux flags coming from DT are high precedence */ mux_flags_from_table = rza1_pinmux_get_flags(port->id, pin, mux_func, rza1_pctl); if (mux_flags) mux_flags |= (mux_flags_from_table & MUX_FLAGS_BIDIR); else mux_flags = mux_flags_from_table; if (mux_flags & MUX_FLAGS_BIDIR) rza1_set_bit(port, RZA1_PBDC_REG, pin, 1); /* * Enable alternate function mode and select it. * * Be careful here: the pin mux sub-nodes in device tree * enumerate alternate functions from 1 to 8; * subtract 1 before using macros to match registers configuration * which expects numbers from 0 to 7 instead. * * ---------------------------------------------------- * Alternate mode selection table: * * PMC PFC PFCE PFCAE (mux_func - 1) * 1 0 0 0 0 * 1 1 0 0 1 * 1 0 1 0 2 * 1 1 1 0 3 * 1 0 0 1 4 * 1 1 0 1 5 * 1 0 1 1 6 * 1 1 1 1 7 * ---------------------------------------------------- */ mux_func -= 1; rza1_set_bit(port, RZA1_PFC_REG, pin, mux_func & MUX_FUNC_PFC_MASK); rza1_set_bit(port, RZA1_PFCE_REG, pin, mux_func & MUX_FUNC_PFCE_MASK); rza1_set_bit(port, RZA1_PFCEA_REG, pin, mux_func & MUX_FUNC_PFCEA_MASK); /* * All alternate functions except a few need PIPCn = 1. * If PIPCn has to stay disabled (SW IO mode), configure PMn according * to I/O direction specified by pin configuration -after- PMC has been * set to one. 
*/ if (mux_flags & (MUX_FLAGS_SWIO_INPUT | MUX_FLAGS_SWIO_OUTPUT)) rza1_set_bit(port, RZA1_PM_REG, pin, mux_flags & MUX_FLAGS_SWIO_INPUT); else rza1_set_bit(port, RZA1_PIPC_REG, pin, 1); rza1_set_bit(port, RZA1_PMC_REG, pin, 1); return 0; } /* ---------------------------------------------------------------------------- * gpio operations */ /** * rza1_gpio_request() - configure pin in port mode * * Configure a pin as gpio (port mode). * After reset, the pin is in input mode with input buffer disabled. * To use the pin as input or output, set_direction shall be called first * * @chip: gpio chip where the gpio sits on * @gpio: gpio offset */ static int rza1_gpio_request(struct gpio_chip *chip, unsigned int gpio) { struct rza1_port *port = gpiochip_get_data(chip); int ret; ret = pinctrl_gpio_request(chip, gpio); if (ret) return ret; rza1_pin_reset(port, gpio); return 0; } /** * rza1_gpio_free() - reset a pin * * Surprisingly, freeing a gpio is equivalent to requesting it. * Reset pin to port mode, with input buffer disabled. This overwrites all * port direction settings applied with set_direction * * @chip: gpio chip where the gpio sits on * @gpio: gpio offset */ static void rza1_gpio_free(struct gpio_chip *chip, unsigned int gpio) { struct rza1_port *port = gpiochip_get_data(chip); rza1_pin_reset(port, gpio); pinctrl_gpio_free(chip, gpio); } static int rza1_gpio_get_direction(struct gpio_chip *chip, unsigned int gpio) { struct rza1_port *port = gpiochip_get_data(chip); if (rza1_get_bit(port, RZA1_PM_REG, gpio)) return GPIO_LINE_DIRECTION_IN; return GPIO_LINE_DIRECTION_OUT; } static int rza1_gpio_direction_input(struct gpio_chip *chip, unsigned int gpio) { struct rza1_port *port = gpiochip_get_data(chip); rza1_pin_set_direction(port, gpio, true); return 0; } static int rza1_gpio_direction_output(struct gpio_chip *chip, unsigned int gpio, int value) { struct rza1_port *port = gpiochip_get_data(chip); /* Set value before driving pin direction */ rza1_pin_set(port, gpio, value); rza1_pin_set_direction(port, gpio, false); return 0; } /** * rza1_gpio_get() - read a gpio pin value * * Read gpio pin value through PPR register. 
* Requires bi-directional mode to work when reading the value of a pin * in output mode * * @chip: gpio chip where the gpio sits on * @gpio: gpio offset */ static int rza1_gpio_get(struct gpio_chip *chip, unsigned int gpio) { struct rza1_port *port = gpiochip_get_data(chip); return rza1_pin_get(port, gpio); } static void rza1_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value) { struct rza1_port *port = gpiochip_get_data(chip); rza1_pin_set(port, gpio, value); } static const struct gpio_chip rza1_gpiochip_template = { .request = rza1_gpio_request, .free = rza1_gpio_free, .get_direction = rza1_gpio_get_direction, .direction_input = rza1_gpio_direction_input, .direction_output = rza1_gpio_direction_output, .get = rza1_gpio_get, .set = rza1_gpio_set, }; /* ---------------------------------------------------------------------------- * pinctrl operations */ /** * rza1_dt_node_pin_count() - Count number of pins in a dt node or in all its * children sub-nodes * * @np: device tree node to parse */ static int rza1_dt_node_pin_count(struct device_node *np) { struct property *of_pins; unsigned int npins; of_pins = of_find_property(np, "pinmux", NULL); if (of_pins) return of_pins->length / sizeof(u32); npins = 0; for_each_child_of_node_scoped(np, child) { of_pins = of_find_property(child, "pinmux", NULL); if (!of_pins) return -EINVAL; npins += of_pins->length / sizeof(u32); } return npins; } /** * rza1_parse_pinmux_node() - parse a pin mux sub-node * * @rza1_pctl: RZ/A1 pin controller device * @np: of pmx sub-node * @mux_confs: array of pin mux configurations to fill with parsed info * @grpins: array of pin ids to mux */ static int rza1_parse_pinmux_node(struct rza1_pinctrl *rza1_pctl, struct device_node *np, struct rza1_mux_conf *mux_confs, unsigned int *grpins) { struct pinctrl_dev *pctldev = rza1_pctl->pctl; char const *prop_name = "pinmux"; unsigned long *pin_configs; unsigned int npin_configs; struct property *of_pins; unsigned int npins; u8 pinmux_flags; unsigned int i; int ret; of_pins = of_find_property(np, prop_name, NULL); if (!of_pins) { dev_dbg(rza1_pctl->dev, "Missing %s property\n", prop_name); return -ENOENT; } npins = of_pins->length / sizeof(u32); /* * Collect pin configuration properties: they apply to all pins in * this sub-node */ ret = pinconf_generic_parse_dt_config(np, pctldev, &pin_configs, &npin_configs); if (ret) { dev_err(rza1_pctl->dev, "Unable to parse pin configuration options for %pOFn\n", np); return ret; } /* * Create a mask with pinmux flags from pin configuration; * very few pins (TIOC[0-4][A|B|C|D] require SWIO direction * specified in device tree. */ pinmux_flags = 0; for (i = 0; i < npin_configs && pinmux_flags == 0; i++) switch (pinconf_to_config_param(pin_configs[i])) { case PIN_CONFIG_INPUT_ENABLE: pinmux_flags |= MUX_FLAGS_SWIO_INPUT; break; case PIN_CONFIG_OUTPUT: /* for DT backwards compatibility */ case PIN_CONFIG_OUTPUT_ENABLE: pinmux_flags |= MUX_FLAGS_SWIO_OUTPUT; break; default: break; } kfree(pin_configs); /* Collect pin positions and their mux settings. 
*/ for (i = 0; i < npins; ++i) { u32 of_pinconf; struct rza1_mux_conf *mux_conf = &mux_confs[i]; ret = of_property_read_u32_index(np, prop_name, i, &of_pinconf); if (ret) return ret; mux_conf->id = of_pinconf & MUX_PIN_ID_MASK; mux_conf->port = RZA1_PIN_ID_TO_PORT(mux_conf->id); mux_conf->pin = RZA1_PIN_ID_TO_PIN(mux_conf->id); mux_conf->mux_func = MUX_FUNC(of_pinconf); mux_conf->mux_flags = pinmux_flags; if (mux_conf->port >= RZA1_NPORTS || mux_conf->pin >= RZA1_PINS_PER_PORT) { dev_err(rza1_pctl->dev, "Wrong port %u pin %u for %s property\n", mux_conf->port, mux_conf->pin, prop_name); return -EINVAL; } grpins[i] = mux_conf->id; } return npins; } /** * rza1_dt_node_to_map() - map a pin mux node to a function/group * * Parse and register a pin mux function. * * @pctldev: pin controller device * @np: device tree node to parse * @map: pointer to pin map (output) * @num_maps: number of collected maps (output) */ static int rza1_dt_node_to_map(struct pinctrl_dev *pctldev, struct device_node *np, struct pinctrl_map **map, unsigned int *num_maps) { struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev); struct rza1_mux_conf *mux_confs, *mux_conf; unsigned int *grpins, *grpin; const char *grpname; const char **fngrps; int ret, npins; int gsel, fsel; npins = rza1_dt_node_pin_count(np); if (npins < 0) { dev_err(rza1_pctl->dev, "invalid pinmux node structure\n"); return -EINVAL; } /* * Functions are made of 1 group only; * in fact, functions and groups are identical for this pin controller * except that functions carry an array of per-pin mux configuration * settings. */ mux_confs = devm_kcalloc(rza1_pctl->dev, npins, sizeof(*mux_confs), GFP_KERNEL); grpins = devm_kcalloc(rza1_pctl->dev, npins, sizeof(*grpins), GFP_KERNEL); fngrps = devm_kzalloc(rza1_pctl->dev, sizeof(*fngrps), GFP_KERNEL); if (!mux_confs || !grpins || !fngrps) return -ENOMEM; /* * Parse the pinmux node. * If the node does not contain "pinmux" property (-ENOENT) * that property shall be specified in all its children sub-nodes. 
*/ mux_conf = &mux_confs[0]; grpin = &grpins[0]; ret = rza1_parse_pinmux_node(rza1_pctl, np, mux_conf, grpin); if (ret == -ENOENT) for_each_child_of_node_scoped(np, child) { ret = rza1_parse_pinmux_node(rza1_pctl, child, mux_conf, grpin); if (ret < 0) return ret; grpin += ret; mux_conf += ret; } else if (ret < 0) return ret; /* Register pin group and function name to pinctrl_generic */ grpname = np->name; fngrps[0] = grpname; mutex_lock(&rza1_pctl->mutex); gsel = pinctrl_generic_add_group(pctldev, grpname, grpins, npins, NULL); if (gsel < 0) { mutex_unlock(&rza1_pctl->mutex); return gsel; } fsel = pinmux_generic_add_function(pctldev, grpname, fngrps, 1, mux_confs); if (fsel < 0) { ret = fsel; goto remove_group; } dev_info(rza1_pctl->dev, "Parsed function and group %s with %d pins\n", grpname, npins); /* Create map where to retrieve function and mux settings from */ *num_maps = 0; *map = kzalloc(sizeof(**map), GFP_KERNEL); if (!*map) { ret = -ENOMEM; goto remove_function; } (*map)->type = PIN_MAP_TYPE_MUX_GROUP; (*map)->data.mux.group = np->name; (*map)->data.mux.function = np->name; *num_maps = 1; mutex_unlock(&rza1_pctl->mutex); return 0; remove_function: pinmux_generic_remove_function(pctldev, fsel); remove_group: pinctrl_generic_remove_group(pctldev, gsel); mutex_unlock(&rza1_pctl->mutex); dev_info(rza1_pctl->dev, "Unable to parse function and group %s\n", grpname); return ret; } static void rza1_dt_free_map(struct pinctrl_dev *pctldev, struct pinctrl_map *map, unsigned int num_maps) { kfree(map); } static const struct pinctrl_ops rza1_pinctrl_ops = { .get_groups_count = pinctrl_generic_get_group_count, .get_group_name = pinctrl_generic_get_group_name, .get_group_pins = pinctrl_generic_get_group_pins, .dt_node_to_map = rza1_dt_node_to_map, .dt_free_map = rza1_dt_free_map, }; /* ---------------------------------------------------------------------------- * pinmux operations */ /** * rza1_set_mux() - retrieve pins from a group and apply their mux settings * * @pctldev: pin controller device * @selector: function selector * @group: group selector */ static int rza1_set_mux(struct pinctrl_dev *pctldev, unsigned int selector, unsigned int group) { struct rza1_pinctrl *rza1_pctl = pinctrl_dev_get_drvdata(pctldev); struct rza1_mux_conf *mux_confs; struct function_desc *func; struct group_desc *grp; int i; grp = pinctrl_generic_get_group(pctldev, group); if (!grp) return -EINVAL; func = pinmux_generic_get_function(pctldev, selector); if (!func) return -EINVAL; mux_confs = (struct rza1_mux_conf *)func->data; for (i = 0; i < grp->grp.npins; ++i) { int ret; ret = rza1_pin_mux_single(rza1_pctl, &mux_confs[i]); if (ret) return ret; } return 0; } static const struct pinmux_ops rza1_pinmux_ops = { .get_functions_count = pinmux_generic_get_function_count, .get_function_name = pinmux_generic_get_function_name, .get_function_groups = pinmux_generic_get_function_groups, .set_mux = rza1_set_mux, .strict = true, }; /* ---------------------------------------------------------------------------- * RZ/A1 pin controller driver operations */ /** * rza1_parse_gpiochip() - parse and register a gpio chip and pin range * * The gpio controller subnode shall provide a "gpio-ranges" list property as * defined by gpio device tree binding documentation. 
* * @rza1_pctl: RZ/A1 pin controller device * @fwnode: gpio-controller firmware node * @chip: gpio chip to register to gpiolib * @range: pin range to register to pinctrl core */ static int rza1_parse_gpiochip(struct rza1_pinctrl *rza1_pctl, struct fwnode_handle *fwnode, struct gpio_chip *chip, struct pinctrl_gpio_range *range) { const char *list_name = "gpio-ranges"; struct fwnode_reference_args args; unsigned int gpioport; u32 pinctrl_base; int ret; ret = fwnode_property_get_reference_args(fwnode, list_name, NULL, 3, 0, &args); if (ret) { dev_err(rza1_pctl->dev, "Unable to parse %s list property\n", list_name); return ret; } /* * Find out on which port this gpio-chip maps to by inspecting the * second argument of the "gpio-ranges" property. */ pinctrl_base = args.args[1]; gpioport = RZA1_PIN_ID_TO_PORT(pinctrl_base); if (gpioport >= RZA1_NPORTS) { dev_err(rza1_pctl->dev, "Invalid values in property %s\n", list_name); return -EINVAL; } *chip = rza1_gpiochip_template; chip->base = -1; chip->ngpio = args.args[2]; chip->label = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL, "%pfwP", fwnode); if (!chip->label) return -ENOMEM; chip->fwnode = fwnode; chip->parent = rza1_pctl->dev; range->id = gpioport; range->name = chip->label; range->pin_base = range->base = pinctrl_base; range->npins = args.args[2]; range->gc = chip; ret = devm_gpiochip_add_data(rza1_pctl->dev, chip, &rza1_pctl->ports[gpioport]); if (ret) return ret; pinctrl_add_gpio_range(rza1_pctl->pctl, range); dev_dbg(rza1_pctl->dev, "Parsed gpiochip %s with %d pins\n", chip->label, chip->ngpio); return 0; } /** * rza1_gpio_register() - parse DT to collect gpio-chips and gpio-ranges * * @rza1_pctl: RZ/A1 pin controller device */ static int rza1_gpio_register(struct rza1_pinctrl *rza1_pctl) { struct pinctrl_gpio_range *gpio_ranges; struct gpio_chip *gpio_chips; struct fwnode_handle *child; unsigned int ngpiochips; unsigned int i; int ret; ngpiochips = gpiochip_node_count(rza1_pctl->dev); if (ngpiochips == 0) { dev_dbg(rza1_pctl->dev, "No gpiochip registered\n"); return 0; } gpio_chips = devm_kcalloc(rza1_pctl->dev, ngpiochips, sizeof(*gpio_chips), GFP_KERNEL); gpio_ranges = devm_kcalloc(rza1_pctl->dev, ngpiochips, sizeof(*gpio_ranges), GFP_KERNEL); if (!gpio_chips || !gpio_ranges) return -ENOMEM; i = 0; for_each_gpiochip_node(rza1_pctl->dev, child) { ret = rza1_parse_gpiochip(rza1_pctl, child, &gpio_chips[i], &gpio_ranges[i]); if (ret) { fwnode_handle_put(child); return ret; } ++i; } dev_info(rza1_pctl->dev, "Registered %u gpio controllers\n", i); return 0; } /** * rza1_pinctrl_register() - Enumerate pins, ports and gpiochips; register * them to pinctrl and gpio cores. * * @rza1_pctl: RZ/A1 pin controller device */ static int rza1_pinctrl_register(struct rza1_pinctrl *rza1_pctl) { struct pinctrl_pin_desc *pins; struct rza1_port *ports; unsigned int i; int ret; pins = devm_kcalloc(rza1_pctl->dev, RZA1_NPINS, sizeof(*pins), GFP_KERNEL); ports = devm_kcalloc(rza1_pctl->dev, RZA1_NPORTS, sizeof(*ports), GFP_KERNEL); if (!pins || !ports) return -ENOMEM; rza1_pctl->pins = pins; rza1_pctl->desc.pins = pins; rza1_pctl->desc.npins = RZA1_NPINS; rza1_pctl->ports = ports; for (i = 0; i < RZA1_NPINS; ++i) { unsigned int pin = RZA1_PIN_ID_TO_PIN(i); unsigned int port = RZA1_PIN_ID_TO_PORT(i); pins[i].number = i; pins[i].name = devm_kasprintf(rza1_pctl->dev, GFP_KERNEL, "P%u-%u", port, pin); if (!pins[i].name) return -ENOMEM; if (i % RZA1_PINS_PER_PORT == 0) { /* * Setup ports; * they provide per-port lock and logical base address. 
*/ unsigned int port_id = RZA1_PIN_ID_TO_PORT(i); ports[port_id].id = port_id; ports[port_id].base = rza1_pctl->base; ports[port_id].pins = &pins[i]; spin_lock_init(&ports[port_id].lock); } } ret = devm_pinctrl_register_and_init(rza1_pctl->dev, &rza1_pctl->desc, rza1_pctl, &rza1_pctl->pctl); if (ret) { dev_err(rza1_pctl->dev, "RZ/A1 pin controller registration failed\n"); return ret; } ret = pinctrl_enable(rza1_pctl->pctl); if (ret) { dev_err(rza1_pctl->dev, "RZ/A1 pin controller failed to start\n"); return ret; } ret = rza1_gpio_register(rza1_pctl); if (ret) { dev_err(rza1_pctl->dev, "RZ/A1 GPIO registration failed\n"); return ret; } return 0; } static int rza1_pinctrl_probe(struct platform_device *pdev) { struct rza1_pinctrl *rza1_pctl; int ret; rza1_pctl = devm_kzalloc(&pdev->dev, sizeof(*rza1_pctl), GFP_KERNEL); if (!rza1_pctl) return -ENOMEM; rza1_pctl->dev = &pdev->dev; rza1_pctl->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(rza1_pctl->base)) return PTR_ERR(rza1_pctl->base); mutex_init(&rza1_pctl->mutex); platform_set_drvdata(pdev, rza1_pctl); rza1_pctl->desc.name = DRIVER_NAME; rza1_pctl->desc.pctlops = &rza1_pinctrl_ops; rza1_pctl->desc.pmxops = &rza1_pinmux_ops; rza1_pctl->desc.owner = THIS_MODULE; rza1_pctl->data = of_device_get_match_data(&pdev->dev); ret = rza1_pinctrl_register(rza1_pctl); if (ret) return ret; dev_info(&pdev->dev, "RZ/A1 pin controller and gpio successfully registered\n"); return 0; } static const struct of_device_id rza1_pinctrl_of_match[] = { { /* RZ/A1H, RZ/A1M */ .compatible = "renesas,r7s72100-ports", .data = &rza1h_pmx_conf, }, { /* RZ/A1L */ .compatible = "renesas,r7s72102-ports", .data = &rza1l_pmx_conf, }, { /* sentinel */ } }; static struct platform_driver rza1_pinctrl_driver = { .driver = { .name = DRIVER_NAME, .of_match_table = rza1_pinctrl_of_match, }, .probe = rza1_pinctrl_probe, }; static int __init rza1_pinctrl_init(void) { return platform_driver_register(&rza1_pinctrl_driver); } core_initcall(rza1_pinctrl_init); MODULE_AUTHOR("Jacopo Mondi <[email protected]"); MODULE_DESCRIPTION("Pin and gpio controller driver for Reneas RZ/A1 SoC");
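/*
 * Illustrative sketch, not part of the driver above: the table in
 * rza1_pin_mux_single() documents how a device-tree alternate function
 * number (1 to 8) is reduced by one and then split across the PFC,
 * PFCE and PFCAE register bits. A minimal standalone helper showing
 * that decomposition could look like the code below; the type and
 * function names are made up for the example.
 */
struct rza1_altfunc_bits_example {
	unsigned int pfc;	/* goes to PFCn */
	unsigned int pfce;	/* goes to PFCEn */
	unsigned int pfcae;	/* goes to PFCAEn */
};

static inline struct rza1_altfunc_bits_example
rza1_altfunc_decode_example(unsigned int dt_func)
{
	unsigned int f = dt_func - 1;	/* DT counts 1-8, registers expect 0-7 */

	return (struct rza1_altfunc_bits_example) {
		.pfc	= f & 0x1,		/* bit 0 of (func - 1) */
		.pfce	= (f >> 1) & 0x1,	/* bit 1 of (func - 1) */
		.pfcae	= (f >> 2) & 0x1,	/* bit 2 of (func - 1) */
	};
}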
// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * SCSI RDMA Protocol lib functions * * Copyright (C) 2006 FUJITA Tomonori <[email protected]> * Copyright (C) 2016 Bryant G. Ly <[email protected]> IBM Corp. * ***********************************************************************/ #define pr_fmt(fmt) "libsrp: " fmt #include <linux/printk.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/kfifo.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <scsi/srp.h> #include <target/target_core_base.h> #include "libsrp.h" #include "ibmvscsi_tgt.h" static int srp_iu_pool_alloc(struct srp_queue *q, size_t max, struct srp_buf **ring) { struct iu_entry *iue; int i; q->pool = kcalloc(max, sizeof(struct iu_entry *), GFP_KERNEL); if (!q->pool) return -ENOMEM; q->items = kcalloc(max, sizeof(struct iu_entry), GFP_KERNEL); if (!q->items) goto free_pool; spin_lock_init(&q->lock); kfifo_init(&q->queue, (void *)q->pool, max * sizeof(void *)); for (i = 0, iue = q->items; i < max; i++) { kfifo_in(&q->queue, (void *)&iue, sizeof(void *)); iue->sbuf = ring[i]; iue++; } return 0; free_pool: kfree(q->pool); return -ENOMEM; } static void srp_iu_pool_free(struct srp_queue *q) { kfree(q->items); kfree(q->pool); } static struct srp_buf **srp_ring_alloc(struct device *dev, size_t max, size_t size) { struct srp_buf **ring; int i; ring = kcalloc(max, sizeof(struct srp_buf *), GFP_KERNEL); if (!ring) return NULL; for (i = 0; i < max; i++) { ring[i] = kzalloc(sizeof(*ring[i]), GFP_KERNEL); if (!ring[i]) goto out; ring[i]->buf = dma_alloc_coherent(dev, size, &ring[i]->dma, GFP_KERNEL); if (!ring[i]->buf) goto out; } return ring; out: for (i = 0; i < max && ring[i]; i++) { if (ring[i]->buf) { dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); } kfree(ring[i]); } kfree(ring); return NULL; } static void srp_ring_free(struct device *dev, struct srp_buf **ring, size_t max, size_t size) { int i; for (i = 0; i < max; i++) { dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma); kfree(ring[i]); } kfree(ring); } int srp_target_alloc(struct srp_target *target, struct device *dev, size_t nr, size_t iu_size) { int err; spin_lock_init(&target->lock); target->dev = dev; target->srp_iu_size = iu_size; target->rx_ring_size = nr; target->rx_ring = srp_ring_alloc(target->dev, nr, iu_size); if (!target->rx_ring) return -ENOMEM; err = srp_iu_pool_alloc(&target->iu_queue, nr, target->rx_ring); if (err) goto free_ring; dev_set_drvdata(target->dev, target); return 0; free_ring: srp_ring_free(target->dev, target->rx_ring, nr, iu_size); return -ENOMEM; } void srp_target_free(struct srp_target *target) { dev_set_drvdata(target->dev, NULL); srp_ring_free(target->dev, target->rx_ring, target->rx_ring_size, target->srp_iu_size); srp_iu_pool_free(&target->iu_queue); } struct iu_entry *srp_iu_get(struct srp_target *target) { struct iu_entry *iue = NULL; if (kfifo_out_locked(&target->iu_queue.queue, (void *)&iue, sizeof(void *), &target->iu_queue.lock) != sizeof(void *)) { WARN_ONCE(1, "unexpected fifo state"); return NULL; } if (!iue) return iue; iue->target = target; iue->flags = 0; return iue; } void srp_iu_put(struct iu_entry *iue) { kfifo_in_locked(&iue->target->iu_queue.queue, (void *)&iue, sizeof(void *), &iue->target->iu_queue.lock); } static int srp_direct_data(struct ibmvscsis_cmd *cmd, struct srp_direct_buf *md, enum dma_data_direction dir, srp_rdma_t rdma_io, int dma_map, int ext_desc) { struct 
iu_entry *iue = NULL; struct scatterlist *sg = NULL; int err, nsg = 0, len; if (dma_map) { iue = cmd->iue; sg = cmd->se_cmd.t_data_sg; nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, DMA_BIDIRECTIONAL); if (!nsg) { pr_err("fail to map %p %d\n", iue, cmd->se_cmd.t_data_nents); return 0; } len = min(cmd->se_cmd.data_length, be32_to_cpu(md->len)); } else { len = be32_to_cpu(md->len); } err = rdma_io(cmd, sg, nsg, md, 1, dir, len); if (dma_map) dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); return err; } static int srp_indirect_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, struct srp_indirect_buf *id, enum dma_data_direction dir, srp_rdma_t rdma_io, int dma_map, int ext_desc) { struct iu_entry *iue = NULL; struct srp_direct_buf *md = NULL; struct scatterlist dummy, *sg = NULL; dma_addr_t token = 0; int err = 0; int nmd, nsg = 0, len; if (dma_map || ext_desc) { iue = cmd->iue; sg = cmd->se_cmd.t_data_sg; } nmd = be32_to_cpu(id->table_desc.len) / sizeof(struct srp_direct_buf); if ((dir == DMA_FROM_DEVICE && nmd == srp_cmd->data_in_desc_cnt) || (dir == DMA_TO_DEVICE && nmd == srp_cmd->data_out_desc_cnt)) { md = &id->desc_list[0]; goto rdma; } if (ext_desc && dma_map) { md = dma_alloc_coherent(iue->target->dev, be32_to_cpu(id->table_desc.len), &token, GFP_KERNEL); if (!md) { pr_err("Can't get dma memory %u\n", be32_to_cpu(id->table_desc.len)); return -ENOMEM; } sg_init_one(&dummy, md, be32_to_cpu(id->table_desc.len)); sg_dma_address(&dummy) = token; sg_dma_len(&dummy) = be32_to_cpu(id->table_desc.len); err = rdma_io(cmd, &dummy, 1, &id->table_desc, 1, DMA_TO_DEVICE, be32_to_cpu(id->table_desc.len)); if (err) { pr_err("Error copying indirect table %d\n", err); goto free_mem; } } else { pr_err("This command uses external indirect buffer\n"); return -EINVAL; } rdma: if (dma_map) { nsg = dma_map_sg(iue->target->dev, sg, cmd->se_cmd.t_data_nents, DMA_BIDIRECTIONAL); if (!nsg) { pr_err("fail to map %p %d\n", iue, cmd->se_cmd.t_data_nents); err = -EIO; goto free_mem; } len = min(cmd->se_cmd.data_length, be32_to_cpu(id->len)); } else { len = be32_to_cpu(id->len); } err = rdma_io(cmd, sg, nsg, md, nmd, dir, len); if (dma_map) dma_unmap_sg(iue->target->dev, sg, nsg, DMA_BIDIRECTIONAL); free_mem: if (token && dma_map) { dma_free_coherent(iue->target->dev, be32_to_cpu(id->table_desc.len), md, token); } return err; } static int data_out_desc_size(struct srp_cmd *cmd) { int size = 0; u8 fmt = cmd->buf_fmt >> 4; switch (fmt) { case SRP_NO_DATA_DESC: break; case SRP_DATA_DESC_DIRECT: size = sizeof(struct srp_direct_buf); break; case SRP_DATA_DESC_INDIRECT: size = sizeof(struct srp_indirect_buf) + sizeof(struct srp_direct_buf) * cmd->data_out_desc_cnt; break; default: pr_err("client error. Invalid data_out_format %x\n", fmt); break; } return size; } /* * TODO: this can be called multiple times for a single command if it * has very long data. 
*/ int srp_transfer_data(struct ibmvscsis_cmd *cmd, struct srp_cmd *srp_cmd, srp_rdma_t rdma_io, int dma_map, int ext_desc) { struct srp_direct_buf *md; struct srp_indirect_buf *id; enum dma_data_direction dir; int offset, err = 0; u8 format; if (!cmd->se_cmd.t_data_nents) return 0; offset = srp_cmd->add_cdb_len & ~3; dir = srp_cmd_direction(srp_cmd); if (dir == DMA_FROM_DEVICE) offset += data_out_desc_size(srp_cmd); if (dir == DMA_TO_DEVICE) format = srp_cmd->buf_fmt >> 4; else format = srp_cmd->buf_fmt & ((1U << 4) - 1); switch (format) { case SRP_NO_DATA_DESC: break; case SRP_DATA_DESC_DIRECT: md = (struct srp_direct_buf *)(srp_cmd->add_data + offset); err = srp_direct_data(cmd, md, dir, rdma_io, dma_map, ext_desc); break; case SRP_DATA_DESC_INDIRECT: id = (struct srp_indirect_buf *)(srp_cmd->add_data + offset); err = srp_indirect_data(cmd, srp_cmd, id, dir, rdma_io, dma_map, ext_desc); break; default: pr_err("Unknown format %d %x\n", dir, format); err = -EINVAL; } return err; } u64 srp_data_length(struct srp_cmd *cmd, enum dma_data_direction dir) { struct srp_direct_buf *md; struct srp_indirect_buf *id; u64 len = 0; uint offset = cmd->add_cdb_len & ~3; u8 fmt; if (dir == DMA_TO_DEVICE) { fmt = cmd->buf_fmt >> 4; } else { fmt = cmd->buf_fmt & ((1U << 4) - 1); offset += data_out_desc_size(cmd); } switch (fmt) { case SRP_NO_DATA_DESC: break; case SRP_DATA_DESC_DIRECT: md = (struct srp_direct_buf *)(cmd->add_data + offset); len = be32_to_cpu(md->len); break; case SRP_DATA_DESC_INDIRECT: id = (struct srp_indirect_buf *)(cmd->add_data + offset); len = be32_to_cpu(id->len); break; default: pr_err("invalid data format %x\n", fmt); break; } return len; } int srp_get_desc_table(struct srp_cmd *srp_cmd, enum dma_data_direction *dir, u64 *data_len) { struct srp_indirect_buf *idb; struct srp_direct_buf *db; uint add_cdb_offset; int rc; /* * The pointer computations below will only be compiled correctly * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check * whether srp_cmd::add_data has been declared as a byte pointer. */ BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0) && !__same_type(srp_cmd->add_data[0], (u8)0)); BUG_ON(!dir); BUG_ON(!data_len); rc = 0; *data_len = 0; *dir = DMA_NONE; if (srp_cmd->buf_fmt & 0xf) *dir = DMA_FROM_DEVICE; else if (srp_cmd->buf_fmt >> 4) *dir = DMA_TO_DEVICE; add_cdb_offset = srp_cmd->add_cdb_len & ~3; if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) || ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) { db = (struct srp_direct_buf *)(srp_cmd->add_data + add_cdb_offset); *data_len = be32_to_cpu(db->len); } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) || ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) { idb = (struct srp_indirect_buf *)(srp_cmd->add_data + add_cdb_offset); *data_len = be32_to_cpu(idb->len); } return rc; } MODULE_DESCRIPTION("SCSI RDMA Protocol lib functions"); MODULE_AUTHOR("FUJITA Tomonori"); MODULE_LICENSE("GPL");
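/*
 * Illustrative sketch, not part of libsrp above: srp_cmd::buf_fmt packs
 * two descriptor-format fields into one byte, with the data-out format
 * in the upper nibble and the data-in format in the lower nibble. This
 * is the convention srp_transfer_data(), srp_data_length() and
 * srp_get_desc_table() all decode. The helper name below is
 * hypothetical and only meant to show the split.
 */
#include <linux/types.h>

static inline void srp_buf_fmt_split_example(u8 buf_fmt,
					     u8 *data_out_fmt,
					     u8 *data_in_fmt)
{
	*data_out_fmt = buf_fmt >> 4;	/* SRP_NO_DATA_DESC / _DIRECT / _INDIRECT */
	*data_in_fmt = buf_fmt & 0xf;	/* SRP_NO_DATA_DESC / _DIRECT / _INDIRECT */
}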
/* SPDX-License-Identifier: GPL-2.0-only */ #ifndef _WM8737_H #define _WM8737_H /* * wm8737.c -- WM8523 ALSA SoC Audio driver * * Copyright 2010 Wolfson Microelectronics plc * * Author: Mark Brown <[email protected]> */ /* * Register values. */ #define WM8737_LEFT_PGA_VOLUME 0x00 #define WM8737_RIGHT_PGA_VOLUME 0x01 #define WM8737_AUDIO_PATH_L 0x02 #define WM8737_AUDIO_PATH_R 0x03 #define WM8737_3D_ENHANCE 0x04 #define WM8737_ADC_CONTROL 0x05 #define WM8737_POWER_MANAGEMENT 0x06 #define WM8737_AUDIO_FORMAT 0x07 #define WM8737_CLOCKING 0x08 #define WM8737_MIC_PREAMP_CONTROL 0x09 #define WM8737_MISC_BIAS_CONTROL 0x0A #define WM8737_NOISE_GATE 0x0B #define WM8737_ALC1 0x0C #define WM8737_ALC2 0x0D #define WM8737_ALC3 0x0E #define WM8737_RESET 0x0F #define WM8737_REGISTER_COUNT 16 #define WM8737_MAX_REGISTER 0x0F /* * Field Definitions. */ /* * R0 (0x00) - Left PGA volume */ #define WM8737_LVU 0x0100 /* LVU */ #define WM8737_LVU_MASK 0x0100 /* LVU */ #define WM8737_LVU_SHIFT 8 /* LVU */ #define WM8737_LVU_WIDTH 1 /* LVU */ #define WM8737_LINVOL_MASK 0x00FF /* LINVOL - [7:0] */ #define WM8737_LINVOL_SHIFT 0 /* LINVOL - [7:0] */ #define WM8737_LINVOL_WIDTH 8 /* LINVOL - [7:0] */ /* * R1 (0x01) - Right PGA volume */ #define WM8737_RVU 0x0100 /* RVU */ #define WM8737_RVU_MASK 0x0100 /* RVU */ #define WM8737_RVU_SHIFT 8 /* RVU */ #define WM8737_RVU_WIDTH 1 /* RVU */ #define WM8737_RINVOL_MASK 0x00FF /* RINVOL - [7:0] */ #define WM8737_RINVOL_SHIFT 0 /* RINVOL - [7:0] */ #define WM8737_RINVOL_WIDTH 8 /* RINVOL - [7:0] */ /* * R2 (0x02) - AUDIO path L */ #define WM8737_LINSEL_MASK 0x0180 /* LINSEL - [8:7] */ #define WM8737_LINSEL_SHIFT 7 /* LINSEL - [8:7] */ #define WM8737_LINSEL_WIDTH 2 /* LINSEL - [8:7] */ #define WM8737_LMICBOOST_MASK 0x0060 /* LMICBOOST - [6:5] */ #define WM8737_LMICBOOST_SHIFT 5 /* LMICBOOST - [6:5] */ #define WM8737_LMICBOOST_WIDTH 2 /* LMICBOOST - [6:5] */ #define WM8737_LMBE 0x0010 /* LMBE */ #define WM8737_LMBE_MASK 0x0010 /* LMBE */ #define WM8737_LMBE_SHIFT 4 /* LMBE */ #define WM8737_LMBE_WIDTH 1 /* LMBE */ #define WM8737_LMZC 0x0008 /* LMZC */ #define WM8737_LMZC_MASK 0x0008 /* LMZC */ #define WM8737_LMZC_SHIFT 3 /* LMZC */ #define WM8737_LMZC_WIDTH 1 /* LMZC */ #define WM8737_LPZC 0x0004 /* LPZC */ #define WM8737_LPZC_MASK 0x0004 /* LPZC */ #define WM8737_LPZC_SHIFT 2 /* LPZC */ #define WM8737_LPZC_WIDTH 1 /* LPZC */ #define WM8737_LZCTO_MASK 0x0003 /* LZCTO - [1:0] */ #define WM8737_LZCTO_SHIFT 0 /* LZCTO - [1:0] */ #define WM8737_LZCTO_WIDTH 2 /* LZCTO - [1:0] */ /* * R3 (0x03) - AUDIO path R */ #define WM8737_RINSEL_MASK 0x0180 /* RINSEL - [8:7] */ #define WM8737_RINSEL_SHIFT 7 /* RINSEL - [8:7] */ #define WM8737_RINSEL_WIDTH 2 /* RINSEL - [8:7] */ #define WM8737_RMICBOOST_MASK 0x0060 /* RMICBOOST - [6:5] */ #define WM8737_RMICBOOST_SHIFT 5 /* RMICBOOST - [6:5] */ #define WM8737_RMICBOOST_WIDTH 2 /* RMICBOOST - [6:5] */ #define WM8737_RMBE 0x0010 /* RMBE */ #define WM8737_RMBE_MASK 0x0010 /* RMBE */ #define WM8737_RMBE_SHIFT 4 /* RMBE */ #define WM8737_RMBE_WIDTH 1 /* RMBE */ #define WM8737_RMZC 0x0008 /* RMZC */ #define WM8737_RMZC_MASK 0x0008 /* RMZC */ #define WM8737_RMZC_SHIFT 3 /* RMZC */ #define WM8737_RMZC_WIDTH 1 /* RMZC */ #define WM8737_RPZC 0x0004 /* RPZC */ #define WM8737_RPZC_MASK 0x0004 /* RPZC */ #define WM8737_RPZC_SHIFT 2 /* RPZC */ #define WM8737_RPZC_WIDTH 1 /* RPZC */ #define WM8737_RZCTO_MASK 0x0003 /* RZCTO - [1:0] */ #define WM8737_RZCTO_SHIFT 0 /* RZCTO - [1:0] */ #define WM8737_RZCTO_WIDTH 2 /* RZCTO - [1:0] */ /* * R4 (0x04) - 3D Enhance */ 
#define WM8737_DIV2 0x0080 /* DIV2 */ #define WM8737_DIV2_MASK 0x0080 /* DIV2 */ #define WM8737_DIV2_SHIFT 7 /* DIV2 */ #define WM8737_DIV2_WIDTH 1 /* DIV2 */ #define WM8737_3DLC 0x0040 /* 3DLC */ #define WM8737_3DLC_MASK 0x0040 /* 3DLC */ #define WM8737_3DLC_SHIFT 6 /* 3DLC */ #define WM8737_3DLC_WIDTH 1 /* 3DLC */ #define WM8737_3DUC 0x0020 /* 3DUC */ #define WM8737_3DUC_MASK 0x0020 /* 3DUC */ #define WM8737_3DUC_SHIFT 5 /* 3DUC */ #define WM8737_3DUC_WIDTH 1 /* 3DUC */ #define WM8737_3DDEPTH_MASK 0x001E /* 3DDEPTH - [4:1] */ #define WM8737_3DDEPTH_SHIFT 1 /* 3DDEPTH - [4:1] */ #define WM8737_3DDEPTH_WIDTH 4 /* 3DDEPTH - [4:1] */ #define WM8737_3DE 0x0001 /* 3DE */ #define WM8737_3DE_MASK 0x0001 /* 3DE */ #define WM8737_3DE_SHIFT 0 /* 3DE */ #define WM8737_3DE_WIDTH 1 /* 3DE */ /* * R5 (0x05) - ADC Control */ #define WM8737_MONOMIX_MASK 0x0180 /* MONOMIX - [8:7] */ #define WM8737_MONOMIX_SHIFT 7 /* MONOMIX - [8:7] */ #define WM8737_MONOMIX_WIDTH 2 /* MONOMIX - [8:7] */ #define WM8737_POLARITY_MASK 0x0060 /* POLARITY - [6:5] */ #define WM8737_POLARITY_SHIFT 5 /* POLARITY - [6:5] */ #define WM8737_POLARITY_WIDTH 2 /* POLARITY - [6:5] */ #define WM8737_HPOR 0x0010 /* HPOR */ #define WM8737_HPOR_MASK 0x0010 /* HPOR */ #define WM8737_HPOR_SHIFT 4 /* HPOR */ #define WM8737_HPOR_WIDTH 1 /* HPOR */ #define WM8737_LP 0x0004 /* LP */ #define WM8737_LP_MASK 0x0004 /* LP */ #define WM8737_LP_SHIFT 2 /* LP */ #define WM8737_LP_WIDTH 1 /* LP */ #define WM8737_MONOUT 0x0002 /* MONOUT */ #define WM8737_MONOUT_MASK 0x0002 /* MONOUT */ #define WM8737_MONOUT_SHIFT 1 /* MONOUT */ #define WM8737_MONOUT_WIDTH 1 /* MONOUT */ #define WM8737_ADCHPD 0x0001 /* ADCHPD */ #define WM8737_ADCHPD_MASK 0x0001 /* ADCHPD */ #define WM8737_ADCHPD_SHIFT 0 /* ADCHPD */ #define WM8737_ADCHPD_WIDTH 1 /* ADCHPD */ /* * R6 (0x06) - Power Management */ #define WM8737_VMID 0x0100 /* VMID */ #define WM8737_VMID_MASK 0x0100 /* VMID */ #define WM8737_VMID_SHIFT 8 /* VMID */ #define WM8737_VMID_WIDTH 1 /* VMID */ #define WM8737_VREF 0x0080 /* VREF */ #define WM8737_VREF_MASK 0x0080 /* VREF */ #define WM8737_VREF_SHIFT 7 /* VREF */ #define WM8737_VREF_WIDTH 1 /* VREF */ #define WM8737_AI 0x0040 /* AI */ #define WM8737_AI_MASK 0x0040 /* AI */ #define WM8737_AI_SHIFT 6 /* AI */ #define WM8737_AI_WIDTH 1 /* AI */ #define WM8737_PGL 0x0020 /* PGL */ #define WM8737_PGL_MASK 0x0020 /* PGL */ #define WM8737_PGL_SHIFT 5 /* PGL */ #define WM8737_PGL_WIDTH 1 /* PGL */ #define WM8737_PGR 0x0010 /* PGR */ #define WM8737_PGR_MASK 0x0010 /* PGR */ #define WM8737_PGR_SHIFT 4 /* PGR */ #define WM8737_PGR_WIDTH 1 /* PGR */ #define WM8737_ADL 0x0008 /* ADL */ #define WM8737_ADL_MASK 0x0008 /* ADL */ #define WM8737_ADL_SHIFT 3 /* ADL */ #define WM8737_ADL_WIDTH 1 /* ADL */ #define WM8737_ADR 0x0004 /* ADR */ #define WM8737_ADR_MASK 0x0004 /* ADR */ #define WM8737_ADR_SHIFT 2 /* ADR */ #define WM8737_ADR_WIDTH 1 /* ADR */ #define WM8737_MICBIAS_MASK 0x0003 /* MICBIAS - [1:0] */ #define WM8737_MICBIAS_SHIFT 0 /* MICBIAS - [1:0] */ #define WM8737_MICBIAS_WIDTH 2 /* MICBIAS - [1:0] */ /* * R7 (0x07) - Audio Format */ #define WM8737_SDODIS 0x0080 /* SDODIS */ #define WM8737_SDODIS_MASK 0x0080 /* SDODIS */ #define WM8737_SDODIS_SHIFT 7 /* SDODIS */ #define WM8737_SDODIS_WIDTH 1 /* SDODIS */ #define WM8737_MS 0x0040 /* MS */ #define WM8737_MS_MASK 0x0040 /* MS */ #define WM8737_MS_SHIFT 6 /* MS */ #define WM8737_MS_WIDTH 1 /* MS */ #define WM8737_LRP 0x0010 /* LRP */ #define WM8737_LRP_MASK 0x0010 /* LRP */ #define WM8737_LRP_SHIFT 4 /* LRP */ #define 
WM8737_LRP_WIDTH 1 /* LRP */ #define WM8737_WL_MASK 0x000C /* WL - [3:2] */ #define WM8737_WL_SHIFT 2 /* WL - [3:2] */ #define WM8737_WL_WIDTH 2 /* WL - [3:2] */ #define WM8737_FORMAT_MASK 0x0003 /* FORMAT - [1:0] */ #define WM8737_FORMAT_SHIFT 0 /* FORMAT - [1:0] */ #define WM8737_FORMAT_WIDTH 2 /* FORMAT - [1:0] */ /* * R8 (0x08) - Clocking */ #define WM8737_AUTODETECT 0x0080 /* AUTODETECT */ #define WM8737_AUTODETECT_MASK 0x0080 /* AUTODETECT */ #define WM8737_AUTODETECT_SHIFT 7 /* AUTODETECT */ #define WM8737_AUTODETECT_WIDTH 1 /* AUTODETECT */ #define WM8737_CLKDIV2 0x0040 /* CLKDIV2 */ #define WM8737_CLKDIV2_MASK 0x0040 /* CLKDIV2 */ #define WM8737_CLKDIV2_SHIFT 6 /* CLKDIV2 */ #define WM8737_CLKDIV2_WIDTH 1 /* CLKDIV2 */ #define WM8737_SR_MASK 0x003E /* SR - [5:1] */ #define WM8737_SR_SHIFT 1 /* SR - [5:1] */ #define WM8737_SR_WIDTH 5 /* SR - [5:1] */ #define WM8737_USB_MODE 0x0001 /* USB MODE */ #define WM8737_USB_MODE_MASK 0x0001 /* USB MODE */ #define WM8737_USB_MODE_SHIFT 0 /* USB MODE */ #define WM8737_USB_MODE_WIDTH 1 /* USB MODE */ /* * R9 (0x09) - MIC Preamp Control */ #define WM8737_RBYPEN 0x0008 /* RBYPEN */ #define WM8737_RBYPEN_MASK 0x0008 /* RBYPEN */ #define WM8737_RBYPEN_SHIFT 3 /* RBYPEN */ #define WM8737_RBYPEN_WIDTH 1 /* RBYPEN */ #define WM8737_LBYPEN 0x0004 /* LBYPEN */ #define WM8737_LBYPEN_MASK 0x0004 /* LBYPEN */ #define WM8737_LBYPEN_SHIFT 2 /* LBYPEN */ #define WM8737_LBYPEN_WIDTH 1 /* LBYPEN */ #define WM8737_MBCTRL_MASK 0x0003 /* MBCTRL - [1:0] */ #define WM8737_MBCTRL_SHIFT 0 /* MBCTRL - [1:0] */ #define WM8737_MBCTRL_WIDTH 2 /* MBCTRL - [1:0] */ /* * R10 (0x0A) - Misc Bias Control */ #define WM8737_VMIDSEL_MASK 0x000C /* VMIDSEL - [3:2] */ #define WM8737_VMIDSEL_SHIFT 2 /* VMIDSEL - [3:2] */ #define WM8737_VMIDSEL_WIDTH 2 /* VMIDSEL - [3:2] */ #define WM8737_LINPUT1_DC_BIAS_ENABLE 0x0002 /* LINPUT1 DC BIAS ENABLE */ #define WM8737_LINPUT1_DC_BIAS_ENABLE_MASK 0x0002 /* LINPUT1 DC BIAS ENABLE */ #define WM8737_LINPUT1_DC_BIAS_ENABLE_SHIFT 1 /* LINPUT1 DC BIAS ENABLE */ #define WM8737_LINPUT1_DC_BIAS_ENABLE_WIDTH 1 /* LINPUT1 DC BIAS ENABLE */ #define WM8737_RINPUT1_DC_BIAS_ENABLE 0x0001 /* RINPUT1 DC BIAS ENABLE */ #define WM8737_RINPUT1_DC_BIAS_ENABLE_MASK 0x0001 /* RINPUT1 DC BIAS ENABLE */ #define WM8737_RINPUT1_DC_BIAS_ENABLE_SHIFT 0 /* RINPUT1 DC BIAS ENABLE */ #define WM8737_RINPUT1_DC_BIAS_ENABLE_WIDTH 1 /* RINPUT1 DC BIAS ENABLE */ /* * R11 (0x0B) - Noise Gate */ #define WM8737_NGTH_MASK 0x001C /* NGTH - [4:2] */ #define WM8737_NGTH_SHIFT 2 /* NGTH - [4:2] */ #define WM8737_NGTH_WIDTH 3 /* NGTH - [4:2] */ #define WM8737_NGAT 0x0001 /* NGAT */ #define WM8737_NGAT_MASK 0x0001 /* NGAT */ #define WM8737_NGAT_SHIFT 0 /* NGAT */ #define WM8737_NGAT_WIDTH 1 /* NGAT */ /* * R12 (0x0C) - ALC1 */ #define WM8737_ALCSEL_MASK 0x0180 /* ALCSEL - [8:7] */ #define WM8737_ALCSEL_SHIFT 7 /* ALCSEL - [8:7] */ #define WM8737_ALCSEL_WIDTH 2 /* ALCSEL - [8:7] */ #define WM8737_MAX_GAIN_MASK 0x0070 /* MAX GAIN - [6:4] */ #define WM8737_MAX_GAIN_SHIFT 4 /* MAX GAIN - [6:4] */ #define WM8737_MAX_GAIN_WIDTH 3 /* MAX GAIN - [6:4] */ #define WM8737_ALCL_MASK 0x000F /* ALCL - [3:0] */ #define WM8737_ALCL_SHIFT 0 /* ALCL - [3:0] */ #define WM8737_ALCL_WIDTH 4 /* ALCL - [3:0] */ /* * R13 (0x0D) - ALC2 */ #define WM8737_ALCZCE 0x0010 /* ALCZCE */ #define WM8737_ALCZCE_MASK 0x0010 /* ALCZCE */ #define WM8737_ALCZCE_SHIFT 4 /* ALCZCE */ #define WM8737_ALCZCE_WIDTH 1 /* ALCZCE */ #define WM8737_HLD_MASK 0x000F /* HLD - [3:0] */ #define WM8737_HLD_SHIFT 0 /* HLD - [3:0] */ #define 
WM8737_HLD_WIDTH 4 /* HLD - [3:0] */ /* * R14 (0x0E) - ALC3 */ #define WM8737_DCY_MASK 0x00F0 /* DCY - [7:4] */ #define WM8737_DCY_SHIFT 4 /* DCY - [7:4] */ #define WM8737_DCY_WIDTH 4 /* DCY - [7:4] */ #define WM8737_ATK_MASK 0x000F /* ATK - [3:0] */ #define WM8737_ATK_SHIFT 0 /* ATK - [3:0] */ #define WM8737_ATK_WIDTH 4 /* ATK - [3:0] */ /* * R15 (0x0F) - Reset */ #define WM8737_RESET_MASK 0x01FF /* RESET - [8:0] */ #define WM8737_RESET_SHIFT 0 /* RESET - [8:0] */ #define WM8737_RESET_WIDTH 9 /* RESET - [8:0] */ #endif
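/*
 * Illustrative sketch, not part of the register header above: the
 * *_MASK and *_SHIFT definitions are intended to be combined when a
 * field is programmed, e.g. updating LMICBOOST in the left audio-path
 * register while preserving the other bits. The helper below is a
 * hypothetical example of that pattern on a cached register value.
 */
static inline unsigned int wm8737_set_lmicboost_example(unsigned int reg_val,
							unsigned int boost)
{
	reg_val &= ~WM8737_LMICBOOST_MASK;
	reg_val |= (boost << WM8737_LMICBOOST_SHIFT) & WM8737_LMICBOOST_MASK;
	return reg_val;
}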
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright 2012 Armadeus Systems - <[email protected]> * Copyright 2012 Laurent Cans <[email protected]> * * Based on mx51-babbage.dts * Copyright 2011 Freescale Semiconductor, Inc. * Copyright 2011 Linaro Ltd. */ /dts-v1/; #include "imx51.dtsi" / { model = "Armadeus Systems APF51 module"; compatible = "armadeus,imx51-apf51", "fsl,imx51"; memory@90000000 { device_type = "memory"; reg = <0x90000000 0x20000000>; }; clocks { osc { clock-frequency = <33554432>; }; }; }; &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_fec>; phy-mode = "mii"; phy-reset-gpios = <&gpio3 0 GPIO_ACTIVE_LOW>; phy-reset-duration = <1>; status = "okay"; }; &iomuxc { pinctrl_fec: fecgrp { fsl,pins = < MX51_PAD_DI_GP3__FEC_TX_ER 0x80000000 MX51_PAD_DI2_PIN4__FEC_CRS 0x80000000 MX51_PAD_DI2_PIN2__FEC_MDC 0x80000000 MX51_PAD_DI2_PIN3__FEC_MDIO 0x80000000 MX51_PAD_DI2_DISP_CLK__FEC_RDATA1 0x80000000 MX51_PAD_DI_GP4__FEC_RDATA2 0x80000000 MX51_PAD_DISP2_DAT0__FEC_RDATA3 0x80000000 MX51_PAD_DISP2_DAT1__FEC_RX_ER 0x80000000 MX51_PAD_DISP2_DAT6__FEC_TDATA1 0x80000000 MX51_PAD_DISP2_DAT7__FEC_TDATA2 0x80000000 MX51_PAD_DISP2_DAT8__FEC_TDATA3 0x80000000 MX51_PAD_DISP2_DAT9__FEC_TX_EN 0x80000000 MX51_PAD_DISP2_DAT10__FEC_COL 0x80000000 MX51_PAD_DISP2_DAT11__FEC_RX_CLK 0x80000000 MX51_PAD_DISP2_DAT12__FEC_RX_DV 0x80000000 MX51_PAD_DISP2_DAT13__FEC_TX_CLK 0x80000000 MX51_PAD_DISP2_DAT14__FEC_RDATA0 0x80000000 MX51_PAD_DISP2_DAT15__FEC_TDATA0 0x80000000 >; }; pinctrl_uart3: uart3grp { fsl,pins = < MX51_PAD_UART3_RXD__UART3_RXD 0x1c5 MX51_PAD_UART3_TXD__UART3_TXD 0x1c5 >; }; }; &nfc { nand-bus-width = <8>; nand-ecc-mode = "hw"; nand-on-flash-bbt; status = "okay"; }; &uart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart3>; status = "okay"; };
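/*
 * Illustrative sketch, not part of the device tree above: a consumer of
 * the &fec node could read the "phy-reset-gpios" and
 * "phy-reset-duration" properties roughly as below. The function name
 * is hypothetical and error handling is trimmed; the real FEC driver's
 * parsing may differ in detail.
 */
#include <linux/of.h>
#include <linux/of_gpio.h>

static void example_parse_fec_phy_reset(struct device_node *np)
{
	u32 reset_msec = 1;	/* milliseconds; the node above sets <1> */
	int reset_gpio;

	reset_gpio = of_get_named_gpio(np, "phy-reset-gpios", 0);
	of_property_read_u32(np, "phy-reset-duration", &reset_msec);

	/* reset_gpio and reset_msec would then drive the PHY reset pulse */
	(void)reset_gpio;
}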
// SPDX-License-Identifier: GPL-2.0-only /* * AMD Cryptographic Coprocessor (CCP) driver * * Copyright (C) 2013,2019 Advanced Micro Devices, Inc. * * Author: Tom Lendacky <[email protected]> * Author: Gary R Hook <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/spinlock_types.h> #include <linux/types.h> #include <linux/mutex.h> #include <linux/delay.h> #include <linux/hw_random.h> #include <linux/cpu.h> #include <linux/atomic.h> #ifdef CONFIG_X86 #include <asm/cpu_device_id.h> #endif #include <linux/ccp.h> #include "ccp-dev.h" #define MAX_CCPS 32 /* Limit CCP use to a specifed number of queues per device */ static unsigned int nqueues; module_param(nqueues, uint, 0444); MODULE_PARM_DESC(nqueues, "Number of queues per CCP (minimum 1; default: all available)"); /* Limit the maximum number of configured CCPs */ static atomic_t dev_count = ATOMIC_INIT(0); static unsigned int max_devs = MAX_CCPS; module_param(max_devs, uint, 0444); MODULE_PARM_DESC(max_devs, "Maximum number of CCPs to enable (default: all; 0 disables all CCPs)"); struct ccp_tasklet_data { struct completion completion; struct ccp_cmd *cmd; }; /* Human-readable error strings */ #define CCP_MAX_ERROR_CODE 64 static char *ccp_error_codes[] = { "", "ILLEGAL_ENGINE", "ILLEGAL_KEY_ID", "ILLEGAL_FUNCTION_TYPE", "ILLEGAL_FUNCTION_MODE", "ILLEGAL_FUNCTION_ENCRYPT", "ILLEGAL_FUNCTION_SIZE", "Zlib_MISSING_INIT_EOM", "ILLEGAL_FUNCTION_RSVD", "ILLEGAL_BUFFER_LENGTH", "VLSB_FAULT", "ILLEGAL_MEM_ADDR", "ILLEGAL_MEM_SEL", "ILLEGAL_CONTEXT_ID", "ILLEGAL_KEY_ADDR", "0xF Reserved", "Zlib_ILLEGAL_MULTI_QUEUE", "Zlib_ILLEGAL_JOBID_CHANGE", "CMD_TIMEOUT", "IDMA0_AXI_SLVERR", "IDMA0_AXI_DECERR", "0x15 Reserved", "IDMA1_AXI_SLAVE_FAULT", "IDMA1_AIXI_DECERR", "0x18 Reserved", "ZLIBVHB_AXI_SLVERR", "ZLIBVHB_AXI_DECERR", "0x1B Reserved", "ZLIB_UNEXPECTED_EOM", "ZLIB_EXTRA_DATA", "ZLIB_BTYPE", "ZLIB_UNDEFINED_SYMBOL", "ZLIB_UNDEFINED_DISTANCE_S", "ZLIB_CODE_LENGTH_SYMBOL", "ZLIB _VHB_ILLEGAL_FETCH", "ZLIB_UNCOMPRESSED_LEN", "ZLIB_LIMIT_REACHED", "ZLIB_CHECKSUM_MISMATCH0", "ODMA0_AXI_SLVERR", "ODMA0_AXI_DECERR", "0x28 Reserved", "ODMA1_AXI_SLVERR", "ODMA1_AXI_DECERR", }; void ccp_log_error(struct ccp_device *d, unsigned int e) { if (WARN_ON(e >= CCP_MAX_ERROR_CODE)) return; if (e < ARRAY_SIZE(ccp_error_codes)) dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]); else dev_err(d->dev, "CCP error %d: Unknown Error\n", e); } /* List of CCPs, CCP count, read-write access lock, and access functions * * Lock structure: get ccp_unit_lock for reading whenever we need to * examine the CCP list. While holding it for reading we can acquire * the RR lock to update the round-robin next-CCP pointer. The unit lock * must be acquired before the RR lock. * * If the unit-lock is acquired for writing, we have total control over * the list, so there's no value in getting the RR lock. */ static DEFINE_RWLOCK(ccp_unit_lock); static LIST_HEAD(ccp_units); /* Round-robin counter */ static DEFINE_SPINLOCK(ccp_rr_lock); static struct ccp_device *ccp_rr; /** * ccp_add_device - add a CCP device to the list * * @ccp: ccp_device struct pointer * * Put this CCP on the unit list, which makes it available * for use. * * Returns zero if a CCP device is present, -ENODEV otherwise. 
*/ void ccp_add_device(struct ccp_device *ccp) { unsigned long flags; write_lock_irqsave(&ccp_unit_lock, flags); list_add_tail(&ccp->entry, &ccp_units); if (!ccp_rr) /* We already have the list lock (we're first) so this * pointer can't change on us. Set its initial value. */ ccp_rr = ccp; write_unlock_irqrestore(&ccp_unit_lock, flags); } /** * ccp_del_device - remove a CCP device from the list * * @ccp: ccp_device struct pointer * * Remove this unit from the list of devices. If the next device * up for use is this one, adjust the pointer. If this is the last * device, NULL the pointer. */ void ccp_del_device(struct ccp_device *ccp) { unsigned long flags; write_lock_irqsave(&ccp_unit_lock, flags); if (ccp_rr == ccp) { /* ccp_unit_lock is read/write; any read access * will be suspended while we make changes to the * list and RR pointer. */ if (list_is_last(&ccp_rr->entry, &ccp_units)) ccp_rr = list_first_entry(&ccp_units, struct ccp_device, entry); else ccp_rr = list_next_entry(ccp_rr, entry); } list_del(&ccp->entry); if (list_empty(&ccp_units)) ccp_rr = NULL; write_unlock_irqrestore(&ccp_unit_lock, flags); } int ccp_register_rng(struct ccp_device *ccp) { int ret = 0; dev_dbg(ccp->dev, "Registering RNG...\n"); /* Register an RNG */ ccp->hwrng.name = ccp->rngname; ccp->hwrng.read = ccp_trng_read; ret = hwrng_register(&ccp->hwrng); if (ret) dev_err(ccp->dev, "error registering hwrng (%d)\n", ret); return ret; } void ccp_unregister_rng(struct ccp_device *ccp) { if (ccp->hwrng.name) hwrng_unregister(&ccp->hwrng); } static struct ccp_device *ccp_get_device(void) { unsigned long flags; struct ccp_device *dp = NULL; /* We round-robin through the unit list. * The (ccp_rr) pointer refers to the next unit to use. */ read_lock_irqsave(&ccp_unit_lock, flags); if (!list_empty(&ccp_units)) { spin_lock(&ccp_rr_lock); dp = ccp_rr; if (list_is_last(&ccp_rr->entry, &ccp_units)) ccp_rr = list_first_entry(&ccp_units, struct ccp_device, entry); else ccp_rr = list_next_entry(ccp_rr, entry); spin_unlock(&ccp_rr_lock); } read_unlock_irqrestore(&ccp_unit_lock, flags); return dp; } /** * ccp_present - check if a CCP device is present * * Returns zero if a CCP device is present, -ENODEV otherwise. */ int ccp_present(void) { unsigned long flags; int ret; read_lock_irqsave(&ccp_unit_lock, flags); ret = list_empty(&ccp_units); read_unlock_irqrestore(&ccp_unit_lock, flags); return ret ? -ENODEV : 0; } EXPORT_SYMBOL_GPL(ccp_present); /** * ccp_version - get the version of the CCP device * * Returns the version from the first unit on the list; * otherwise a zero if no CCP device is present */ unsigned int ccp_version(void) { struct ccp_device *dp; unsigned long flags; int ret = 0; read_lock_irqsave(&ccp_unit_lock, flags); if (!list_empty(&ccp_units)) { dp = list_first_entry(&ccp_units, struct ccp_device, entry); ret = dp->vdata->version; } read_unlock_irqrestore(&ccp_unit_lock, flags); return ret; } EXPORT_SYMBOL_GPL(ccp_version); /** * ccp_enqueue_cmd - queue an operation for processing by the CCP * * @cmd: ccp_cmd struct to be processed * * Queue a cmd to be processed by the CCP. If queueing the cmd * would exceed the defined length of the cmd queue the cmd will * only be queued if the CCP_CMD_MAY_BACKLOG flag is set and will * result in a return code of -EBUSY. * * The callback routine specified in the ccp_cmd struct will be * called to notify the caller of completion (if the cmd was not * backlogged) or advancement out of the backlog. 
If the cmd has * advanced out of the backlog the "err" value of the callback * will be -EINPROGRESS. Any other "err" value during callback is * the result of the operation. * * The cmd has been successfully queued if: * the return code is -EINPROGRESS or * the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set */ int ccp_enqueue_cmd(struct ccp_cmd *cmd) { struct ccp_device *ccp; unsigned long flags; unsigned int i; int ret; /* Some commands might need to be sent to a specific device */ ccp = cmd->ccp ? cmd->ccp : ccp_get_device(); if (!ccp) return -ENODEV; /* Caller must supply a callback routine */ if (!cmd->callback) return -EINVAL; cmd->ccp = ccp; spin_lock_irqsave(&ccp->cmd_lock, flags); i = ccp->cmd_q_count; if (ccp->cmd_count >= MAX_CMD_QLEN) { if (cmd->flags & CCP_CMD_MAY_BACKLOG) { ret = -EBUSY; list_add_tail(&cmd->entry, &ccp->backlog); } else { ret = -ENOSPC; } } else { ret = -EINPROGRESS; ccp->cmd_count++; list_add_tail(&cmd->entry, &ccp->cmd); /* Find an idle queue */ if (!ccp->suspending) { for (i = 0; i < ccp->cmd_q_count; i++) { if (ccp->cmd_q[i].active) continue; break; } } } spin_unlock_irqrestore(&ccp->cmd_lock, flags); /* If we found an idle queue, wake it up */ if (i < ccp->cmd_q_count) wake_up_process(ccp->cmd_q[i].kthread); return ret; } EXPORT_SYMBOL_GPL(ccp_enqueue_cmd); static void ccp_do_cmd_backlog(struct work_struct *work) { struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work); struct ccp_device *ccp = cmd->ccp; unsigned long flags; unsigned int i; cmd->callback(cmd->data, -EINPROGRESS); spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->cmd_count++; list_add_tail(&cmd->entry, &ccp->cmd); /* Find an idle queue */ for (i = 0; i < ccp->cmd_q_count; i++) { if (ccp->cmd_q[i].active) continue; break; } spin_unlock_irqrestore(&ccp->cmd_lock, flags); /* If we found an idle queue, wake it up */ if (i < ccp->cmd_q_count) wake_up_process(ccp->cmd_q[i].kthread); } static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q) { struct ccp_device *ccp = cmd_q->ccp; struct ccp_cmd *cmd = NULL; struct ccp_cmd *backlog = NULL; unsigned long flags; spin_lock_irqsave(&ccp->cmd_lock, flags); cmd_q->active = 0; if (ccp->suspending) { cmd_q->suspended = 1; spin_unlock_irqrestore(&ccp->cmd_lock, flags); wake_up_interruptible(&ccp->suspend_queue); return NULL; } if (ccp->cmd_count) { cmd_q->active = 1; cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry); list_del(&cmd->entry); ccp->cmd_count--; } if (!list_empty(&ccp->backlog)) { backlog = list_first_entry(&ccp->backlog, struct ccp_cmd, entry); list_del(&backlog->entry); } spin_unlock_irqrestore(&ccp->cmd_lock, flags); if (backlog) { INIT_WORK(&backlog->work, ccp_do_cmd_backlog); schedule_work(&backlog->work); } return cmd; } static void ccp_do_cmd_complete(unsigned long data) { struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data; struct ccp_cmd *cmd = tdata->cmd; cmd->callback(cmd->data, cmd->ret); complete(&tdata->completion); } /** * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue * * @data: thread-specific data */ int ccp_cmd_queue_thread(void *data) { struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data; struct ccp_cmd *cmd; struct ccp_tasklet_data tdata; struct tasklet_struct tasklet; tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata); set_current_state(TASK_INTERRUPTIBLE); while (!kthread_should_stop()) { schedule(); set_current_state(TASK_INTERRUPTIBLE); cmd = ccp_dequeue_cmd(cmd_q); if (!cmd) continue; __set_current_state(TASK_RUNNING); /* Execute 
the command */ cmd->ret = ccp_run_cmd(cmd_q, cmd); /* Schedule the completion callback */ tdata.cmd = cmd; init_completion(&tdata.completion); tasklet_schedule(&tasklet); wait_for_completion(&tdata.completion); } __set_current_state(TASK_RUNNING); return 0; } /** * ccp_alloc_struct - allocate and initialize the ccp_device struct * * @sp: sp_device struct of the CCP */ struct ccp_device *ccp_alloc_struct(struct sp_device *sp) { struct device *dev = sp->dev; struct ccp_device *ccp; ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL); if (!ccp) return NULL; ccp->dev = dev; ccp->sp = sp; ccp->axcache = sp->axcache; INIT_LIST_HEAD(&ccp->cmd); INIT_LIST_HEAD(&ccp->backlog); spin_lock_init(&ccp->cmd_lock); mutex_init(&ccp->req_mutex); mutex_init(&ccp->sb_mutex); ccp->sb_count = KSB_COUNT; ccp->sb_start = 0; /* Initialize the wait queues */ init_waitqueue_head(&ccp->sb_queue); init_waitqueue_head(&ccp->suspend_queue); snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord); snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord); return ccp; } int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait) { struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng); u32 trng_value; int len = min_t(int, sizeof(trng_value), max); /* Locking is provided by the caller so we can update device * hwrng-related fields safely */ trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG); if (!trng_value) { /* Zero is returned if not data is available or if a * bad-entropy error is present. Assume an error if * we exceed TRNG_RETRIES reads of zero. */ if (ccp->hwrng_retries++ > TRNG_RETRIES) return -EIO; return 0; } /* Reset the counter and save the rng value */ ccp->hwrng_retries = 0; memcpy(data, &trng_value, len); return len; } bool ccp_queues_suspended(struct ccp_device *ccp) { unsigned int suspended = 0; unsigned long flags; unsigned int i; spin_lock_irqsave(&ccp->cmd_lock, flags); for (i = 0; i < ccp->cmd_q_count; i++) if (ccp->cmd_q[i].suspended) suspended++; spin_unlock_irqrestore(&ccp->cmd_lock, flags); return ccp->cmd_q_count == suspended; } void ccp_dev_suspend(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; unsigned int i; /* If there's no device there's nothing to do */ if (!ccp) return; spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 1; /* Wake all the queue kthreads to prepare for suspend */ for (i = 0; i < ccp->cmd_q_count; i++) wake_up_process(ccp->cmd_q[i].kthread); spin_unlock_irqrestore(&ccp->cmd_lock, flags); /* Wait for all queue kthreads to say they're done */ while (!ccp_queues_suspended(ccp)) wait_event_interruptible(ccp->suspend_queue, ccp_queues_suspended(ccp)); } void ccp_dev_resume(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; unsigned long flags; unsigned int i; /* If there's no device there's nothing to do */ if (!ccp) return; spin_lock_irqsave(&ccp->cmd_lock, flags); ccp->suspending = 0; /* Wake up all the kthreads */ for (i = 0; i < ccp->cmd_q_count; i++) { ccp->cmd_q[i].suspended = 0; wake_up_process(ccp->cmd_q[i].kthread); } spin_unlock_irqrestore(&ccp->cmd_lock, flags); } int ccp_dev_init(struct sp_device *sp) { struct device *dev = sp->dev; struct ccp_device *ccp; int ret; /* * Check how many we have so far, and stop after reaching * that number */ if (atomic_inc_return(&dev_count) > max_devs) return 0; /* don't fail the load */ ret = -ENOMEM; ccp = ccp_alloc_struct(sp); if (!ccp) goto e_err; sp->ccp_data = ccp; if (!nqueues || (nqueues > MAX_HW_QUEUES)) ccp->max_q_count = MAX_HW_QUEUES; 
else ccp->max_q_count = nqueues; ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata; if (!ccp->vdata || !ccp->vdata->version) { ret = -ENODEV; dev_err(dev, "missing driver data\n"); goto e_err; } ccp->use_tasklet = sp->use_tasklet; ccp->io_regs = sp->io_map + ccp->vdata->offset; if (ccp->vdata->setup) ccp->vdata->setup(ccp); ret = ccp->vdata->perform->init(ccp); if (ret) { /* A positive number means that the device cannot be initialized, * but no additional message is required. */ if (ret > 0) goto e_quiet; /* An unexpected problem occurred, and should be reported in the log */ goto e_err; } dev_notice(dev, "ccp enabled\n"); return 0; e_err: dev_notice(dev, "ccp initialization failed\n"); e_quiet: sp->ccp_data = NULL; return ret; } void ccp_dev_destroy(struct sp_device *sp) { struct ccp_device *ccp = sp->ccp_data; if (!ccp) return; ccp->vdata->perform->destroy(ccp); }
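/*
 * Illustrative caller-side sketch (not part of the file above): it shows the
 * return-code convention documented at ccp_enqueue_cmd() -- with
 * CCP_CMD_MAY_BACKLOG set, both -EINPROGRESS and -EBUSY mean the cmd was
 * queued and the callback reports the final status.  struct demo_req and the
 * demo_* helpers are hypothetical; the engine-specific fields of struct
 * ccp_cmd are assumed to have been filled in by the caller.
 */
#include <linux/ccp.h>
#include <linux/completion.h>
#include <linux/errno.h>

struct demo_req {
	struct completion done;
	int err;
};

static void demo_req_complete(void *data, int err)
{
	struct demo_req *req = data;

	/* -EINPROGRESS here only means the cmd advanced out of the backlog. */
	if (err == -EINPROGRESS)
		return;

	req->err = err;
	complete(&req->done);
}

static int demo_req_submit(struct ccp_cmd *cmd, struct demo_req *req)
{
	int ret;

	init_completion(&req->done);
	cmd->callback = demo_req_complete;
	cmd->data = req;
	cmd->flags |= CCP_CMD_MAY_BACKLOG;

	ret = ccp_enqueue_cmd(cmd);
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;	/* not queued at all */

	wait_for_completion(&req->done);
	return req->err;
}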
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) ST-Ericsson SA 2012 * * Author: Ola Lilja <[email protected]>, * for ST-Ericsson. */ #ifndef UX500_MSP_I2S_H #define UX500_MSP_I2S_H #include <linux/platform_device.h> #define MSP_INPUT_FREQ_APB 48000000 /*** Stereo mode. Used for APB data accesses as 16 bits accesses (mono), * 32 bits accesses (stereo). ***/ enum msp_stereo_mode { MSP_MONO, MSP_STEREO }; /* Direction (Transmit/Receive mode) */ enum msp_direction { MSP_TX = 1, MSP_RX = 2 }; /* Transmit and receive configuration register */ #define MSP_BIG_ENDIAN 0x00000000 #define MSP_LITTLE_ENDIAN 0x00001000 #define MSP_UNEXPECTED_FS_ABORT 0x00000000 #define MSP_UNEXPECTED_FS_IGNORE 0x00008000 #define MSP_NON_MODE_BIT_MASK 0x00009000 /* Global configuration register */ #define RX_ENABLE 0x00000001 #define RX_FIFO_ENABLE 0x00000002 #define RX_SYNC_SRG 0x00000010 #define RX_CLK_POL_RISING 0x00000020 #define RX_CLK_SEL_SRG 0x00000040 #define TX_ENABLE 0x00000100 #define TX_FIFO_ENABLE 0x00000200 #define TX_SYNC_SRG_PROG 0x00001800 #define TX_SYNC_SRG_AUTO 0x00001000 #define TX_CLK_POL_RISING 0x00002000 #define TX_CLK_SEL_SRG 0x00004000 #define TX_EXTRA_DELAY_ENABLE 0x00008000 #define SRG_ENABLE 0x00010000 #define FRAME_GEN_ENABLE 0x00100000 #define SRG_CLK_SEL_APB 0x00000000 #define RX_FIFO_SYNC_HI 0x00000000 #define TX_FIFO_SYNC_HI 0x00000000 #define SPI_CLK_MODE_NORMAL 0x00000000 #define MSP_FRAME_SIZE_AUTO -1 #define MSP_DR 0x00 #define MSP_GCR 0x04 #define MSP_TCF 0x08 #define MSP_RCF 0x0c #define MSP_SRG 0x10 #define MSP_FLR 0x14 #define MSP_DMACR 0x18 #define MSP_IMSC 0x20 #define MSP_RIS 0x24 #define MSP_MIS 0x28 #define MSP_ICR 0x2c #define MSP_MCR 0x30 #define MSP_RCV 0x34 #define MSP_RCM 0x38 #define MSP_TCE0 0x40 #define MSP_TCE1 0x44 #define MSP_TCE2 0x48 #define MSP_TCE3 0x4c #define MSP_RCE0 0x60 #define MSP_RCE1 0x64 #define MSP_RCE2 0x68 #define MSP_RCE3 0x6c #define MSP_IODLY 0x70 #define MSP_ITCR 0x80 #define MSP_ITIP 0x84 #define MSP_ITOP 0x88 #define MSP_TSTDR 0x8c #define MSP_PID0 0xfe0 #define MSP_PID1 0xfe4 #define MSP_PID2 0xfe8 #define MSP_PID3 0xfec #define MSP_CID0 0xff0 #define MSP_CID1 0xff4 #define MSP_CID2 0xff8 #define MSP_CID3 0xffc /* Protocol dependant parameters list */ #define RX_ENABLE_MASK BIT(0) #define RX_FIFO_ENABLE_MASK BIT(1) #define RX_FSYNC_MASK BIT(2) #define DIRECT_COMPANDING_MASK BIT(3) #define RX_SYNC_SEL_MASK BIT(4) #define RX_CLK_POL_MASK BIT(5) #define RX_CLK_SEL_MASK BIT(6) #define LOOPBACK_MASK BIT(7) #define TX_ENABLE_MASK BIT(8) #define TX_FIFO_ENABLE_MASK BIT(9) #define TX_FSYNC_MASK BIT(10) #define TX_MSP_TDR_TSR BIT(11) #define TX_SYNC_SEL_MASK (BIT(12) | BIT(11)) #define TX_CLK_POL_MASK BIT(13) #define TX_CLK_SEL_MASK BIT(14) #define TX_EXTRA_DELAY_MASK BIT(15) #define SRG_ENABLE_MASK BIT(16) #define SRG_CLK_POL_MASK BIT(17) #define SRG_CLK_SEL_MASK (BIT(19) | BIT(18)) #define FRAME_GEN_EN_MASK BIT(20) #define SPI_CLK_MODE_MASK (BIT(22) | BIT(21)) #define SPI_BURST_MODE_MASK BIT(23) #define RXEN_SHIFT 0 #define RFFEN_SHIFT 1 #define RFSPOL_SHIFT 2 #define DCM_SHIFT 3 #define RFSSEL_SHIFT 4 #define RCKPOL_SHIFT 5 #define RCKSEL_SHIFT 6 #define LBM_SHIFT 7 #define TXEN_SHIFT 8 #define TFFEN_SHIFT 9 #define TFSPOL_SHIFT 10 #define TFSSEL_SHIFT 11 #define TCKPOL_SHIFT 13 #define TCKSEL_SHIFT 14 #define TXDDL_SHIFT 15 #define SGEN_SHIFT 16 #define SCKPOL_SHIFT 17 #define SCKSEL_SHIFT 18 #define FGEN_SHIFT 20 #define SPICKM_SHIFT 21 #define TBSWAP_SHIFT 28 #define RCKPOL_MASK BIT(0) #define TCKPOL_MASK BIT(0) #define 
SPICKM_MASK (BIT(1) | BIT(0)) #define MSP_RX_CLKPOL_BIT(n) ((n & RCKPOL_MASK) << RCKPOL_SHIFT) #define MSP_TX_CLKPOL_BIT(n) ((n & TCKPOL_MASK) << TCKPOL_SHIFT) #define P1ELEN_SHIFT 0 #define P1FLEN_SHIFT 3 #define DTYP_SHIFT 10 #define ENDN_SHIFT 12 #define DDLY_SHIFT 13 #define FSIG_SHIFT 15 #define P2ELEN_SHIFT 16 #define P2FLEN_SHIFT 19 #define P2SM_SHIFT 26 #define P2EN_SHIFT 27 #define FSYNC_SHIFT 15 #define P1ELEN_MASK 0x00000007 #define P2ELEN_MASK 0x00070000 #define P1FLEN_MASK 0x00000378 #define P2FLEN_MASK 0x03780000 #define DDLY_MASK 0x00003000 #define DTYP_MASK 0x00000600 #define P2SM_MASK 0x04000000 #define P2EN_MASK 0x08000000 #define ENDN_MASK 0x00001000 #define TFSPOL_MASK 0x00000400 #define TBSWAP_MASK 0x30000000 #define COMPANDING_MODE_MASK 0x00000c00 #define FSYNC_MASK 0x00008000 #define MSP_P1_ELEM_LEN_BITS(n) (n & P1ELEN_MASK) #define MSP_P2_ELEM_LEN_BITS(n) (((n) << P2ELEN_SHIFT) & P2ELEN_MASK) #define MSP_P1_FRAME_LEN_BITS(n) (((n) << P1FLEN_SHIFT) & P1FLEN_MASK) #define MSP_P2_FRAME_LEN_BITS(n) (((n) << P2FLEN_SHIFT) & P2FLEN_MASK) #define MSP_DATA_DELAY_BITS(n) (((n) << DDLY_SHIFT) & DDLY_MASK) #define MSP_DATA_TYPE_BITS(n) (((n) << DTYP_SHIFT) & DTYP_MASK) #define MSP_P2_START_MODE_BIT(n) ((n << P2SM_SHIFT) & P2SM_MASK) #define MSP_P2_ENABLE_BIT(n) ((n << P2EN_SHIFT) & P2EN_MASK) #define MSP_SET_ENDIANNES_BIT(n) ((n << ENDN_SHIFT) & ENDN_MASK) #define MSP_FSYNC_POL(n) ((n << TFSPOL_SHIFT) & TFSPOL_MASK) #define MSP_DATA_WORD_SWAP(n) ((n << TBSWAP_SHIFT) & TBSWAP_MASK) #define MSP_SET_COMPANDING_MODE(n) ((n << DTYP_SHIFT) & \ COMPANDING_MODE_MASK) #define MSP_SET_FSYNC_IGNORE(n) ((n << FSYNC_SHIFT) & FSYNC_MASK) /* Flag register */ #define RX_BUSY BIT(0) #define RX_FIFO_EMPTY BIT(1) #define RX_FIFO_FULL BIT(2) #define TX_BUSY BIT(3) #define TX_FIFO_EMPTY BIT(4) #define TX_FIFO_FULL BIT(5) #define RBUSY_SHIFT 0 #define RFE_SHIFT 1 #define RFU_SHIFT 2 #define TBUSY_SHIFT 3 #define TFE_SHIFT 4 #define TFU_SHIFT 5 /* Multichannel control register */ #define RMCEN_SHIFT 0 #define RMCSF_SHIFT 1 #define RCMPM_SHIFT 3 #define TMCEN_SHIFT 5 #define TNCSF_SHIFT 6 /* Sample rate generator register */ #define SCKDIV_SHIFT 0 #define FRWID_SHIFT 10 #define FRPER_SHIFT 16 #define SCK_DIV_MASK 0x0000003FF #define FRAME_WIDTH_BITS(n) (((n) << FRWID_SHIFT) & 0x0000FC00) #define FRAME_PERIOD_BITS(n) (((n) << FRPER_SHIFT) & 0x1FFF0000) /* DMA controller register */ #define RX_DMA_ENABLE BIT(0) #define TX_DMA_ENABLE BIT(1) #define RDMAE_SHIFT 0 #define TDMAE_SHIFT 1 /* Interrupt Register */ #define RX_SERVICE_INT BIT(0) #define RX_OVERRUN_ERROR_INT BIT(1) #define RX_FSYNC_ERR_INT BIT(2) #define RX_FSYNC_INT BIT(3) #define TX_SERVICE_INT BIT(4) #define TX_UNDERRUN_ERR_INT BIT(5) #define TX_FSYNC_ERR_INT BIT(6) #define TX_FSYNC_INT BIT(7) #define ALL_INT 0x000000ff /* MSP test control register */ #define MSP_ITCR_ITEN BIT(0) #define MSP_ITCR_TESTFIFO BIT(1) #define RMCEN_BIT 0 #define RMCSF_BIT 1 #define RCMPM_BIT 3 #define TMCEN_BIT 5 #define TNCSF_BIT 6 /* Single or dual phase mode */ enum msp_phase_mode { MSP_SINGLE_PHASE, MSP_DUAL_PHASE }; /* Frame length */ enum msp_frame_length { MSP_FRAME_LEN_1 = 0, MSP_FRAME_LEN_2 = 1, MSP_FRAME_LEN_4 = 3, MSP_FRAME_LEN_8 = 7, MSP_FRAME_LEN_12 = 11, MSP_FRAME_LEN_16 = 15, MSP_FRAME_LEN_20 = 19, MSP_FRAME_LEN_32 = 31, MSP_FRAME_LEN_48 = 47, MSP_FRAME_LEN_64 = 63 }; /* Element length */ enum msp_elem_length { MSP_ELEM_LEN_8 = 0, MSP_ELEM_LEN_10 = 1, MSP_ELEM_LEN_12 = 2, MSP_ELEM_LEN_14 = 3, MSP_ELEM_LEN_16 = 4, MSP_ELEM_LEN_20 = 5, MSP_ELEM_LEN_24 
= 6, MSP_ELEM_LEN_32 = 7 }; enum msp_data_xfer_width { MSP_DATA_TRANSFER_WIDTH_BYTE, MSP_DATA_TRANSFER_WIDTH_HALFWORD, MSP_DATA_TRANSFER_WIDTH_WORD }; enum msp_frame_sync { MSP_FSYNC_UNIGNORE = 0, MSP_FSYNC_IGNORE = 1, }; enum msp_phase2_start_mode { MSP_PHASE2_START_MODE_IMEDIATE, MSP_PHASE2_START_MODE_FSYNC }; enum msp_btf { MSP_BTF_MS_BIT_FIRST = 0, MSP_BTF_LS_BIT_FIRST = 1 }; enum msp_fsync_pol { MSP_FSYNC_POL_ACT_HI = 0, MSP_FSYNC_POL_ACT_LO = 1 }; /* Data delay (in bit clock cycles) */ enum msp_delay { MSP_DELAY_0 = 0, MSP_DELAY_1 = 1, MSP_DELAY_2 = 2, MSP_DELAY_3 = 3 }; /* Configurations of clocks (transmit, receive or sample rate generator) */ enum msp_edge { MSP_FALLING_EDGE = 0, MSP_RISING_EDGE = 1, }; enum msp_hws { MSP_SWAP_NONE = 0, MSP_SWAP_BYTE_PER_WORD = 1, MSP_SWAP_BYTE_PER_HALF_WORD = 2, MSP_SWAP_HALF_WORD_PER_WORD = 3 }; enum msp_compress_mode { MSP_COMPRESS_MODE_LINEAR = 0, MSP_COMPRESS_MODE_MU_LAW = 2, MSP_COMPRESS_MODE_A_LAW = 3 }; enum msp_expand_mode { MSP_EXPAND_MODE_LINEAR = 0, MSP_EXPAND_MODE_LINEAR_SIGNED = 1, MSP_EXPAND_MODE_MU_LAW = 2, MSP_EXPAND_MODE_A_LAW = 3 }; #define MSP_FRAME_PERIOD_IN_MONO_MODE 256 #define MSP_FRAME_PERIOD_IN_STEREO_MODE 32 #define MSP_FRAME_WIDTH_IN_STEREO_MODE 16 enum msp_protocol { MSP_I2S_PROTOCOL, MSP_PCM_PROTOCOL, MSP_PCM_COMPAND_PROTOCOL, MSP_INVALID_PROTOCOL }; /* * No of registers to backup during * suspend resume */ #define MAX_MSP_BACKUP_REGS 36 enum i2s_direction_t { MSP_DIR_TX = 0x01, MSP_DIR_RX = 0x02, }; enum msp_data_size { MSP_DATA_BITS_DEFAULT = -1, MSP_DATA_BITS_8 = 0x00, MSP_DATA_BITS_10, MSP_DATA_BITS_12, MSP_DATA_BITS_14, MSP_DATA_BITS_16, MSP_DATA_BITS_20, MSP_DATA_BITS_24, MSP_DATA_BITS_32, }; enum msp_state { MSP_STATE_IDLE = 0, MSP_STATE_CONFIGURED = 1, MSP_STATE_RUNNING = 2, }; enum msp_rx_comparison_enable_mode { MSP_COMPARISON_DISABLED = 0, MSP_COMPARISON_NONEQUAL_ENABLED = 2, MSP_COMPARISON_EQUAL_ENABLED = 3 }; struct msp_multichannel_config { bool rx_multichannel_enable; bool tx_multichannel_enable; enum msp_rx_comparison_enable_mode rx_comparison_enable_mode; u8 padding; u32 comparison_value; u32 comparison_mask; u32 rx_channel_0_enable; u32 rx_channel_1_enable; u32 rx_channel_2_enable; u32 rx_channel_3_enable; u32 tx_channel_0_enable; u32 tx_channel_1_enable; u32 tx_channel_2_enable; u32 tx_channel_3_enable; }; struct msp_protdesc { u32 rx_phase_mode; u32 tx_phase_mode; u32 rx_phase2_start_mode; u32 tx_phase2_start_mode; u32 rx_byte_order; u32 tx_byte_order; u32 rx_frame_len_1; u32 rx_frame_len_2; u32 tx_frame_len_1; u32 tx_frame_len_2; u32 rx_elem_len_1; u32 rx_elem_len_2; u32 tx_elem_len_1; u32 tx_elem_len_2; u32 rx_data_delay; u32 tx_data_delay; u32 rx_clk_pol; u32 tx_clk_pol; u32 rx_fsync_pol; u32 tx_fsync_pol; u32 rx_half_word_swap; u32 tx_half_word_swap; u32 compression_mode; u32 expansion_mode; u32 frame_sync_ignore; u32 frame_period; u32 frame_width; u32 clocks_per_frame; }; struct ux500_msp_config { unsigned int f_inputclk; unsigned int rx_clk_sel; unsigned int tx_clk_sel; unsigned int srg_clk_sel; unsigned int rx_fsync_pol; unsigned int tx_fsync_pol; unsigned int rx_fsync_sel; unsigned int tx_fsync_sel; unsigned int rx_fifo_config; unsigned int tx_fifo_config; unsigned int loopback_enable; unsigned int tx_data_enable; unsigned int default_protdesc; struct msp_protdesc protdesc; int multichannel_configured; struct msp_multichannel_config multichannel_config; unsigned int direction; unsigned int protocol; unsigned int frame_freq; enum msp_data_size data_size; unsigned int def_elem_len; unsigned 
int iodelay; }; struct ux500_msp { int id; void __iomem *registers; struct device *dev; dma_addr_t tx_rx_addr; enum msp_state msp_state; int def_elem_len; unsigned int dir_busy; int loopback_enable; unsigned int f_bitclk; }; int ux500_msp_i2s_init_msp(struct platform_device *pdev, struct ux500_msp **msp_p); void ux500_msp_i2s_cleanup_msp(struct platform_device *pdev, struct ux500_msp *msp); int ux500_msp_i2s_open(struct ux500_msp *msp, struct ux500_msp_config *config); int ux500_msp_i2s_close(struct ux500_msp *msp, unsigned int dir); int ux500_msp_i2s_trigger(struct ux500_msp *msp, int cmd, int direction); #endif
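/*
 * Illustrative sketch (not part of the header above): how the bit-building
 * macros are meant to be combined into a phase-1 transmit configuration
 * value, here for a plain I2S-style setup (16-bit elements, two elements per
 * frame, one bit-clock of data delay, active-low frame sync).  The chosen
 * values and the helper name are examples only; the driver composes the real
 * MSP_TCF value from struct msp_protdesc.
 */
#include <linux/types.h>

static inline u32 demo_build_i2s_tcf(void)
{
	u32 tcf = 0;

	tcf |= MSP_P1_ELEM_LEN_BITS(MSP_ELEM_LEN_16);	/* 16-bit samples */
	tcf |= MSP_P1_FRAME_LEN_BITS(MSP_FRAME_LEN_2);	/* left + right slot */
	tcf |= MSP_DATA_DELAY_BITS(MSP_DELAY_1);	/* 1 bit-clock delay */
	tcf |= MSP_SET_ENDIANNES_BIT(MSP_BTF_MS_BIT_FIRST);
	tcf |= MSP_FSYNC_POL(MSP_FSYNC_POL_ACT_LO);

	return tcf;
}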
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for Atmel Pulse Width Modulation Controller * * Copyright (C) 2013 Atmel Corporation * Bo Shen <[email protected]> * * Links to reference manuals for the supported PWM chips can be found in * Documentation/arch/arm/microchip.rst. * * Limitations: * - Periods start with the inactive level. * - Hardware has to be stopped in general to update settings. * * Software bugs/possible improvements: * - When atmel_pwm_apply() is called with state->enabled=false a change in * state->polarity isn't honored. * - Instead of sleeping to wait for a completed period, the interrupt * functionality could be used. */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/pwm.h> #include <linux/slab.h> /* The following are the global registers of the PWM controller */ #define PWM_ENA 0x04 #define PWM_DIS 0x08 #define PWM_SR 0x0C #define PWM_ISR 0x1C /* Bit field in SR */ #define PWM_SR_ALL_CH_MASK 0x0F /* The following are the PWM channel related registers */ #define PWM_CH_REG_OFFSET 0x200 #define PWM_CH_REG_SIZE 0x20 #define PWM_CMR 0x0 /* Bit field in CMR */ #define PWM_CMR_CPOL (1 << 9) #define PWM_CMR_UPD_CDTY (1 << 10) #define PWM_CMR_CPRE_MSK 0xF /* The following registers for PWM v1 */ #define PWMV1_CDTY 0x04 #define PWMV1_CPRD 0x08 #define PWMV1_CUPD 0x10 /* The following registers for PWM v2 */ #define PWMV2_CDTY 0x04 #define PWMV2_CDTYUPD 0x08 #define PWMV2_CPRD 0x0C #define PWMV2_CPRDUPD 0x10 #define PWM_MAX_PRES 10 struct atmel_pwm_registers { u8 period; u8 period_upd; u8 duty; u8 duty_upd; }; struct atmel_pwm_config { u32 period_bits; }; struct atmel_pwm_data { struct atmel_pwm_registers regs; struct atmel_pwm_config cfg; }; struct atmel_pwm_chip { struct clk *clk; void __iomem *base; const struct atmel_pwm_data *data; /* * The hardware supports a mechanism to update a channel's duty cycle at * the end of the currently running period. When such an update is * pending we delay disabling the PWM until the new configuration is * active because otherwise pwm_config(duty_cycle=0); pwm_disable(); * might not result in an inactive output. * This bitmask tracks for which channels an update is pending in * hardware. */ u32 update_pending; /* Protects .update_pending */ spinlock_t lock; }; static inline struct atmel_pwm_chip *to_atmel_pwm_chip(struct pwm_chip *chip) { return pwmchip_get_drvdata(chip); } static inline u32 atmel_pwm_readl(struct atmel_pwm_chip *chip, unsigned long offset) { return readl_relaxed(chip->base + offset); } static inline void atmel_pwm_writel(struct atmel_pwm_chip *chip, unsigned long offset, unsigned long val) { writel_relaxed(val, chip->base + offset); } static inline u32 atmel_pwm_ch_readl(struct atmel_pwm_chip *chip, unsigned int ch, unsigned long offset) { unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE; return atmel_pwm_readl(chip, base + offset); } static inline void atmel_pwm_ch_writel(struct atmel_pwm_chip *chip, unsigned int ch, unsigned long offset, unsigned long val) { unsigned long base = PWM_CH_REG_OFFSET + ch * PWM_CH_REG_SIZE; atmel_pwm_writel(chip, base + offset, val); } static void atmel_pwm_update_pending(struct atmel_pwm_chip *chip) { /* * Each channel that has its bit in ISR set started a new period since * ISR was cleared and so there is no more update pending. Note that * reading ISR clears it, so this needs to handle all channels to not * lose information.
*/ u32 isr = atmel_pwm_readl(chip, PWM_ISR); chip->update_pending &= ~isr; } static void atmel_pwm_set_pending(struct atmel_pwm_chip *chip, unsigned int ch) { spin_lock(&chip->lock); /* * Clear pending flags in hardware because otherwise there might still * be a stale flag in ISR. */ atmel_pwm_update_pending(chip); chip->update_pending |= (1 << ch); spin_unlock(&chip->lock); } static int atmel_pwm_test_pending(struct atmel_pwm_chip *chip, unsigned int ch) { int ret = 0; spin_lock(&chip->lock); if (chip->update_pending & (1 << ch)) { atmel_pwm_update_pending(chip); if (chip->update_pending & (1 << ch)) ret = 1; } spin_unlock(&chip->lock); return ret; } static int atmel_pwm_wait_nonpending(struct atmel_pwm_chip *chip, unsigned int ch) { unsigned long timeout = jiffies + 2 * HZ; int ret; while ((ret = atmel_pwm_test_pending(chip, ch)) && time_before(jiffies, timeout)) usleep_range(10, 100); return ret ? -ETIMEDOUT : 0; } static int atmel_pwm_calculate_cprd_and_pres(struct pwm_chip *chip, unsigned long clkrate, const struct pwm_state *state, unsigned long *cprd, u32 *pres) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned long long cycles = state->period; int shift; /* Calculate the period cycles and prescale value */ cycles *= clkrate; do_div(cycles, NSEC_PER_SEC); /* * The register for the period length is cfg.period_bits bits wide. * So for each bit the number of clock cycles is wider divide the input * clock frequency by two using pres and shift cprd accordingly. */ shift = fls(cycles) - atmel_pwm->data->cfg.period_bits; if (shift > PWM_MAX_PRES) { dev_err(pwmchip_parent(chip), "pres exceeds the maximum value\n"); return -EINVAL; } else if (shift > 0) { *pres = shift; cycles >>= *pres; } else { *pres = 0; } *cprd = cycles; return 0; } static void atmel_pwm_calculate_cdty(const struct pwm_state *state, unsigned long clkrate, unsigned long cprd, u32 pres, unsigned long *cdty) { unsigned long long cycles = state->duty_cycle; cycles *= clkrate; do_div(cycles, NSEC_PER_SEC); cycles >>= pres; *cdty = cprd - cycles; } static void atmel_pwm_update_cdty(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long cdty) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); u32 val; if (atmel_pwm->data->regs.duty_upd == atmel_pwm->data->regs.period_upd) { val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); val &= ~PWM_CMR_UPD_CDTY; atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); } atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.duty_upd, cdty); atmel_pwm_set_pending(atmel_pwm, pwm->hwpwm); } static void atmel_pwm_set_cprd_cdty(struct pwm_chip *chip, struct pwm_device *pwm, unsigned long cprd, unsigned long cdty) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.duty, cdty); atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.period, cprd); } static void atmel_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm, bool disable_clk) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned long timeout; atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm); atmel_pwm_writel(atmel_pwm, PWM_DIS, 1 << pwm->hwpwm); /* * Wait for the PWM channel disable operation to be effective before * stopping the clock. 
*/ timeout = jiffies + 2 * HZ; while ((atmel_pwm_readl(atmel_pwm, PWM_SR) & (1 << pwm->hwpwm)) && time_before(jiffies, timeout)) usleep_range(10, 100); if (disable_clk) clk_disable(atmel_pwm->clk); } static int atmel_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned long cprd, cdty; u32 pres, val; int ret; if (state->enabled) { unsigned long clkrate = clk_get_rate(atmel_pwm->clk); if (pwm->state.enabled && pwm->state.polarity == state->polarity && pwm->state.period == state->period) { u32 cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.period); pres = cmr & PWM_CMR_CPRE_MSK; atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty); atmel_pwm_update_cdty(chip, pwm, cdty); return 0; } ret = atmel_pwm_calculate_cprd_and_pres(chip, clkrate, state, &cprd, &pres); if (ret) { dev_err(pwmchip_parent(chip), "failed to calculate cprd and prescaler\n"); return ret; } atmel_pwm_calculate_cdty(state, clkrate, cprd, pres, &cdty); if (pwm->state.enabled) { atmel_pwm_disable(chip, pwm, false); } else { ret = clk_enable(atmel_pwm->clk); if (ret) { dev_err(pwmchip_parent(chip), "failed to enable clock\n"); return ret; } } /* It is necessary to preserve CPOL, inside CMR */ val = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); val = (val & ~PWM_CMR_CPRE_MSK) | (pres & PWM_CMR_CPRE_MSK); if (state->polarity == PWM_POLARITY_NORMAL) val &= ~PWM_CMR_CPOL; else val |= PWM_CMR_CPOL; atmel_pwm_ch_writel(atmel_pwm, pwm->hwpwm, PWM_CMR, val); atmel_pwm_set_cprd_cdty(chip, pwm, cprd, cdty); atmel_pwm_writel(atmel_pwm, PWM_ENA, 1 << pwm->hwpwm); } else if (pwm->state.enabled) { atmel_pwm_disable(chip, pwm, true); } return 0; } static int atmel_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); u32 sr, cmr; sr = atmel_pwm_readl(atmel_pwm, PWM_SR); cmr = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, PWM_CMR); if (sr & (1 << pwm->hwpwm)) { unsigned long rate = clk_get_rate(atmel_pwm->clk); u32 cdty, cprd, pres; u64 tmp; pres = cmr & PWM_CMR_CPRE_MSK; cprd = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.period); tmp = (u64)cprd * NSEC_PER_SEC; tmp <<= pres; state->period = DIV64_U64_ROUND_UP(tmp, rate); /* Wait for an updated duty_cycle queued in hardware */ atmel_pwm_wait_nonpending(atmel_pwm, pwm->hwpwm); cdty = atmel_pwm_ch_readl(atmel_pwm, pwm->hwpwm, atmel_pwm->data->regs.duty); tmp = (u64)(cprd - cdty) * NSEC_PER_SEC; tmp <<= pres; state->duty_cycle = DIV64_U64_ROUND_UP(tmp, rate); state->enabled = true; } else { state->enabled = false; } if (cmr & PWM_CMR_CPOL) state->polarity = PWM_POLARITY_INVERSED; else state->polarity = PWM_POLARITY_NORMAL; return 0; } static const struct pwm_ops atmel_pwm_ops = { .apply = atmel_pwm_apply, .get_state = atmel_pwm_get_state, }; static const struct atmel_pwm_data atmel_sam9rl_pwm_data = { .regs = { .period = PWMV1_CPRD, .period_upd = PWMV1_CUPD, .duty = PWMV1_CDTY, .duty_upd = PWMV1_CUPD, }, .cfg = { /* 16 bits to keep period and duty. */ .period_bits = 16, }, }; static const struct atmel_pwm_data atmel_sama5_pwm_data = { .regs = { .period = PWMV2_CPRD, .period_upd = PWMV2_CPRDUPD, .duty = PWMV2_CDTY, .duty_upd = PWMV2_CDTYUPD, }, .cfg = { /* 16 bits to keep period and duty. 
*/ .period_bits = 16, }, }; static const struct atmel_pwm_data mchp_sam9x60_pwm_data = { .regs = { .period = PWMV1_CPRD, .period_upd = PWMV1_CUPD, .duty = PWMV1_CDTY, .duty_upd = PWMV1_CUPD, }, .cfg = { /* 32 bits to keep period and duty. */ .period_bits = 32, }, }; static const struct of_device_id atmel_pwm_dt_ids[] = { { .compatible = "atmel,at91sam9rl-pwm", .data = &atmel_sam9rl_pwm_data, }, { .compatible = "atmel,sama5d3-pwm", .data = &atmel_sama5_pwm_data, }, { .compatible = "atmel,sama5d2-pwm", .data = &atmel_sama5_pwm_data, }, { .compatible = "microchip,sam9x60-pwm", .data = &mchp_sam9x60_pwm_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, atmel_pwm_dt_ids); static int atmel_pwm_enable_clk_if_on(struct pwm_chip *chip, bool on) { struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); unsigned int i, cnt = 0; unsigned long sr; int ret = 0; sr = atmel_pwm_readl(atmel_pwm, PWM_SR) & PWM_SR_ALL_CH_MASK; if (!sr) return 0; cnt = bitmap_weight(&sr, chip->npwm); if (!on) goto disable_clk; for (i = 0; i < cnt; i++) { ret = clk_enable(atmel_pwm->clk); if (ret) { dev_err(pwmchip_parent(chip), "failed to enable clock for pwm %pe\n", ERR_PTR(ret)); cnt = i; goto disable_clk; } } return 0; disable_clk: while (cnt--) clk_disable(atmel_pwm->clk); return ret; } static int atmel_pwm_probe(struct platform_device *pdev) { struct atmel_pwm_chip *atmel_pwm; struct pwm_chip *chip; int ret; chip = devm_pwmchip_alloc(&pdev->dev, 4, sizeof(*atmel_pwm)); if (IS_ERR(chip)) return PTR_ERR(chip); atmel_pwm = to_atmel_pwm_chip(chip); atmel_pwm->data = of_device_get_match_data(&pdev->dev); atmel_pwm->update_pending = 0; spin_lock_init(&atmel_pwm->lock); atmel_pwm->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(atmel_pwm->base)) return PTR_ERR(atmel_pwm->base); atmel_pwm->clk = devm_clk_get_prepared(&pdev->dev, NULL); if (IS_ERR(atmel_pwm->clk)) return dev_err_probe(&pdev->dev, PTR_ERR(atmel_pwm->clk), "failed to get prepared PWM clock\n"); chip->ops = &atmel_pwm_ops; ret = atmel_pwm_enable_clk_if_on(chip, true); if (ret < 0) return ret; ret = devm_pwmchip_add(&pdev->dev, chip); if (ret < 0) { dev_err_probe(&pdev->dev, ret, "failed to add PWM chip\n"); goto disable_clk; } return 0; disable_clk: atmel_pwm_enable_clk_if_on(chip, false); return ret; } static struct platform_driver atmel_pwm_driver = { .driver = { .name = "atmel-pwm", .of_match_table = atmel_pwm_dt_ids, }, .probe = atmel_pwm_probe, }; module_platform_driver(atmel_pwm_driver); MODULE_ALIAS("platform:atmel-pwm"); MODULE_AUTHOR("Bo Shen <[email protected]>"); MODULE_DESCRIPTION("Atmel PWM driver"); MODULE_LICENSE("GPL v2");
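/*
 * Illustrative consumer-side sketch (not part of the driver above): keeping
 * period and polarity unchanged lets atmel_pwm_apply() take the "queue a new
 * CDTY at the end of the running period" path, while a period change goes
 * through the stop/reconfigure/start sequence.  Assumes the current generic
 * PWM consumer interface (pwm_apply_might_sleep()); "dev" and the 50% duty
 * value are arbitrary examples.
 */
#include <linux/err.h>
#include <linux/pwm.h>

static int demo_pwm_half_duty(struct device *dev)
{
	struct pwm_device *pwm;
	struct pwm_state state;

	pwm = devm_pwm_get(dev, NULL);
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	pwm_init_state(pwm, &state);		/* period/polarity from DT args */
	state.duty_cycle = state.period / 2;	/* duty-only update */
	state.enabled = true;

	return pwm_apply_might_sleep(pwm, &state);
}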
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Copyright (C) International Business Machines Corp., 2000-2001 */ #ifndef _H_JFS_DINODE #define _H_JFS_DINODE /* * jfs_dinode.h: on-disk inode manager */ #define INODESLOTSIZE 128 #define L2INODESLOTSIZE 7 #define log2INODESIZE 9 /* log2(bytes per dinode) */ /* * on-disk inode : 512 bytes * * note: align 64-bit fields on 8-byte boundary. */ struct dinode { /* * I. base area (128 bytes) * ------------------------ * * define generic/POSIX attributes */ __le32 di_inostamp; /* 4: stamp to show inode belongs to fileset */ __le32 di_fileset; /* 4: fileset number */ __le32 di_number; /* 4: inode number, aka file serial number */ __le32 di_gen; /* 4: inode generation number */ pxd_t di_ixpxd; /* 8: inode extent descriptor */ __le64 di_size; /* 8: size */ __le64 di_nblocks; /* 8: number of blocks allocated */ __le32 di_nlink; /* 4: number of links to the object */ __le32 di_uid; /* 4: user id of owner */ __le32 di_gid; /* 4: group id of owner */ __le32 di_mode; /* 4: attribute, format and permission */ struct timestruc_t di_atime; /* 8: time last data accessed */ struct timestruc_t di_ctime; /* 8: time last status changed */ struct timestruc_t di_mtime; /* 8: time last data modified */ struct timestruc_t di_otime; /* 8: time created */ dxd_t di_acl; /* 16: acl descriptor */ dxd_t di_ea; /* 16: ea descriptor */ __le32 di_next_index; /* 4: Next available dir_table index */ __le32 di_acltype; /* 4: Type of ACL */ /* * Extension Areas. * * Historically, the inode was partitioned into 4 128-byte areas, * the last 3 being defined as unions which could have multiple * uses. The first 96 bytes had been completely unused until * an index table was added to the directory. It is now more * useful to describe the last 3/4 of the inode as a single * union. We would probably be better off redesigning the * entire structure from scratch, but we don't want to break * commonality with OS/2's JFS at this time. */ union { struct { /* * This table contains the information needed to * find a directory entry from a 32-bit index. * If the index is small enough, the table is inline, * otherwise, an x-tree root overlays this table */ struct dir_table_slot _table[12]; /* 96: inline */ dtroot_t _dtroot; /* 288: dtree root */ } _dir; /* (384) */ #define di_dirtable u._dir._table #define di_dtroot u._dir._dtroot #define di_parent di_dtroot.header.idotdot #define di_DASD di_dtroot.header.DASD struct { union { u8 _data[96]; /* 96: unused */ struct { void *_imap; /* 4: unused */ __le32 _gengen; /* 4: generator */ } _imap; } _u1; /* 96: */ #define di_gengen u._file._u1._imap._gengen union { xtroot_t _xtroot; struct { u8 unused[16]; /* 16: */ dxd_t _dxd; /* 16: */ union { /* * The fast symlink area * is expected to overflow * into _inlineea when * needed (which will clear * INLINEEA). 
*/ struct { union { __le32 _rdev; /* 4: */ u8 _fastsymlink[128]; } _u; u8 _inlineea[128]; }; u8 _inline_all[256]; }; } _special; } _u2; } _file; #define di_xtroot u._file._u2._xtroot #define di_dxd u._file._u2._special._dxd #define di_btroot di_xtroot #define di_inlinedata u._file._u2._special._u #define di_rdev u._file._u2._special._u._rdev #define di_fastsymlink u._file._u2._special._u._fastsymlink #define di_inlineea u._file._u2._special._inlineea #define di_inline_all u._file._u2._special._inline_all } u; }; /* extended mode bits (on-disk inode di_mode) */ #define IFJOURNAL 0x00010000 /* journalled file */ #define ISPARSE 0x00020000 /* sparse file enabled */ #define INLINEEA 0x00040000 /* inline EA area free */ #define ISWAPFILE 0x00800000 /* file open for pager swap space */ /* more extended mode bits: attributes for OS/2 */ #define IREADONLY 0x02000000 /* no write access to file */ #define IHIDDEN 0x04000000 /* hidden file */ #define ISYSTEM 0x08000000 /* system file */ #define IDIRECTORY 0x20000000 /* directory (shadow of real bit) */ #define IARCHIVE 0x40000000 /* file archive bit */ #define INEWNAME 0x80000000 /* non-8.3 filename format */ #define IRASH 0x4E000000 /* mask for changeable attributes */ #define ATTRSHIFT 25 /* bits to shift to move attribute specification to mode position */ /* extended attributes for Linux */ #define JFS_NOATIME_FL 0x00080000 /* do not update atime */ #define JFS_DIRSYNC_FL 0x00100000 /* dirsync behaviour */ #define JFS_SYNC_FL 0x00200000 /* Synchronous updates */ #define JFS_SECRM_FL 0x00400000 /* Secure deletion */ #define JFS_UNRM_FL 0x00800000 /* allow for undelete */ #define JFS_APPEND_FL 0x01000000 /* writes to file may only append */ #define JFS_IMMUTABLE_FL 0x02000000 /* Immutable file */ #define JFS_FL_USER_VISIBLE 0x03F80000 #define JFS_FL_USER_MODIFIABLE 0x03F80000 #define JFS_FL_INHERIT 0x03C80000 #endif /*_H_JFS_DINODE */
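/*
 * Illustrative sketch (not part of the header above): small accessors showing
 * how the extended di_mode bits are intended to be read.  di_mode is stored
 * little-endian on disk, so it is converted first; the demo_* names are
 * hypothetical stand-ins for the accessors in the JFS inode code.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static inline u32 demo_dinode_mode(const struct dinode *dip)
{
	return le32_to_cpu(dip->di_mode);
}

/* True while the inline EA area at the end of the inode is still free. */
static inline bool demo_dinode_inline_ea_free(const struct dinode *dip)
{
	return (demo_dinode_mode(dip) & INLINEEA) != 0;
}

/* Pull the OS/2-style changeable attributes back out of the mode word. */
static inline u32 demo_dinode_os2_attrs(const struct dinode *dip)
{
	return (demo_dinode_mode(dip) & IRASH) >> ATTRSHIFT;
}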
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _X86_ENCLS_H #define _X86_ENCLS_H #include <linux/bitops.h> #include <linux/err.h> #include <linux/io.h> #include <linux/rwsem.h> #include <linux/types.h> #include <asm/asm.h> #include <asm/traps.h> #include "sgx.h" /* Retrieve the encoded trapnr from the specified return code. */ #define ENCLS_TRAPNR(r) ((r) & ~SGX_ENCLS_FAULT_FLAG) /* Issue a WARN() about an ENCLS function. */ #define ENCLS_WARN(r, name) { \ do { \ int _r = (r); \ WARN_ONCE(_r, "%s returned %d (0x%x)\n", (name), _r, _r); \ } while (0); \ } /* * encls_faulted() - Check if an ENCLS leaf faulted given an error code * @ret: the return value of an ENCLS leaf function call * * Return: * - true: ENCLS leaf faulted. * - false: Otherwise. */ static inline bool encls_faulted(int ret) { return ret & SGX_ENCLS_FAULT_FLAG; } /** * encls_failed() - Check if an ENCLS function failed * @ret: the return value of an ENCLS function call * * Check if an ENCLS function failed. This happens when the function causes a * fault that is not caused by an EPCM conflict or when the function returns a * non-zero value. */ static inline bool encls_failed(int ret) { if (encls_faulted(ret)) return ENCLS_TRAPNR(ret) != X86_TRAP_PF; return !!ret; } /** * __encls_ret_N - encode an ENCLS function that returns an error code in EAX * @rax: function number * @inputs: asm inputs for the function * * Emit assembly for an ENCLS function that returns an error code, e.g. EREMOVE. * And because SGX isn't complex enough as it is, function that return an error * code also modify flags. * * Return: * 0 on success, * SGX error code on failure */ #define __encls_ret_N(rax, inputs...) \ ({ \ int ret; \ asm volatile( \ "1: .byte 0x0f, 0x01, 0xcf;\n\t" \ "2:\n" \ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \ : "=a"(ret) \ : "a"(rax), inputs \ : "memory", "cc"); \ ret; \ }) #define __encls_ret_1(rax, rcx) \ ({ \ __encls_ret_N(rax, "c"(rcx)); \ }) #define __encls_ret_2(rax, rbx, rcx) \ ({ \ __encls_ret_N(rax, "b"(rbx), "c"(rcx)); \ }) #define __encls_ret_3(rax, rbx, rcx, rdx) \ ({ \ __encls_ret_N(rax, "b"(rbx), "c"(rcx), "d"(rdx)); \ }) /** * __encls_N - encode an ENCLS function that doesn't return an error code * @rax: function number * @rbx_out: optional output variable * @inputs: asm inputs for the function * * Emit assembly for an ENCLS function that does not return an error code, e.g. * ECREATE. Leaves without error codes either succeed or fault. @rbx_out is an * optional parameter for use by EDGBRD, which returns the requested value in * RBX. * * Return: * 0 on success, * trapnr with SGX_ENCLS_FAULT_FLAG set on fault */ #define __encls_N(rax, rbx_out, inputs...) \ ({ \ int ret; \ asm volatile( \ "1: .byte 0x0f, 0x01, 0xcf;\n\t" \ " xor %%eax,%%eax;\n" \ "2:\n" \ _ASM_EXTABLE_TYPE(1b, 2b, EX_TYPE_FAULT_SGX) \ : "=a"(ret), "=b"(rbx_out) \ : "a"(rax), inputs \ : "memory"); \ ret; \ }) #define __encls_2(rax, rbx, rcx) \ ({ \ unsigned long ign_rbx_out; \ __encls_N(rax, ign_rbx_out, "b"(rbx), "c"(rcx)); \ }) #define __encls_1_1(rax, data, rcx) \ ({ \ unsigned long rbx_out; \ int ret = __encls_N(rax, rbx_out, "c"(rcx)); \ if (!ret) \ data = rbx_out; \ ret; \ }) /* Initialize an EPC page into an SGX Enclave Control Structure (SECS) page. */ static inline int __ecreate(struct sgx_pageinfo *pginfo, void *secs) { return __encls_2(ECREATE, pginfo, secs); } /* Hash a 256 byte region of an enclave page to SECS:MRENCLAVE. 
*/ static inline int __eextend(void *secs, void *addr) { return __encls_2(EEXTEND, secs, addr); } /* * Associate an EPC page to an enclave either as a REG or TCS page * populated with the provided data. */ static inline int __eadd(struct sgx_pageinfo *pginfo, void *addr) { return __encls_2(EADD, pginfo, addr); } /* Finalize enclave build, initialize enclave for user code execution. */ static inline int __einit(void *sigstruct, void *token, void *secs) { return __encls_ret_3(EINIT, sigstruct, secs, token); } /* Disassociate EPC page from its enclave and mark it as unused. */ static inline int __eremove(void *addr) { return __encls_ret_1(EREMOVE, addr); } /* Copy data to an EPC page belonging to a debug enclave. */ static inline int __edbgwr(void *addr, unsigned long *data) { return __encls_2(EDGBWR, *data, addr); } /* Copy data from an EPC page belonging to a debug enclave. */ static inline int __edbgrd(void *addr, unsigned long *data) { return __encls_1_1(EDGBRD, *data, addr); } /* Track that software has completed the required TLB address clears. */ static inline int __etrack(void *addr) { return __encls_ret_1(ETRACK, addr); } /* Load, verify, and unblock an EPC page. */ static inline int __eldu(struct sgx_pageinfo *pginfo, void *addr, void *va) { return __encls_ret_3(ELDU, pginfo, addr, va); } /* Make EPC page inaccessible to enclave, ready to be written to memory. */ static inline int __eblock(void *addr) { return __encls_ret_1(EBLOCK, addr); } /* Initialize an EPC page into a Version Array (VA) page. */ static inline int __epa(void *addr) { unsigned long rbx = SGX_PAGE_TYPE_VA; return __encls_2(EPA, rbx, addr); } /* Invalidate an EPC page and write it out to main memory. */ static inline int __ewb(struct sgx_pageinfo *pginfo, void *addr, void *va) { return __encls_ret_3(EWB, pginfo, addr, va); } /* Restrict the EPCM permissions of an EPC page. */ static inline int __emodpr(struct sgx_secinfo *secinfo, void *addr) { return __encls_ret_2(EMODPR, secinfo, addr); } /* Change the type of an EPC page. */ static inline int __emodt(struct sgx_secinfo *secinfo, void *addr) { return __encls_ret_2(EMODT, secinfo, addr); } /* Zero a page of EPC memory and add it to an initialized enclave. */ static inline int __eaug(struct sgx_pageinfo *pginfo, void *addr) { return __encls_2(EAUG, pginfo, addr); } #endif /* _X86_ENCLS_H */
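/*
 * Illustrative sketch (not part of the header above): the intended caller
 * pattern for the ENCLS wrappers.  Zero means success, a plain non-zero value
 * is the SGX error code from EAX, and a faulting leaf comes back with
 * SGX_ENCLS_FAULT_FLAG set so the trap number can be recovered with
 * ENCLS_TRAPNR().  The error mapping below is only an example policy.
 */
static int demo_remove_page(void *epc_addr)
{
	int ret = __eremove(epc_addr);

	if (!ret)
		return 0;

	if (encls_faulted(ret)) {
		/* A #PF typically just reflects an EPCM conflict. */
		if (ENCLS_TRAPNR(ret) != X86_TRAP_PF)
			ENCLS_WARN(ret, "EREMOVE");
		return -EFAULT;
	}

	/* Non-faulting failure: ret is an SGX error code such as SGX_CHILD_PRESENT. */
	return -EBUSY;
}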
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2003-2013 Jozsef Kadlecsik <[email protected]> */ /* Kernel module implementing an IP set type: the hash:ip,port,net type */ #include <linux/jhash.h> #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/random.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/netlink.h> #include <net/tcp.h> #include <linux/netfilter.h> #include <linux/netfilter/ipset/pfxlen.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_getport.h> #include <linux/netfilter/ipset/ip_set_hash.h> #define IPSET_TYPE_REV_MIN 0 /* 1 SCTP and UDPLITE support added */ /* 2 Range as input support for IPv4 added */ /* 3 nomatch flag support added */ /* 4 Counters support added */ /* 5 Comments support added */ /* 6 Forceadd support added */ /* 7 skbinfo support added */ #define IPSET_TYPE_REV_MAX 8 /* bucketsize, initval support added */ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <[email protected]>"); IP_SET_MODULE_DESC("hash:ip,port,net", IPSET_TYPE_REV_MIN, IPSET_TYPE_REV_MAX); MODULE_ALIAS("ip_set_hash:ip,port,net"); /* Type specific function prefix */ #define HTYPE hash_ipportnet /* We squeeze the "nomatch" flag into cidr: we don't support cidr == 0 * However this way we have to store internally cidr - 1, * dancing back and forth. */ #define IP_SET_HASH_WITH_NETS_PACKED #define IP_SET_HASH_WITH_PROTO #define IP_SET_HASH_WITH_NETS /* IPv4 variant */ /* Member elements */ struct hash_ipportnet4_elem { __be32 ip; __be32 ip2; __be16 port; u8 cidr:7; u8 nomatch:1; u8 proto; }; /* Common functions */ static bool hash_ipportnet4_data_equal(const struct hash_ipportnet4_elem *ip1, const struct hash_ipportnet4_elem *ip2, u32 *multi) { return ip1->ip == ip2->ip && ip1->ip2 == ip2->ip2 && ip1->cidr == ip2->cidr && ip1->port == ip2->port && ip1->proto == ip2->proto; } static int hash_ipportnet4_do_data_match(const struct hash_ipportnet4_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_ipportnet4_data_set_flags(struct hash_ipportnet4_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_ipportnet4_data_netmask(struct hash_ipportnet4_elem *elem, u8 cidr) { elem->ip2 &= ip_set_netmask(cidr); elem->cidr = cidr - 1; } static bool hash_ipportnet4_data_list(struct sk_buff *skb, const struct hash_ipportnet4_elem *data) { u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr4(skb, IPSET_ATTR_IP, data->ip) || nla_put_ipaddr4(skb, IPSET_ATTR_IP2, data->ip2) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ipportnet4_data_next(struct hash_ipportnet4_elem *next, const struct hash_ipportnet4_elem *d) { next->ip = d->ip; next->port = d->port; next->ip2 = d->ip2; } #define MTYPE hash_ipportnet4 #define HOST_MASK 32 #include "ip_set_hash_gen.h" static int hash_ipportnet4_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip4_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip4addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip); ip4addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2); e.ip2 &= ip_set_netmask(e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct hash_ipportnet4 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet4_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 ip = 0, ip_to = 0, p = 0, port, port_to; u32 ip2_from = 0, ip2_to = 0, ip2, i = 0; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP], &ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2], &ip2_from); if (ret) return ret; if (tb[IPSET_ATTR_CIDR2]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else { return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMP)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } with_ports = with_ports && tb[IPSET_ATTR_PORT_TO]; if (adt == IPSET_TEST || !(tb[IPSET_ATTR_CIDR] || tb[IPSET_ATTR_IP_TO] || with_ports || tb[IPSET_ATTR_IP2_TO])) { e.ip = htonl(ip); e.ip2 = htonl(ip2_from & ip_set_hostmask(e.cidr + 1)); ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 
0 : ret; } ip_to = ip; if (tb[IPSET_ATTR_IP_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to); if (ret) return ret; if (ip > ip_to) swap(ip, ip_to); } else if (tb[IPSET_ATTR_CIDR]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; ip_set_mask_from_to(ip, ip_to, cidr); } port_to = port = ntohs(e.port); if (tb[IPSET_ATTR_PORT_TO]) { port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); } ip2_to = ip2_from; if (tb[IPSET_ATTR_IP2_TO]) { ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to); if (ret) return ret; if (ip2_from > ip2_to) swap(ip2_from, ip2_to); if (ip2_from + UINT_MAX == ip2_to) return -IPSET_ERR_HASH_RANGE; } else { ip_set_mask_from_to(ip2_from, ip2_to, e.cidr + 1); } if (retried) { ip = ntohl(h->next.ip); p = ntohs(h->next.port); ip2 = ntohl(h->next.ip2); } else { p = port; ip2 = ip2_from; } for (; ip <= ip_to; ip++) { e.ip = htonl(ip); for (; p <= port_to; p++) { e.port = htons(p); do { i++; e.ip2 = htonl(ip2); ip2 = ip_set_range_to_cidr(ip2, ip2_to, &cidr); e.cidr = cidr - 1; if (i > IPSET_MAX_RANGE) { hash_ipportnet4_data_next(&h->next, &e); return -ERANGE; } ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } while (ip2++ < ip2_to); ip2 = ip2_from; } p = port; } return ret; } /* IPv6 variant */ struct hash_ipportnet6_elem { union nf_inet_addr ip; union nf_inet_addr ip2; __be16 port; u8 cidr:7; u8 nomatch:1; u8 proto; }; /* Common functions */ static bool hash_ipportnet6_data_equal(const struct hash_ipportnet6_elem *ip1, const struct hash_ipportnet6_elem *ip2, u32 *multi) { return ipv6_addr_equal(&ip1->ip.in6, &ip2->ip.in6) && ipv6_addr_equal(&ip1->ip2.in6, &ip2->ip2.in6) && ip1->cidr == ip2->cidr && ip1->port == ip2->port && ip1->proto == ip2->proto; } static int hash_ipportnet6_do_data_match(const struct hash_ipportnet6_elem *elem) { return elem->nomatch ? -ENOTEMPTY : 1; } static void hash_ipportnet6_data_set_flags(struct hash_ipportnet6_elem *elem, u32 flags) { elem->nomatch = !!((flags >> 16) & IPSET_FLAG_NOMATCH); } static void hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *elem, u8 *flags) { swap(*flags, elem->nomatch); } static void hash_ipportnet6_data_netmask(struct hash_ipportnet6_elem *elem, u8 cidr) { ip6_netmask(&elem->ip2, cidr); elem->cidr = cidr - 1; } static bool hash_ipportnet6_data_list(struct sk_buff *skb, const struct hash_ipportnet6_elem *data) { u32 flags = data->nomatch ? 
IPSET_FLAG_NOMATCH : 0; if (nla_put_ipaddr6(skb, IPSET_ATTR_IP, &data->ip.in6) || nla_put_ipaddr6(skb, IPSET_ATTR_IP2, &data->ip2.in6) || nla_put_net16(skb, IPSET_ATTR_PORT, data->port) || nla_put_u8(skb, IPSET_ATTR_CIDR2, data->cidr + 1) || nla_put_u8(skb, IPSET_ATTR_PROTO, data->proto) || (flags && nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(flags)))) goto nla_put_failure; return false; nla_put_failure: return true; } static void hash_ipportnet6_data_next(struct hash_ipportnet6_elem *next, const struct hash_ipportnet6_elem *d) { next->port = d->port; } #undef MTYPE #undef HOST_MASK #define MTYPE hash_ipportnet6 #define HOST_MASK 128 #define IP_SET_EMIT_CREATE #include "ip_set_hash_gen.h" static int hash_ipportnet6_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, struct ip_set_adt_opt *opt) { const struct hash_ipportnet6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet6_elem e = { .cidr = INIT_CIDR(h->nets[0].cidr[0], HOST_MASK), }; struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set); if (adt == IPSET_TEST) e.cidr = HOST_MASK - 1; if (!ip_set_get_ip6_port(skb, opt->flags & IPSET_DIM_TWO_SRC, &e.port, &e.proto)) return -EINVAL; ip6addrptr(skb, opt->flags & IPSET_DIM_ONE_SRC, &e.ip.in6); ip6addrptr(skb, opt->flags & IPSET_DIM_THREE_SRC, &e.ip2.in6); ip6_netmask(&e.ip2, e.cidr + 1); return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags); } static int hash_ipportnet6_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { const struct hash_ipportnet6 *h = set->data; ipset_adtfn adtfn = set->variant->adt[adt]; struct hash_ipportnet6_elem e = { .cidr = HOST_MASK - 1 }; struct ip_set_ext ext = IP_SET_INIT_UEXT(set); u32 port, port_to; bool with_ports = false; u8 cidr; int ret; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); if (unlikely(!tb[IPSET_ATTR_IP] || !tb[IPSET_ATTR_IP2] || !ip_set_attr_netorder(tb, IPSET_ATTR_PORT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_PORT_TO) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (unlikely(tb[IPSET_ATTR_IP_TO])) return -IPSET_ERR_HASH_RANGE_UNSUPPORTED; if (unlikely(tb[IPSET_ATTR_CIDR])) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR]); if (cidr != HOST_MASK) return -IPSET_ERR_INVALID_CIDR; } ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP], &e.ip); if (ret) return ret; ret = ip_set_get_extensions(set, tb, &ext); if (ret) return ret; ret = ip_set_get_ipaddr6(tb[IPSET_ATTR_IP2], &e.ip2); if (ret) return ret; if (tb[IPSET_ATTR_CIDR2]) { cidr = nla_get_u8(tb[IPSET_ATTR_CIDR2]); if (!cidr || cidr > HOST_MASK) return -IPSET_ERR_INVALID_CIDR; e.cidr = cidr - 1; } ip6_netmask(&e.ip2, e.cidr + 1); e.port = nla_get_be16(tb[IPSET_ATTR_PORT]); if (tb[IPSET_ATTR_PROTO]) { e.proto = nla_get_u8(tb[IPSET_ATTR_PROTO]); with_ports = ip_set_proto_with_ports(e.proto); if (e.proto == 0) return -IPSET_ERR_INVALID_PROTO; } else { return -IPSET_ERR_MISSING_PROTO; } if (!(with_ports || e.proto == IPPROTO_ICMPV6)) e.port = 0; if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); if (cadt_flags & IPSET_FLAG_NOMATCH) flags |= (IPSET_FLAG_NOMATCH << 16); } if (adt == IPSET_TEST || !with_ports || !tb[IPSET_ATTR_PORT_TO]) { ret = adtfn(set, &e, &ext, &ext, flags); return ip_set_enomatch(ret, flags, adt, set) ? -ret : ip_set_eexist(ret, flags) ? 
0 : ret; } port = ntohs(e.port); port_to = ip_set_get_h16(tb[IPSET_ATTR_PORT_TO]); if (port > port_to) swap(port, port_to); if (retried) port = ntohs(h->next.port); for (; port <= port_to; port++) { e.port = htons(port); ret = adtfn(set, &e, &ext, &ext, flags); if (ret && !ip_set_eexist(ret, flags)) return ret; ret = 0; } return ret; } static struct ip_set_type hash_ipportnet_type __read_mostly = { .name = "hash:ip,port,net", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_IP | IPSET_TYPE_PORT | IPSET_TYPE_IP2 | IPSET_TYPE_NOMATCH, .dimension = IPSET_DIM_THREE, .family = NFPROTO_UNSPEC, .revision_min = IPSET_TYPE_REV_MIN, .revision_max = IPSET_TYPE_REV_MAX, .create_flags[IPSET_TYPE_REV_MAX] = IPSET_CREATE_FLAG_BUCKETSIZE, .create = hash_ipportnet_create, .create_policy = { [IPSET_ATTR_HASHSIZE] = { .type = NLA_U32 }, [IPSET_ATTR_MAXELEM] = { .type = NLA_U32 }, [IPSET_ATTR_INITVAL] = { .type = NLA_U32 }, [IPSET_ATTR_BUCKETSIZE] = { .type = NLA_U8 }, [IPSET_ATTR_RESIZE] = { .type = NLA_U8 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_IP] = { .type = NLA_NESTED }, [IPSET_ATTR_IP_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2] = { .type = NLA_NESTED }, [IPSET_ATTR_IP2_TO] = { .type = NLA_NESTED }, [IPSET_ATTR_PORT] = { .type = NLA_U16 }, [IPSET_ATTR_PORT_TO] = { .type = NLA_U16 }, [IPSET_ATTR_CIDR] = { .type = NLA_U8 }, [IPSET_ATTR_CIDR2] = { .type = NLA_U8 }, [IPSET_ATTR_PROTO] = { .type = NLA_U8 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_BYTES] = { .type = NLA_U64 }, [IPSET_ATTR_PACKETS] = { .type = NLA_U64 }, [IPSET_ATTR_COMMENT] = { .type = NLA_NUL_STRING, .len = IPSET_MAX_COMMENT_SIZE }, [IPSET_ATTR_SKBMARK] = { .type = NLA_U64 }, [IPSET_ATTR_SKBPRIO] = { .type = NLA_U32 }, [IPSET_ATTR_SKBQUEUE] = { .type = NLA_U16 }, }, .me = THIS_MODULE, }; static int __init hash_ipportnet_init(void) { return ip_set_type_register(&hash_ipportnet_type); } static void __exit hash_ipportnet_fini(void) { rcu_barrier(); ip_set_type_unregister(&hash_ipportnet_type); } module_init(hash_ipportnet_init); module_exit(hash_ipportnet_fini);
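/*
 * Illustrative sketch (not part of the module above): the cidr packing that
 * the "squeeze the nomatch flag into cidr" comment describes.  A prefix such
 * as /24 is stored as cidr - 1 = 23 in the 7-bit field next to the nomatch
 * bit, and reported back as cidr + 1 (see the IPSET_ATTR_CIDR2 handling
 * above).  demo_elem is a stand-in for the real hash_ipportnet{4,6}_elem
 * layouts.
 */
#include <linux/types.h>

struct demo_elem {
	u8 cidr:7;
	u8 nomatch:1;
};

static inline void demo_elem_set_net(struct demo_elem *e, u8 cidr, bool nomatch)
{
	/* cidr == 0 is rejected earlier; 1..HOST_MASK is stored as 0..HOST_MASK-1 */
	e->cidr = cidr - 1;
	e->nomatch = nomatch;
}

static inline u8 demo_elem_get_cidr(const struct demo_elem *e)
{
	return e->cidr + 1;
}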
// SPDX-License-Identifier: GPL-2.0-or-later /* Mantis VP-3030 driver Copyright (C) Manu Abraham ([email protected]) */ #include <linux/signal.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <media/dmxdev.h> #include <media/dvbdev.h> #include <media/dvb_demux.h> #include <media/dvb_frontend.h> #include <media/dvb_net.h> #include "zl10353.h" #include "tda665x.h" #include "mantis_common.h" #include "mantis_ioc.h" #include "mantis_dvb.h" #include "mantis_vp3030.h" static struct zl10353_config mantis_vp3030_config = { .demod_address = 0x0f, }; static struct tda665x_config env57h12d5_config = { .name = "ENV57H12D5 (ET-50DT)", .addr = 0x60, .frequency_min = 47 * MHz, .frequency_max = 862 * MHz, .frequency_offst = 3616667, .ref_multiplier = 6, /* 1/6 MHz */ .ref_divider = 100000, /* 1/6 MHz */ }; #define MANTIS_MODEL_NAME "VP-3030" #define MANTIS_DEV_TYPE "DVB-T" static int vp3030_frontend_init(struct mantis_pci *mantis, struct dvb_frontend *fe) { struct i2c_adapter *adapter = &mantis->adapter; struct mantis_hwconfig *config = mantis->hwconfig; int err = 0; mantis_gpio_set_bits(mantis, config->reset, 0); msleep(100); err = mantis_frontend_power(mantis, POWER_ON); msleep(100); mantis_gpio_set_bits(mantis, config->reset, 1); if (err == 0) { msleep(250); dprintk(MANTIS_ERROR, 1, "Probing for 10353 (DVB-T)"); fe = dvb_attach(zl10353_attach, &mantis_vp3030_config, adapter); if (!fe) return -1; dvb_attach(tda665x_attach, fe, &env57h12d5_config, adapter); } else { dprintk(MANTIS_ERROR, 1, "Frontend on <%s> POWER ON failed! <%d>", adapter->name, err); return -EIO; } mantis->fe = fe; dprintk(MANTIS_ERROR, 1, "Done!"); return 0; } struct mantis_hwconfig vp3030_config = { .model_name = MANTIS_MODEL_NAME, .dev_type = MANTIS_DEV_TYPE, .ts_size = MANTIS_TS_188, .baud_rate = MANTIS_BAUD_9600, .parity = MANTIS_PARITY_NONE, .bytes = 0, .frontend_init = vp3030_frontend_init, .power = GPIF_A12, .reset = GPIF_A13, .i2c_mode = MANTIS_BYTE_MODE };
/* * P1010/P1014 Silicon/SoC Device Tree Source (post include) * * Copyright 2011 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ &ifc { #address-cells = <2>; #size-cells = <1>; compatible = "fsl,ifc"; interrupts = <16 2 0 0 19 2 0 0>; }; /* controller at 0x9000 */ &pci0 { compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; bus-range = <0 255>; clock-frequency = <33333333>; interrupts = <16 2 0 0>; pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; device_type = "pci"; interrupts = <16 2 0 0>; interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ 0000 0x0 0x0 0x1 &mpic 0x4 0x1 0x0 0x0 0000 0x0 0x0 0x2 &mpic 0x5 0x1 0x0 0x0 0000 0x0 0x0 0x3 &mpic 0x6 0x1 0x0 0x0 0000 0x0 0x0 0x4 &mpic 0x7 0x1 0x0 0x0 >; }; }; /* controller at 0xa000 */ &pci1 { compatible = "fsl,p1010-pcie", "fsl,qoriq-pcie-v2.3"; device_type = "pci"; #size-cells = <2>; #address-cells = <3>; bus-range = <0 255>; clock-frequency = <33333333>; interrupts = <16 2 0 0>; pcie@0 { reg = <0 0 0 0 0>; #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; device_type = "pci"; interrupts = <16 2 0 0>; interrupt-map-mask = <0xf800 0 0 7>; interrupt-map = < /* IDSEL 0x0 */ 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0x0 0x0 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0x0 0x0 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0x0 0x0 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0x0 0x0 >; }; }; &soc { #address-cells = <1>; #size-cells = <1>; device_type = "soc"; compatible = "fsl,p1010-immr", "simple-bus"; bus-frequency = <0>; // Filled out by uboot. 
ecm-law@0 { compatible = "fsl,ecm-law"; reg = <0x0 0x1000>; fsl,num-laws = <12>; }; ecm@1000 { compatible = "fsl,p1010-ecm", "fsl,ecm"; reg = <0x1000 0x1000>; interrupts = <16 2 0 0>; }; memory-controller@2000 { compatible = "fsl,p1010-memory-controller"; reg = <0x2000 0x1000>; interrupts = <16 2 0 0>; }; /include/ "pq3-i2c-0.dtsi" i2c@3000 { fsl,i2c-erratum-a004447; }; /include/ "pq3-i2c-1.dtsi" i2c@3100 { fsl,i2c-erratum-a004447; }; /include/ "pq3-duart-0.dtsi" /include/ "pq3-espi-0.dtsi" spi0: spi@7000 { fsl,espi-num-chipselects = <1>; }; /include/ "pq3-gpio-0.dtsi" /include/ "pq3-sata2-0.dtsi" /include/ "pq3-sata2-1.dtsi" can0: can@1c000 { compatible = "fsl,p1010-flexcan"; reg = <0x1c000 0x1000>; interrupts = <48 0x2 0 0>; big-endian; }; can1: can@1d000 { compatible = "fsl,p1010-flexcan"; reg = <0x1d000 0x1000>; interrupts = <61 0x2 0 0>; big-endian; }; L2: l2-cache-controller@20000 { compatible = "fsl,p1010-l2-cache-controller", "fsl,p1014-l2-cache-controller"; reg = <0x20000 0x1000>; cache-line-size = <32>; // 32 bytes cache-size = <0x40000>; // L2,256K interrupts = <16 2 0 0>; }; /include/ "pq3-dma-0.dtsi" /include/ "pq3-usb2-dr-0.dtsi" usb@22000 { compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr"; }; /include/ "pq3-esdhc-0.dtsi" sdhc@2e000 { compatible = "fsl,p1010-esdhc", "fsl,esdhc"; sdhci,auto-cmd12; }; /include/ "pq3-sec4.4-0.dtsi" /include/ "pq3-mpic.dtsi" /include/ "pq3-mpic-timer-B.dtsi" /include/ "pq3-etsec2-0.dtsi" /include/ "pq3-etsec2-1.dtsi" /include/ "pq3-etsec2-2.dtsi" enet0: ethernet@b0000 { fsl,pmc-handle = <&etsec1_clk>; }; enet1: ethernet@b1000 { fsl,pmc-handle = <&etsec2_clk>; }; enet2: ethernet@b2000 { fsl,pmc-handle = <&etsec3_clk>; }; global-utilities@e0000 { compatible = "fsl,p1010-guts"; reg = <0xe0000 0x1000>; fsl,has-rstcr; }; /include/ "pq3-power.dtsi" };
// SPDX-License-Identifier: GPL-2.0 /* * fixmaps for parisc * * Copyright (c) 2019 Sven Schnelle <[email protected]> */ #include <linux/kprobes.h> #include <linux/mm.h> #include <asm/cacheflush.h> #include <asm/fixmap.h> void notrace set_fixmap(enum fixed_addresses idx, phys_addr_t phys) { unsigned long vaddr = __fix_to_virt(idx); pgd_t *pgd = pgd_offset_k(vaddr); p4d_t *p4d = p4d_offset(pgd, vaddr); pud_t *pud = pud_offset(p4d, vaddr); pmd_t *pmd = pmd_offset(pud, vaddr); pte_t *pte; pte = pte_offset_kernel(pmd, vaddr); set_pte_at(&init_mm, vaddr, pte, __mk_pte(phys, PAGE_KERNEL_RWX)); flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); } void notrace clear_fixmap(enum fixed_addresses idx) { unsigned long vaddr = __fix_to_virt(idx); pte_t *pte = virt_to_kpte(vaddr); if (WARN_ON(pte_none(*pte))) return; pte_clear(&init_mm, vaddr, pte); flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE); }
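As a quick illustration of the pair above, a minimal sketch of temporarily mapping a physical page through a fixmap slot and tearing it down again; the slot choice and the patching step are assumptions for illustration, and the cache/I-cache flushing the real code-patching path performs is omitted here.

/* Hedged usage sketch (slot choice assumed, cache maintenance omitted):
 * map a physical page at a fixed virtual slot, write through it, unmap.
 */
static void example_poke_via_fixmap(phys_addr_t phys, u32 insn)
{
	void *vaddr = (void *)__fix_to_virt(FIX_TEXT_POKE0);

	set_fixmap(FIX_TEXT_POKE0, phys & PAGE_MASK);
	*(u32 *)(vaddr + offset_in_page(phys)) = insn;
	clear_fixmap(FIX_TEXT_POKE0);
}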
// SPDX-License-Identifier: GPL-2.0-or-later /* * TQM 8541 Device Tree Source * * Copyright 2008 Freescale Semiconductor Inc. */ /dts-v1/; /include/ "fsl/e500v1_power_isa.dtsi" / { model = "tqc,tqm8541"; compatible = "tqc,tqm8541"; #address-cells = <1>; #size-cells = <1>; aliases { ethernet0 = &enet0; ethernet1 = &enet1; serial0 = &serial0; serial1 = &serial1; pci0 = &pci0; }; cpus { #address-cells = <1>; #size-cells = <0>; PowerPC,8541@0 { device_type = "cpu"; reg = <0>; d-cache-line-size = <32>; i-cache-line-size = <32>; d-cache-size = <32768>; i-cache-size = <32768>; timebase-frequency = <0>; bus-frequency = <0>; clock-frequency = <0>; next-level-cache = <&L2>; }; }; memory { device_type = "memory"; reg = <0x00000000 0x10000000>; }; soc@e0000000 { #address-cells = <1>; #size-cells = <1>; device_type = "soc"; ranges = <0x0 0xe0000000 0x100000>; bus-frequency = <0>; compatible = "fsl,mpc8541-immr", "simple-bus"; ecm-law@0 { compatible = "fsl,ecm-law"; reg = <0x0 0x1000>; fsl,num-laws = <8>; }; ecm@1000 { compatible = "fsl,mpc8541-ecm", "fsl,ecm"; reg = <0x1000 0x1000>; interrupts = <17 2>; interrupt-parent = <&mpic>; }; memory-controller@2000 { compatible = "fsl,mpc8540-memory-controller"; reg = <0x2000 0x1000>; interrupt-parent = <&mpic>; interrupts = <18 2>; }; L2: l2-cache-controller@20000 { compatible = "fsl,mpc8540-l2-cache-controller"; reg = <0x20000 0x1000>; cache-line-size = <32>; cache-size = <0x40000>; // L2, 256K interrupt-parent = <&mpic>; interrupts = <16 2>; }; i2c@3000 { #address-cells = <1>; #size-cells = <0>; cell-index = <0>; compatible = "fsl-i2c"; reg = <0x3000 0x100>; interrupts = <43 2>; interrupt-parent = <&mpic>; dfsrr; dtt@48 { compatible = "national,lm75"; reg = <0x48>; }; rtc@68 { compatible = "dallas,ds1337"; reg = <0x68>; }; }; dma@21300 { #address-cells = <1>; #size-cells = <1>; compatible = "fsl,mpc8541-dma", "fsl,eloplus-dma"; reg = <0x21300 0x4>; ranges = <0x0 0x21100 0x200>; cell-index = <0>; dma-channel@0 { compatible = "fsl,mpc8541-dma-channel", "fsl,eloplus-dma-channel"; reg = <0x0 0x80>; cell-index = <0>; interrupt-parent = <&mpic>; interrupts = <20 2>; }; dma-channel@80 { compatible = "fsl,mpc8541-dma-channel", "fsl,eloplus-dma-channel"; reg = <0x80 0x80>; cell-index = <1>; interrupt-parent = <&mpic>; interrupts = <21 2>; }; dma-channel@100 { compatible = "fsl,mpc8541-dma-channel", "fsl,eloplus-dma-channel"; reg = <0x100 0x80>; cell-index = <2>; interrupt-parent = <&mpic>; interrupts = <22 2>; }; dma-channel@180 { compatible = "fsl,mpc8541-dma-channel", "fsl,eloplus-dma-channel"; reg = <0x180 0x80>; cell-index = <3>; interrupt-parent = <&mpic>; interrupts = <23 2>; }; }; enet0: ethernet@24000 { #address-cells = <1>; #size-cells = <1>; cell-index = <0>; device_type = "network"; model = "TSEC"; compatible = "gianfar"; reg = <0x24000 0x1000>; ranges = <0x0 0x24000 0x1000>; local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <29 2 30 2 34 2>; interrupt-parent = <&mpic>; tbi-handle = <&tbi0>; phy-handle = <&phy2>; mdio@520 { #address-cells = <1>; #size-cells = <0>; compatible = "fsl,gianfar-mdio"; reg = <0x520 0x20>; phy1: ethernet-phy@1 { interrupt-parent = <&mpic>; interrupts = <8 1>; reg = <1>; }; phy2: ethernet-phy@2 { interrupt-parent = <&mpic>; interrupts = <8 1>; reg = <2>; }; phy3: ethernet-phy@3 { interrupt-parent = <&mpic>; interrupts = <8 1>; reg = <3>; }; tbi0: tbi-phy@11 { reg = <0x11>; device_type = "tbi-phy"; }; }; }; enet1: ethernet@25000 { #address-cells = <1>; #size-cells = <1>; cell-index = <1>; device_type = "network"; model = 
"TSEC"; compatible = "gianfar"; reg = <0x25000 0x1000>; ranges = <0x0 0x25000 0x1000>; local-mac-address = [ 00 00 00 00 00 00 ]; interrupts = <35 2 36 2 40 2>; interrupt-parent = <&mpic>; tbi-handle = <&tbi1>; phy-handle = <&phy1>; mdio@520 { #address-cells = <1>; #size-cells = <0>; compatible = "fsl,gianfar-tbi"; reg = <0x520 0x20>; tbi1: tbi-phy@11 { reg = <0x11>; device_type = "tbi-phy"; }; }; }; serial0: serial@4500 { cell-index = <0>; device_type = "serial"; compatible = "fsl,ns16550", "ns16550"; reg = <0x4500 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; interrupt-parent = <&mpic>; }; serial1: serial@4600 { cell-index = <1>; device_type = "serial"; compatible = "fsl,ns16550", "ns16550"; reg = <0x4600 0x100>; // reg base, size clock-frequency = <0>; // should we fill in in uboot? interrupts = <42 2>; interrupt-parent = <&mpic>; }; crypto@30000 { compatible = "fsl,sec2.0"; reg = <0x30000 0x10000>; interrupts = <45 2>; interrupt-parent = <&mpic>; fsl,num-channels = <4>; fsl,channel-fifo-len = <24>; fsl,exec-units-mask = <0x7e>; fsl,descriptor-types-mask = <0x01010ebf>; }; mpic: pic@40000 { interrupt-controller; #address-cells = <0>; #interrupt-cells = <2>; reg = <0x40000 0x40000>; device_type = "open-pic"; compatible = "chrp,open-pic"; }; cpm@919c0 { #address-cells = <1>; #size-cells = <1>; compatible = "fsl,mpc8541-cpm", "fsl,cpm2", "simple-bus"; reg = <0x919c0 0x30>; ranges; muram@80000 { #address-cells = <1>; #size-cells = <1>; ranges = <0 0x80000 0x10000>; data@0 { compatible = "fsl,cpm-muram-data"; reg = <0 0x2000 0x9000 0x1000>; }; }; brg@919f0 { compatible = "fsl,mpc8541-brg", "fsl,cpm2-brg", "fsl,cpm-brg"; reg = <0x919f0 0x10 0x915f0 0x10>; clock-frequency = <0>; }; cpmpic: pic@90c00 { interrupt-controller; #address-cells = <0>; #interrupt-cells = <2>; interrupts = <46 2>; interrupt-parent = <&mpic>; reg = <0x90c00 0x80>; compatible = "fsl,mpc8541-cpm-pic", "fsl,cpm2-pic"; }; }; }; pci0: pci@e0008000 { #interrupt-cells = <1>; #size-cells = <2>; #address-cells = <3>; compatible = "fsl,mpc8540-pcix", "fsl,mpc8540-pci"; device_type = "pci"; reg = <0xe0008000 0x1000>; clock-frequency = <66666666>; interrupt-map-mask = <0xf800 0x0 0x0 0x7>; interrupt-map = < /* IDSEL 28 */ 0xe000 0 0 1 &mpic 2 1 0xe000 0 0 2 &mpic 3 1 0xe000 0 0 3 &mpic 6 1 0xe000 0 0 4 &mpic 5 1 /* IDSEL 11 */ 0x5800 0 0 1 &mpic 6 1 0x5800 0 0 2 &mpic 5 1 >; interrupt-parent = <&mpic>; interrupts = <24 2>; bus-range = <0 0>; ranges = <0x02000000 0 0x80000000 0x80000000 0 0x20000000 0x01000000 0 0x00000000 0xe2000000 0 0x01000000>; }; };
// SPDX-License-Identifier: GPL-2.0-only // Copyright (C) 2021-3 ARM Limited. #ifndef FP_PTRACE_H #define FP_PTRACE_H #define SVCR_SM_SHIFT 0 #define SVCR_ZA_SHIFT 1 #define SVCR_SM (1 << SVCR_SM_SHIFT) #define SVCR_ZA (1 << SVCR_ZA_SHIFT) #define HAVE_SVE_SHIFT 0 #define HAVE_SME_SHIFT 1 #define HAVE_SME2_SHIFT 2 #define HAVE_FA64_SHIFT 3 #define HAVE_FPMR_SHIFT 4 #define HAVE_SVE (1 << HAVE_SVE_SHIFT) #define HAVE_SME (1 << HAVE_SME_SHIFT) #define HAVE_SME2 (1 << HAVE_SME2_SHIFT) #define HAVE_FA64 (1 << HAVE_FA64_SHIFT) #define HAVE_FPMR (1 << HAVE_FPMR_SHIFT) #endif
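A small example of how the HAVE_* capability bits above are meant to be combined and tested; the function and variable names are illustrative only.

/* Illustrative use of the HAVE_* bits (names of locals are assumptions). */
static int example_requires_sme2(unsigned int features)
{
	/* SME2 only makes sense when base SME is also reported. */
	if ((features & (HAVE_SME | HAVE_SME2)) != (HAVE_SME | HAVE_SME2))
		return 0;

	return 1;
}

/* e.g.: unsigned int features = HAVE_SVE | HAVE_SME | HAVE_SME2; */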
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Unisoc Inc. */ #include <linux/component.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <video/mipi_display.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_bridge.h> #include <drm/drm_of.h> #include <drm/drm_probe_helper.h> #include "sprd_drm.h" #include "sprd_dpu.h" #include "sprd_dsi.h" #define SOFT_RESET 0x04 #define MASK_PROTOCOL_INT 0x0C #define MASK_INTERNAL_INT 0x14 #define DSI_MODE_CFG 0x18 #define VIRTUAL_CHANNEL_ID 0x1C #define GEN_RX_VCID GENMASK(1, 0) #define VIDEO_PKT_VCID GENMASK(3, 2) #define DPI_VIDEO_FORMAT 0x20 #define DPI_VIDEO_MODE_FORMAT GENMASK(5, 0) #define LOOSELY18_EN BIT(6) #define VIDEO_PKT_CONFIG 0x24 #define VIDEO_PKT_SIZE GENMASK(15, 0) #define VIDEO_LINE_CHUNK_NUM GENMASK(31, 16) #define VIDEO_LINE_HBLK_TIME 0x28 #define VIDEO_LINE_HBP_TIME GENMASK(15, 0) #define VIDEO_LINE_HSA_TIME GENMASK(31, 16) #define VIDEO_LINE_TIME 0x2C #define VIDEO_VBLK_LINES 0x30 #define VFP_LINES GENMASK(9, 0) #define VBP_LINES GENMASK(19, 10) #define VSA_LINES GENMASK(29, 20) #define VIDEO_VACTIVE_LINES 0x34 #define VID_MODE_CFG 0x38 #define VID_MODE_TYPE GENMASK(1, 0) #define LP_VSA_EN BIT(8) #define LP_VBP_EN BIT(9) #define LP_VFP_EN BIT(10) #define LP_VACT_EN BIT(11) #define LP_HBP_EN BIT(12) #define LP_HFP_EN BIT(13) #define FRAME_BTA_ACK_EN BIT(14) #define TIMEOUT_CNT_CLK_CONFIG 0x40 #define HTX_TO_CONFIG 0x44 #define LRX_H_TO_CONFIG 0x48 #define TX_ESC_CLK_CONFIG 0x5C #define CMD_MODE_CFG 0x68 #define TEAR_FX_EN BIT(0) #define GEN_HDR 0x6C #define GEN_DT GENMASK(5, 0) #define GEN_VC GENMASK(7, 6) #define GEN_PLD_DATA 0x70 #define PHY_CLK_LANE_LP_CTRL 0x74 #define PHY_CLKLANE_TX_REQ_HS BIT(0) #define AUTO_CLKLANE_CTRL_EN BIT(1) #define PHY_INTERFACE_CTRL 0x78 #define RF_PHY_SHUTDOWN BIT(0) #define RF_PHY_RESET_N BIT(1) #define RF_PHY_CLK_EN BIT(2) #define CMD_MODE_STATUS 0x98 #define GEN_CMD_RDATA_FIFO_EMPTY BIT(1) #define GEN_CMD_WDATA_FIFO_EMPTY BIT(3) #define GEN_CMD_CMD_FIFO_EMPTY BIT(5) #define GEN_CMD_RDCMD_DONE BIT(7) #define PHY_STATUS 0x9C #define PHY_LOCK BIT(1) #define PHY_MIN_STOP_TIME 0xA0 #define PHY_LANE_NUM_CONFIG 0xA4 #define PHY_CLKLANE_TIME_CONFIG 0xA8 #define PHY_CLKLANE_LP_TO_HS_TIME GENMASK(15, 0) #define PHY_CLKLANE_HS_TO_LP_TIME GENMASK(31, 16) #define PHY_DATALANE_TIME_CONFIG 0xAC #define PHY_DATALANE_LP_TO_HS_TIME GENMASK(15, 0) #define PHY_DATALANE_HS_TO_LP_TIME GENMASK(31, 16) #define MAX_READ_TIME 0xB0 #define RX_PKT_CHECK_CONFIG 0xB4 #define RX_PKT_ECC_EN BIT(0) #define RX_PKT_CRC_EN BIT(1) #define TA_EN 0xB8 #define EOTP_EN 0xBC #define TX_EOTP_EN BIT(0) #define RX_EOTP_EN BIT(1) #define VIDEO_NULLPKT_SIZE 0xC0 #define DCS_WM_PKT_SIZE 0xC4 #define VIDEO_SIG_DELAY_CONFIG 0xD0 #define VIDEO_SIG_DELAY GENMASK(23, 0) #define PHY_TST_CTRL0 0xF0 #define PHY_TESTCLR BIT(0) #define PHY_TESTCLK BIT(1) #define PHY_TST_CTRL1 0xF4 #define PHY_TESTDIN GENMASK(7, 0) #define PHY_TESTDOUT GENMASK(15, 8) #define PHY_TESTEN BIT(16) #define host_to_dsi(host) \ container_of(host, struct sprd_dsi, host) static inline u32 dsi_reg_rd(struct dsi_context *ctx, u32 offset, u32 mask, u32 shift) { return (readl(ctx->base + offset) & mask) >> shift; } static inline void dsi_reg_wr(struct dsi_context *ctx, u32 offset, u32 mask, u32 shift, u32 val) { u32 ret; ret = readl(ctx->base + offset); ret &= ~mask; ret |= (val << shift) & mask; writel(ret, ctx->base + offset); } static inline void dsi_reg_up(struct dsi_context *ctx, u32 offset, u32 mask, u32 val) { u32 
ret = readl(ctx->base + offset); writel((ret & ~mask) | (val & mask), ctx->base + offset); } static int regmap_tst_io_write(void *context, u32 reg, u32 val) { struct sprd_dsi *dsi = context; struct dsi_context *ctx = &dsi->ctx; if (val > 0xff || reg > 0xff) return -EINVAL; drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, val); dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, val); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); return 0; } static int regmap_tst_io_read(void *context, u32 reg, u32 *val) { struct sprd_dsi *dsi = context; struct dsi_context *ctx = &dsi->ctx; int ret; if (reg > 0xff) return -EINVAL; dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, PHY_TESTEN); dsi_reg_wr(ctx, PHY_TST_CTRL1, PHY_TESTDIN, 0, reg); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, PHY_TESTCLK); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLK, 0); dsi_reg_up(ctx, PHY_TST_CTRL1, PHY_TESTEN, 0); udelay(1); ret = dsi_reg_rd(ctx, PHY_TST_CTRL1, PHY_TESTDOUT, 8); if (ret < 0) return ret; *val = ret; drm_dbg(dsi->drm, "reg = 0x%02x, val = 0x%02x\n", reg, *val); return 0; } static const struct regmap_bus regmap_tst_io = { .reg_write = regmap_tst_io_write, .reg_read = regmap_tst_io_read, }; static const struct regmap_config byte_config = { .reg_bits = 8, .val_bits = 8, }; static int dphy_wait_pll_locked(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); int i; for (i = 0; i < 50000; i++) { if (dsi_reg_rd(ctx, PHY_STATUS, PHY_LOCK, 1)) return 0; udelay(3); } drm_err(dsi->drm, "dphy pll can not be locked\n"); return -ETIMEDOUT; } static int dsi_wait_tx_payload_fifo_empty(struct dsi_context *ctx) { int i; for (i = 0; i < 5000; i++) { if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_WDATA_FIFO_EMPTY, 3)) return 0; udelay(1); } return -ETIMEDOUT; } static int dsi_wait_tx_cmd_fifo_empty(struct dsi_context *ctx) { int i; for (i = 0; i < 5000; i++) { if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5)) return 0; udelay(1); } return -ETIMEDOUT; } static int dsi_wait_rd_resp_completed(struct dsi_context *ctx) { int i; for (i = 0; i < 10000; i++) { if (dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDCMD_DONE, 7)) return 0; udelay(10); } return -ETIMEDOUT; } static u16 calc_bytes_per_pixel_x100(int coding) { u16 bpp_x100; switch (coding) { case COLOR_CODE_16BIT_CONFIG1: case COLOR_CODE_16BIT_CONFIG2: case COLOR_CODE_16BIT_CONFIG3: bpp_x100 = 200; break; case COLOR_CODE_18BIT_CONFIG1: case COLOR_CODE_18BIT_CONFIG2: bpp_x100 = 225; break; case COLOR_CODE_24BIT: bpp_x100 = 300; break; case COLOR_CODE_COMPRESSTION: bpp_x100 = 100; break; case COLOR_CODE_20BIT_YCC422_LOOSELY: bpp_x100 = 250; break; case COLOR_CODE_24BIT_YCC422: bpp_x100 = 300; break; case COLOR_CODE_16BIT_YCC422: bpp_x100 = 200; break; case COLOR_CODE_30BIT: bpp_x100 = 375; break; case COLOR_CODE_36BIT: bpp_x100 = 450; break; case COLOR_CODE_12BIT_YCC420: bpp_x100 = 150; break; default: DRM_ERROR("invalid color coding"); bpp_x100 = 0; break; } return bpp_x100; } static u8 calc_video_size_step(int coding) { u8 video_size_step; switch (coding) { case COLOR_CODE_16BIT_CONFIG1: case COLOR_CODE_16BIT_CONFIG2: case COLOR_CODE_16BIT_CONFIG3: case COLOR_CODE_18BIT_CONFIG1: case COLOR_CODE_18BIT_CONFIG2: case 
COLOR_CODE_24BIT: case COLOR_CODE_COMPRESSTION: return video_size_step = 1; case COLOR_CODE_20BIT_YCC422_LOOSELY: case COLOR_CODE_24BIT_YCC422: case COLOR_CODE_16BIT_YCC422: case COLOR_CODE_30BIT: case COLOR_CODE_36BIT: case COLOR_CODE_12BIT_YCC420: return video_size_step = 2; default: DRM_ERROR("invalid color coding"); return 0; } } static u16 round_video_size(int coding, u16 video_size) { switch (coding) { case COLOR_CODE_16BIT_YCC422: case COLOR_CODE_24BIT_YCC422: case COLOR_CODE_20BIT_YCC422_LOOSELY: case COLOR_CODE_12BIT_YCC420: /* round up active H pixels to a multiple of 2 */ if ((video_size % 2) != 0) video_size += 1; break; default: break; } return video_size; } #define SPRD_MIPI_DSI_FMT_DSC 0xff static u32 fmt_to_coding(u32 fmt) { switch (fmt) { case MIPI_DSI_FMT_RGB565: return COLOR_CODE_16BIT_CONFIG1; case MIPI_DSI_FMT_RGB666: case MIPI_DSI_FMT_RGB666_PACKED: return COLOR_CODE_18BIT_CONFIG1; case MIPI_DSI_FMT_RGB888: return COLOR_CODE_24BIT; case SPRD_MIPI_DSI_FMT_DSC: return COLOR_CODE_COMPRESSTION; default: DRM_ERROR("Unsupported format (%d)\n", fmt); return COLOR_CODE_24BIT; } } #define ns_to_cycle(ns, byte_clk) \ DIV_ROUND_UP((ns) * (byte_clk), 1000000) static void sprd_dsi_init(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); u32 byte_clk = dsi->slave->hs_rate / 8; u16 data_hs2lp, data_lp2hs, clk_hs2lp, clk_lp2hs; u16 max_rd_time; int div; writel(0, ctx->base + SOFT_RESET); writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT); writel(0xffffffff, ctx->base + MASK_INTERNAL_INT); writel(1, ctx->base + DSI_MODE_CFG); dsi_reg_up(ctx, EOTP_EN, RX_EOTP_EN, 0); dsi_reg_up(ctx, EOTP_EN, TX_EOTP_EN, 0); dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_ECC_EN, RX_PKT_ECC_EN); dsi_reg_up(ctx, RX_PKT_CHECK_CONFIG, RX_PKT_CRC_EN, RX_PKT_CRC_EN); writel(1, ctx->base + TA_EN); dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, VIDEO_PKT_VCID, 0); dsi_reg_up(ctx, VIRTUAL_CHANNEL_ID, GEN_RX_VCID, 0); div = DIV_ROUND_UP(byte_clk, dsi->slave->lp_rate); writel(div, ctx->base + TX_ESC_CLK_CONFIG); max_rd_time = ns_to_cycle(ctx->max_rd_time, byte_clk); writel(max_rd_time, ctx->base + MAX_READ_TIME); data_hs2lp = ns_to_cycle(ctx->data_hs2lp, byte_clk); data_lp2hs = ns_to_cycle(ctx->data_lp2hs, byte_clk); clk_hs2lp = ns_to_cycle(ctx->clk_hs2lp, byte_clk); clk_lp2hs = ns_to_cycle(ctx->clk_lp2hs, byte_clk); dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG, PHY_DATALANE_HS_TO_LP_TIME, 16, data_hs2lp); dsi_reg_wr(ctx, PHY_DATALANE_TIME_CONFIG, PHY_DATALANE_LP_TO_HS_TIME, 0, data_lp2hs); dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG, PHY_CLKLANE_HS_TO_LP_TIME, 16, clk_hs2lp); dsi_reg_wr(ctx, PHY_CLKLANE_TIME_CONFIG, PHY_CLKLANE_LP_TO_HS_TIME, 0, clk_lp2hs); writel(1, ctx->base + SOFT_RESET); } /* * Free up resources and shutdown host controller and PHY */ static void sprd_dsi_fini(struct dsi_context *ctx) { writel(0xffffffff, ctx->base + MASK_PROTOCOL_INT); writel(0xffffffff, ctx->base + MASK_INTERNAL_INT); writel(0, ctx->base + SOFT_RESET); } /* * If not in burst mode, it will compute the video and null packet sizes * according to necessity. * Configure timers for data lanes and/or clock lane to return to LP when * bandwidth is not filled by data. 
*/ static int sprd_dsi_dpi_video(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); struct videomode *vm = &ctx->vm; u32 byte_clk = dsi->slave->hs_rate / 8; u16 bpp_x100; u16 video_size; u32 ratio_x1000; u16 null_pkt_size = 0; u8 video_size_step; u32 hs_to; u32 total_bytes; u32 bytes_per_chunk; u32 chunks = 0; u32 bytes_left = 0; u32 chunk_overhead; const u8 pkt_header = 6; u8 coding; int div; u16 hline; u16 byte_cycle; coding = fmt_to_coding(dsi->slave->format); video_size = round_video_size(coding, vm->hactive); bpp_x100 = calc_bytes_per_pixel_x100(coding); video_size_step = calc_video_size_step(coding); ratio_x1000 = byte_clk * 1000 / (vm->pixelclock / 1000); hline = vm->hactive + vm->hsync_len + vm->hfront_porch + vm->hback_porch; writel(0, ctx->base + SOFT_RESET); dsi_reg_wr(ctx, VID_MODE_CFG, FRAME_BTA_ACK_EN, 15, ctx->frame_ack_en); dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding); dsi_reg_wr(ctx, VID_MODE_CFG, VID_MODE_TYPE, 0, ctx->burst_mode); byte_cycle = 95 * hline * ratio_x1000 / 100000; dsi_reg_wr(ctx, VIDEO_SIG_DELAY_CONFIG, VIDEO_SIG_DELAY, 0, byte_cycle); byte_cycle = hline * ratio_x1000 / 1000; writel(byte_cycle, ctx->base + VIDEO_LINE_TIME); byte_cycle = vm->hsync_len * ratio_x1000 / 1000; dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HSA_TIME, 16, byte_cycle); byte_cycle = vm->hback_porch * ratio_x1000 / 1000; dsi_reg_wr(ctx, VIDEO_LINE_HBLK_TIME, VIDEO_LINE_HBP_TIME, 0, byte_cycle); writel(vm->vactive, ctx->base + VIDEO_VACTIVE_LINES); dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VFP_LINES, 0, vm->vfront_porch); dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VBP_LINES, 10, vm->vback_porch); dsi_reg_wr(ctx, VIDEO_VBLK_LINES, VSA_LINES, 20, vm->vsync_len); dsi_reg_up(ctx, VID_MODE_CFG, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN, LP_HBP_EN | LP_HFP_EN | LP_VACT_EN | LP_VFP_EN | LP_VBP_EN | LP_VSA_EN); hs_to = (hline * vm->vactive) + (2 * bpp_x100) / 100; for (div = 0x80; (div < hs_to) && (div > 2); div--) { if ((hs_to % div) == 0) { writel(div, ctx->base + TIMEOUT_CNT_CLK_CONFIG); writel(hs_to / div, ctx->base + LRX_H_TO_CONFIG); writel(hs_to / div, ctx->base + HTX_TO_CONFIG); break; } } if (ctx->burst_mode == VIDEO_BURST_WITH_SYNC_PULSES) { dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size); writel(0, ctx->base + VIDEO_NULLPKT_SIZE); dsi_reg_up(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 0); } else { /* non burst transmission */ null_pkt_size = 0; /* bytes to be sent - first as one chunk */ bytes_per_chunk = vm->hactive * bpp_x100 / 100 + pkt_header; /* hline total bytes from the DPI interface */ total_bytes = (vm->hactive + vm->hfront_porch) * ratio_x1000 / dsi->slave->lanes / 1000; /* check if the pixels actually fit on the DSI link */ if (total_bytes < bytes_per_chunk) { drm_err(dsi->drm, "current resolution can not be set\n"); return -EINVAL; } chunk_overhead = total_bytes - bytes_per_chunk; /* overhead higher than 1 -> enable multi packets */ if (chunk_overhead > 1) { /* multi packets */ for (video_size = video_size_step; video_size < vm->hactive; video_size += video_size_step) { if (vm->hactive * 1000 / video_size % 1000) continue; chunks = vm->hactive / video_size; bytes_per_chunk = bpp_x100 * video_size / 100 + pkt_header; if (total_bytes >= (bytes_per_chunk * chunks)) { bytes_left = total_bytes - bytes_per_chunk * chunks; break; } } /* prevent overflow (unsigned - unsigned) */ if (bytes_left > (pkt_header * chunks)) { null_pkt_size = (bytes_left - pkt_header * chunks) / chunks; /* 
avoid register overflow */ if (null_pkt_size > 1023) null_pkt_size = 1023; } } else { /* single packet */ chunks = 1; /* must be a multiple of 4 except 18 loosely */ for (video_size = vm->hactive; (video_size % video_size_step) != 0; video_size++) ; } dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_PKT_SIZE, 0, video_size); writel(null_pkt_size, ctx->base + VIDEO_NULLPKT_SIZE); dsi_reg_wr(ctx, VIDEO_PKT_CONFIG, VIDEO_LINE_CHUNK_NUM, 16, chunks); } writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT); writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT); writel(1, ctx->base + SOFT_RESET); return 0; } static void sprd_dsi_edpi_video(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); const u32 fifo_depth = 1096; const u32 word_length = 4; u32 hactive = ctx->vm.hactive; u32 bpp_x100; u32 max_fifo_len; u8 coding; coding = fmt_to_coding(dsi->slave->format); bpp_x100 = calc_bytes_per_pixel_x100(coding); max_fifo_len = word_length * fifo_depth * 100 / bpp_x100; writel(0, ctx->base + SOFT_RESET); dsi_reg_wr(ctx, DPI_VIDEO_FORMAT, DPI_VIDEO_MODE_FORMAT, 0, coding); dsi_reg_wr(ctx, CMD_MODE_CFG, TEAR_FX_EN, 0, ctx->te_ack_en); if (max_fifo_len > hactive) writel(hactive, ctx->base + DCS_WM_PKT_SIZE); else writel(max_fifo_len, ctx->base + DCS_WM_PKT_SIZE); writel(ctx->int0_mask, ctx->base + MASK_PROTOCOL_INT); writel(ctx->int1_mask, ctx->base + MASK_INTERNAL_INT); writel(1, ctx->base + SOFT_RESET); } /* * Send a packet on the generic interface, * this function has an active delay to wait for the buffer to clear. * The delay is limited to: * (param_length / 4) x DSIH_FIFO_ACTIVE_WAIT x register access time * the controller restricts the sending of. * * This function will not be able to send Null and Blanking packets due to * controller restriction */ static int sprd_dsi_wr_pkt(struct dsi_context *ctx, u8 vc, u8 type, const u8 *param, u16 len) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); u8 wc_lsbyte, wc_msbyte; u32 payload; int i, j, ret; if (vc > 3) return -EINVAL; /* 1st: for long packet, must config payload first */ ret = dsi_wait_tx_payload_fifo_empty(ctx); if (ret) { drm_err(dsi->drm, "tx payload fifo is not empty\n"); return ret; } if (len > 2) { for (i = 0, j = 0; i < len; i += j) { payload = 0; for (j = 0; (j < 4) && ((j + i) < (len)); j++) payload |= param[i + j] << (j * 8); writel(payload, ctx->base + GEN_PLD_DATA); } wc_lsbyte = len & 0xff; wc_msbyte = len >> 8; } else { wc_lsbyte = (len > 0) ? param[0] : 0; wc_msbyte = (len > 1) ? param[1] : 0; } /* 2nd: then set packet header */ ret = dsi_wait_tx_cmd_fifo_empty(ctx); if (ret) { drm_err(dsi->drm, "tx cmd fifo is not empty\n"); return ret; } writel(type | (vc << 6) | (wc_lsbyte << 8) | (wc_msbyte << 16), ctx->base + GEN_HDR); return 0; } /* * Send READ packet to peripheral using the generic interface, * this will force command mode and stop video mode (because of BTA). 
* * This function has an active delay to wait for the buffer to clear, * the delay is limited to 2 x DSIH_FIFO_ACTIVE_WAIT * (waiting for command buffer, and waiting for receiving) * @note this function will enable BTA */ static int sprd_dsi_rd_pkt(struct dsi_context *ctx, u8 vc, u8 type, u8 msb_byte, u8 lsb_byte, u8 *buffer, u8 bytes_to_read) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); int i, ret; int count = 0; u32 temp; if (vc > 3) return -EINVAL; /* 1st: send read command to peripheral */ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_CMD_FIFO_EMPTY, 5); if (!ret) return -EIO; writel(type | (vc << 6) | (lsb_byte << 8) | (msb_byte << 16), ctx->base + GEN_HDR); /* 2nd: wait peripheral response completed */ ret = dsi_wait_rd_resp_completed(ctx); if (ret) { drm_err(dsi->drm, "wait read response time out\n"); return ret; } /* 3rd: get data from rx payload fifo */ ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1); if (ret) { drm_err(dsi->drm, "rx payload fifo empty\n"); return -EIO; } for (i = 0; i < 100; i++) { temp = readl(ctx->base + GEN_PLD_DATA); if (count < bytes_to_read) buffer[count++] = temp & 0xff; if (count < bytes_to_read) buffer[count++] = (temp >> 8) & 0xff; if (count < bytes_to_read) buffer[count++] = (temp >> 16) & 0xff; if (count < bytes_to_read) buffer[count++] = (temp >> 24) & 0xff; ret = dsi_reg_rd(ctx, CMD_MODE_STATUS, GEN_CMD_RDATA_FIFO_EMPTY, 1); if (ret) return count; } return 0; } static void sprd_dsi_set_work_mode(struct dsi_context *ctx, u8 mode) { if (mode == DSI_MODE_CMD) writel(1, ctx->base + DSI_MODE_CFG); else writel(0, ctx->base + DSI_MODE_CFG); } static void sprd_dsi_state_reset(struct dsi_context *ctx) { writel(0, ctx->base + SOFT_RESET); udelay(100); writel(1, ctx->base + SOFT_RESET); } static int sprd_dphy_init(struct dsi_context *ctx) { struct sprd_dsi *dsi = container_of(ctx, struct sprd_dsi, ctx); int ret; dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, 0); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, PHY_TESTCLR); dsi_reg_up(ctx, PHY_TST_CTRL0, PHY_TESTCLR, 0); dphy_pll_config(ctx); dphy_timing_config(ctx); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, RF_PHY_SHUTDOWN); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N); writel(0x1C, ctx->base + PHY_MIN_STOP_TIME); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN); writel(dsi->slave->lanes - 1, ctx->base + PHY_LANE_NUM_CONFIG); ret = dphy_wait_pll_locked(ctx); if (ret) { drm_err(dsi->drm, "dphy initial failed\n"); return ret; } return 0; } static void sprd_dphy_fini(struct dsi_context *ctx) { dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_SHUTDOWN, 0); dsi_reg_up(ctx, PHY_INTERFACE_CTRL, RF_PHY_RESET_N, RF_PHY_RESET_N); } static void sprd_dsi_encoder_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adj_mode) { struct sprd_dsi *dsi = encoder_to_dsi(encoder); drm_display_mode_to_videomode(adj_mode, &dsi->ctx.vm); } static void sprd_dsi_encoder_enable(struct drm_encoder *encoder) { struct sprd_dsi *dsi = encoder_to_dsi(encoder); struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc); struct dsi_context *ctx = &dsi->ctx; if (ctx->enabled) { drm_warn(dsi->drm, "dsi is initialized\n"); return; } sprd_dsi_init(ctx); if (ctx->work_mode == DSI_MODE_VIDEO) sprd_dsi_dpi_video(ctx); 
else sprd_dsi_edpi_video(ctx); sprd_dphy_init(ctx); sprd_dsi_set_work_mode(ctx, ctx->work_mode); sprd_dsi_state_reset(ctx); if (dsi->slave->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS) { dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, AUTO_CLKLANE_CTRL_EN, AUTO_CLKLANE_CTRL_EN); } else { dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, RF_PHY_CLK_EN, RF_PHY_CLK_EN); dsi_reg_up(ctx, PHY_CLK_LANE_LP_CTRL, PHY_CLKLANE_TX_REQ_HS, PHY_CLKLANE_TX_REQ_HS); dphy_wait_pll_locked(ctx); } sprd_dpu_run(dpu); ctx->enabled = true; } static void sprd_dsi_encoder_disable(struct drm_encoder *encoder) { struct sprd_dsi *dsi = encoder_to_dsi(encoder); struct sprd_dpu *dpu = to_sprd_crtc(encoder->crtc); struct dsi_context *ctx = &dsi->ctx; if (!ctx->enabled) { drm_warn(dsi->drm, "dsi isn't initialized\n"); return; } sprd_dpu_stop(dpu); sprd_dphy_fini(ctx); sprd_dsi_fini(ctx); ctx->enabled = false; } static const struct drm_encoder_helper_funcs sprd_encoder_helper_funcs = { .mode_set = sprd_dsi_encoder_mode_set, .enable = sprd_dsi_encoder_enable, .disable = sprd_dsi_encoder_disable }; static const struct drm_encoder_funcs sprd_encoder_funcs = { .destroy = drm_encoder_cleanup, }; static int sprd_dsi_encoder_init(struct sprd_dsi *dsi, struct device *dev) { struct drm_encoder *encoder = &dsi->encoder; u32 crtc_mask; int ret; crtc_mask = drm_of_find_possible_crtcs(dsi->drm, dev->of_node); if (!crtc_mask) { drm_err(dsi->drm, "failed to find crtc mask\n"); return -EINVAL; } drm_dbg(dsi->drm, "find possible crtcs: 0x%08x\n", crtc_mask); encoder->possible_crtcs = crtc_mask; ret = drm_encoder_init(dsi->drm, encoder, &sprd_encoder_funcs, DRM_MODE_ENCODER_DSI, NULL); if (ret) { drm_err(dsi->drm, "failed to init dsi encoder\n"); return ret; } drm_encoder_helper_add(encoder, &sprd_encoder_helper_funcs); return 0; } static int sprd_dsi_bridge_init(struct sprd_dsi *dsi, struct device *dev) { int ret; dsi->panel_bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); if (IS_ERR(dsi->panel_bridge)) return PTR_ERR(dsi->panel_bridge); ret = drm_bridge_attach(&dsi->encoder, dsi->panel_bridge, NULL, 0); if (ret) return ret; return 0; } static int sprd_dsi_context_init(struct sprd_dsi *dsi, struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct dsi_context *ctx = &dsi->ctx; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "failed to get I/O resource\n"); return -EINVAL; } ctx->base = devm_ioremap(dev, res->start, resource_size(res)); if (!ctx->base) { drm_err(dsi->drm, "failed to map dsi host registers\n"); return -ENXIO; } ctx->regmap = devm_regmap_init(dev, &regmap_tst_io, dsi, &byte_config); if (IS_ERR(ctx->regmap)) { drm_err(dsi->drm, "dphy regmap init failed\n"); return PTR_ERR(ctx->regmap); } ctx->data_hs2lp = 120; ctx->data_lp2hs = 500; ctx->clk_hs2lp = 4; ctx->clk_lp2hs = 15; ctx->max_rd_time = 6000; ctx->int0_mask = 0xffffffff; ctx->int1_mask = 0xffffffff; ctx->enabled = true; return 0; } static int sprd_dsi_bind(struct device *dev, struct device *master, void *data) { struct drm_device *drm = data; struct sprd_dsi *dsi = dev_get_drvdata(dev); int ret; dsi->drm = drm; ret = sprd_dsi_encoder_init(dsi, dev); if (ret) return ret; ret = sprd_dsi_bridge_init(dsi, dev); if (ret) return ret; ret = sprd_dsi_context_init(dsi, dev); if (ret) return ret; return 0; } static void sprd_dsi_unbind(struct device *dev, struct device *master, void *data) { struct sprd_dsi *dsi = dev_get_drvdata(dev); drm_of_panel_bridge_remove(dev->of_node, 1, 0); 
drm_encoder_cleanup(&dsi->encoder); } static const struct component_ops dsi_component_ops = { .bind = sprd_dsi_bind, .unbind = sprd_dsi_unbind, }; static int sprd_dsi_host_attach(struct mipi_dsi_host *host, struct mipi_dsi_device *slave) { struct sprd_dsi *dsi = host_to_dsi(host); struct dsi_context *ctx = &dsi->ctx; dsi->slave = slave; if (slave->mode_flags & MIPI_DSI_MODE_VIDEO) ctx->work_mode = DSI_MODE_VIDEO; else ctx->work_mode = DSI_MODE_CMD; if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_BURST) ctx->burst_mode = VIDEO_BURST_WITH_SYNC_PULSES; else if (slave->mode_flags & MIPI_DSI_MODE_VIDEO_SYNC_PULSE) ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_PULSES; else ctx->burst_mode = VIDEO_NON_BURST_WITH_SYNC_EVENTS; return component_add(host->dev, &dsi_component_ops); } static int sprd_dsi_host_detach(struct mipi_dsi_host *host, struct mipi_dsi_device *slave) { component_del(host->dev, &dsi_component_ops); return 0; } static ssize_t sprd_dsi_host_transfer(struct mipi_dsi_host *host, const struct mipi_dsi_msg *msg) { struct sprd_dsi *dsi = host_to_dsi(host); const u8 *tx_buf = msg->tx_buf; if (msg->rx_buf && msg->rx_len) { u8 lsb = (msg->tx_len > 0) ? tx_buf[0] : 0; u8 msb = (msg->tx_len > 1) ? tx_buf[1] : 0; return sprd_dsi_rd_pkt(&dsi->ctx, msg->channel, msg->type, msb, lsb, msg->rx_buf, msg->rx_len); } if (msg->tx_buf && msg->tx_len) return sprd_dsi_wr_pkt(&dsi->ctx, msg->channel, msg->type, tx_buf, msg->tx_len); return 0; } static const struct mipi_dsi_host_ops sprd_dsi_host_ops = { .attach = sprd_dsi_host_attach, .detach = sprd_dsi_host_detach, .transfer = sprd_dsi_host_transfer, }; static const struct of_device_id dsi_match_table[] = { { .compatible = "sprd,sharkl3-dsi-host" }, { /* sentinel */ }, }; static int sprd_dsi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sprd_dsi *dsi; dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL); if (!dsi) return -ENOMEM; dev_set_drvdata(dev, dsi); dsi->host.ops = &sprd_dsi_host_ops; dsi->host.dev = dev; return mipi_dsi_host_register(&dsi->host); } static void sprd_dsi_remove(struct platform_device *pdev) { struct sprd_dsi *dsi = dev_get_drvdata(&pdev->dev); mipi_dsi_host_unregister(&dsi->host); } struct platform_driver sprd_dsi_driver = { .probe = sprd_dsi_probe, .remove = sprd_dsi_remove, .driver = { .name = "sprd-dsi-drv", .of_match_table = dsi_match_table, }, }; MODULE_AUTHOR("Leon He <[email protected]>"); MODULE_AUTHOR("Kevin Tang <[email protected]>"); MODULE_DESCRIPTION("Unisoc MIPI DSI HOST Controller Driver"); MODULE_LICENSE("GPL v2");
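The register helpers near the top of this driver (dsi_reg_wr, dsi_reg_up) all follow the same read-modify-write pattern on GENMASK-defined fields. Below is a standalone sketch of that pattern with hypothetical register and field names, in case the mask/shift convention is unfamiliar; it needs <linux/bits.h> and <linux/io.h>.

/* Standalone sketch of the dsi_reg_wr()-style field update used above.
 * EXAMPLE_REG and EXAMPLE_FIELD are hypothetical; only the pattern matters:
 * clear the field with its mask, then OR in the shifted new value.
 */
#define EXAMPLE_REG	0x38			/* hypothetical offset */
#define EXAMPLE_FIELD	GENMASK(5, 2)		/* hypothetical field */

static void example_field_write(void __iomem *base, u32 val)
{
	u32 reg = readl(base + EXAMPLE_REG);

	reg &= ~EXAMPLE_FIELD;
	reg |= (val << 2) & EXAMPLE_FIELD;	/* 2 = lowest bit of the mask */
	writel(reg, base + EXAMPLE_REG);
}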
// SPDX-License-Identifier: GPL-2.0-only /* * w1_ds28e04.c - w1 family 1C (DS28E04) driver * * Copyright (c) 2012 Markus Franke <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/device.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/crc16.h> #include <linux/uaccess.h> #define CRC16_INIT 0 #define CRC16_VALID 0xb001 #include <linux/w1.h> #define W1_FAMILY_DS28E04 0x1C /* Allow the strong pullup to be disabled, but default to enabled. * If it was disabled a parasite powered device might not get the required * current to copy the data from the scratchpad to EEPROM. If it is enabled * parasite powered devices have a better chance of getting the current * required. */ static int w1_strong_pullup = 1; module_param_named(strong_pullup, w1_strong_pullup, int, 0); /* enable/disable CRC checking on DS28E04-100 memory accesses */ static bool w1_enable_crccheck = true; #define W1_EEPROM_SIZE 512 #define W1_PAGE_COUNT 16 #define W1_PAGE_SIZE 32 #define W1_PAGE_BITS 5 #define W1_PAGE_MASK 0x1F #define W1_F1C_READ_EEPROM 0xF0 #define W1_F1C_WRITE_SCRATCH 0x0F #define W1_F1C_READ_SCRATCH 0xAA #define W1_F1C_COPY_SCRATCH 0x55 #define W1_F1C_ACCESS_WRITE 0x5A #define W1_1C_REG_LOGIC_STATE 0x220 struct w1_f1C_data { u8 memory[W1_EEPROM_SIZE]; u32 validcrc; }; /* * Check the file size bounds and adjusts count as needed. * This would not be needed if the file size didn't reset to 0 after a write. */ static inline size_t w1_f1C_fix_count(loff_t off, size_t count, size_t size) { if (off > size) return 0; if ((off + count) > size) return size - off; return count; } static int w1_f1C_refresh_block(struct w1_slave *sl, struct w1_f1C_data *data, int block) { u8 wrbuf[3]; int off = block * W1_PAGE_SIZE; if (data->validcrc & (1 << block)) return 0; if (w1_reset_select_slave(sl)) { data->validcrc = 0; return -EIO; } wrbuf[0] = W1_F1C_READ_EEPROM; wrbuf[1] = off & 0xff; wrbuf[2] = off >> 8; w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, &data->memory[off], W1_PAGE_SIZE); /* cache the block if the CRC is valid */ if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID) data->validcrc |= (1 << block); return 0; } static int w1_f1C_read(struct w1_slave *sl, int addr, int len, char *data) { u8 wrbuf[3]; /* read directly from the EEPROM */ if (w1_reset_select_slave(sl)) return -EIO; wrbuf[0] = W1_F1C_READ_EEPROM; wrbuf[1] = addr & 0xff; wrbuf[2] = addr >> 8; w1_write_block(sl->master, wrbuf, sizeof(wrbuf)); return w1_read_block(sl->master, data, len); } static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); struct w1_f1C_data *data = sl->family_data; int i, min_page, max_page; count = w1_f1C_fix_count(off, count, W1_EEPROM_SIZE); if (count == 0) return 0; mutex_lock(&sl->master->mutex); if (w1_enable_crccheck) { min_page = (off >> W1_PAGE_BITS); max_page = (off + count - 1) >> W1_PAGE_BITS; for (i = min_page; i <= max_page; i++) { if (w1_f1C_refresh_block(sl, data, i)) { count = -EIO; goto out_up; } } memcpy(buf, &data->memory[off], count); } else { count = w1_f1C_read(sl, off, count, buf); } out_up: mutex_unlock(&sl->master->mutex); return count; } /** * w1_f1C_write() - Writes to the scratchpad and reads it back for verification. 
* @sl: The slave structure * @addr: Address for the write * @len: length must be <= (W1_PAGE_SIZE - (addr & W1_PAGE_MASK)) * @data: The data to write * * Then copies the scratchpad to EEPROM. * The data must be on one page. * The master must be locked. * * Return: 0=Success, -1=failure */ static int w1_f1C_write(struct w1_slave *sl, int addr, int len, const u8 *data) { u8 wrbuf[4]; u8 rdbuf[W1_PAGE_SIZE + 3]; u8 es = (addr + len - 1) & 0x1f; unsigned int tm = 10; int i; struct w1_f1C_data *f1C = sl->family_data; /* Write the data to the scratchpad */ if (w1_reset_select_slave(sl)) return -1; wrbuf[0] = W1_F1C_WRITE_SCRATCH; wrbuf[1] = addr & 0xff; wrbuf[2] = addr >> 8; w1_write_block(sl->master, wrbuf, 3); w1_write_block(sl->master, data, len); /* Read the scratchpad and verify */ if (w1_reset_select_slave(sl)) return -1; w1_write_8(sl->master, W1_F1C_READ_SCRATCH); w1_read_block(sl->master, rdbuf, len + 3); /* Compare what was read against the data written */ if ((rdbuf[0] != wrbuf[1]) || (rdbuf[1] != wrbuf[2]) || (rdbuf[2] != es) || (memcmp(data, &rdbuf[3], len) != 0)) return -1; /* Copy the scratchpad to EEPROM */ if (w1_reset_select_slave(sl)) return -1; wrbuf[0] = W1_F1C_COPY_SCRATCH; wrbuf[3] = es; for (i = 0; i < sizeof(wrbuf); ++i) { /* * issue 10ms strong pullup (or delay) on the last byte * for writing the data from the scratchpad to EEPROM */ if (w1_strong_pullup && i == sizeof(wrbuf)-1) w1_next_pullup(sl->master, tm); w1_write_8(sl->master, wrbuf[i]); } if (!w1_strong_pullup) msleep(tm); if (w1_enable_crccheck) { /* invalidate cached data */ f1C->validcrc &= ~(1 << (addr >> W1_PAGE_BITS)); } /* Reset the bus to wake up the EEPROM (this may not be needed) */ w1_reset_bus(sl->master); return 0; } static ssize_t eeprom_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); int addr, len, idx; count = w1_f1C_fix_count(off, count, W1_EEPROM_SIZE); if (count == 0) return 0; if (w1_enable_crccheck) { /* can only write full blocks in cached mode */ if ((off & W1_PAGE_MASK) || (count & W1_PAGE_MASK)) { dev_err(&sl->dev, "invalid offset/count off=%d cnt=%zd\n", (int)off, count); return -EINVAL; } /* make sure the block CRCs are valid */ for (idx = 0; idx < count; idx += W1_PAGE_SIZE) { if (crc16(CRC16_INIT, &buf[idx], W1_PAGE_SIZE) != CRC16_VALID) { dev_err(&sl->dev, "bad CRC at offset %d\n", (int)off); return -EINVAL; } } } mutex_lock(&sl->master->mutex); /* Can only write data to one page at a time */ idx = 0; while (idx < count) { addr = off + idx; len = W1_PAGE_SIZE - (addr & W1_PAGE_MASK); if (len > (count - idx)) len = count - idx; if (w1_f1C_write(sl, addr, len, &buf[idx]) < 0) { count = -EIO; goto out_up; } idx += len; } out_up: mutex_unlock(&sl->master->mutex); return count; } static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE); static ssize_t pio_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); int ret; /* check arguments */ if (off != 0 || count != 1 || buf == NULL) return -EINVAL; mutex_lock(&sl->master->mutex); ret = w1_f1C_read(sl, W1_1C_REG_LOGIC_STATE, count, buf); mutex_unlock(&sl->master->mutex); return ret; } static ssize_t pio_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct w1_slave *sl = kobj_to_w1_slave(kobj); u8 wrbuf[3]; u8 ack; /* check arguments */ if (off != 0 || count != 1 
|| buf == NULL) return -EINVAL; mutex_lock(&sl->master->mutex); /* Write the PIO data */ if (w1_reset_select_slave(sl)) { mutex_unlock(&sl->master->mutex); return -1; } /* set bit 7..2 to value '1' */ *buf = *buf | 0xFC; wrbuf[0] = W1_F1C_ACCESS_WRITE; wrbuf[1] = *buf; wrbuf[2] = ~(*buf); w1_write_block(sl->master, wrbuf, 3); w1_read_block(sl->master, &ack, sizeof(ack)); mutex_unlock(&sl->master->mutex); /* check for acknowledgement */ if (ack != 0xAA) return -EIO; return count; } static BIN_ATTR_RW(pio, 1); static ssize_t crccheck_show(struct device *dev, struct device_attribute *attr, char *buf) { return sysfs_emit(buf, "%d\n", w1_enable_crccheck); } static ssize_t crccheck_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int err = kstrtobool(buf, &w1_enable_crccheck); if (err) return err; return count; } static DEVICE_ATTR_RW(crccheck); static struct attribute *w1_f1C_attrs[] = { &dev_attr_crccheck.attr, NULL, }; static struct bin_attribute *w1_f1C_bin_attrs[] = { &bin_attr_eeprom, &bin_attr_pio, NULL, }; static const struct attribute_group w1_f1C_group = { .attrs = w1_f1C_attrs, .bin_attrs = w1_f1C_bin_attrs, }; static const struct attribute_group *w1_f1C_groups[] = { &w1_f1C_group, NULL, }; static int w1_f1C_add_slave(struct w1_slave *sl) { struct w1_f1C_data *data = NULL; if (w1_enable_crccheck) { data = kzalloc(sizeof(struct w1_f1C_data), GFP_KERNEL); if (!data) return -ENOMEM; sl->family_data = data; } return 0; } static void w1_f1C_remove_slave(struct w1_slave *sl) { kfree(sl->family_data); sl->family_data = NULL; } static const struct w1_family_ops w1_f1C_fops = { .add_slave = w1_f1C_add_slave, .remove_slave = w1_f1C_remove_slave, .groups = w1_f1C_groups, }; static struct w1_family w1_family_1C = { .fid = W1_FAMILY_DS28E04, .fops = &w1_f1C_fops, }; module_w1_family(w1_family_1C); MODULE_AUTHOR("Markus Franke <[email protected]>, <[email protected]>"); MODULE_DESCRIPTION("w1 family 1C driver for DS28E04, 4kb EEPROM and PIO"); MODULE_LICENSE("GPL"); MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_DS28E04));
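Since eeprom_write() above splits requests at 32-byte page boundaries before handing them to w1_f1C_write(), a short self-contained sketch of that chunking arithmetic may help; the helper name is hypothetical.

/* Hypothetical helper mirroring the chunking done in eeprom_write():
 * each iteration stays within one W1_PAGE_SIZE page, as required by
 * w1_f1C_write().
 */
static void example_split_into_pages(int off, size_t count)
{
	size_t idx = 0;

	while (idx < count) {
		int addr = off + idx;
		int len = W1_PAGE_SIZE - (addr & W1_PAGE_MASK);

		if (len > (int)(count - idx))
			len = count - idx;

		/* write 'len' bytes starting at page-relative 'addr' here */
		idx += len;
	}
}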
// SPDX-License-Identifier: GPL-2.0 /* Converted from tools/testing/selftests/bpf/verifier/map_ptr_mixing.c */ #include <linux/bpf.h> #include <bpf/bpf_helpers.h> #include "bpf_misc.h" #define MAX_ENTRIES 11 struct test_val { unsigned int index; int foo[MAX_ENTRIES]; }; struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); __type(key, int); __type(value, struct test_val); } map_array_48b SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_HASH); __uint(max_entries, 1); __type(key, long long); __type(value, struct test_val); } map_hash_48b SEC(".maps"); struct { __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS); __uint(max_entries, 1); __type(key, int); __type(value, int); __array(values, struct { __uint(type, BPF_MAP_TYPE_ARRAY); __uint(max_entries, 1); __type(key, int); __type(value, int); }); } map_in_map SEC(".maps"); void dummy_prog_42_socket(void); void dummy_prog_24_socket(void); void dummy_prog_loop1_socket(void); void dummy_prog_loop2_socket(void); struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(max_entries, 4); __uint(key_size, sizeof(int)); __array(values, void (void)); } map_prog1_socket SEC(".maps") = { .values = { [0] = (void *)&dummy_prog_42_socket, [1] = (void *)&dummy_prog_loop1_socket, [2] = (void *)&dummy_prog_24_socket, }, }; struct { __uint(type, BPF_MAP_TYPE_PROG_ARRAY); __uint(max_entries, 8); __uint(key_size, sizeof(int)); __array(values, void (void)); } map_prog2_socket SEC(".maps") = { .values = { [1] = (void *)&dummy_prog_loop2_socket, [2] = (void *)&dummy_prog_24_socket, [7] = (void *)&dummy_prog_42_socket, }, }; SEC("socket") __auxiliary __auxiliary_unpriv __naked void dummy_prog_42_socket(void) { asm volatile ("r0 = 42; exit;"); } SEC("socket") __auxiliary __auxiliary_unpriv __naked void dummy_prog_24_socket(void) { asm volatile ("r0 = 24; exit;"); } SEC("socket") __auxiliary __auxiliary_unpriv __naked void dummy_prog_loop1_socket(void) { asm volatile (" \ r3 = 1; \ r2 = %[map_prog1_socket] ll; \ call %[bpf_tail_call]; \ r0 = 41; \ exit; \ " : : __imm(bpf_tail_call), __imm_addr(map_prog1_socket) : __clobber_all); } SEC("socket") __auxiliary __auxiliary_unpriv __naked void dummy_prog_loop2_socket(void) { asm volatile (" \ r3 = 1; \ r2 = %[map_prog2_socket] ll; \ call %[bpf_tail_call]; \ r0 = 41; \ exit; \ " : : __imm(bpf_tail_call), __imm_addr(map_prog2_socket) : __clobber_all); } SEC("tc") __description("calls: two calls returning different map pointers for lookup (hash, array)") __success __retval(1) __naked void pointers_for_lookup_hash_array(void) { asm volatile (" \ /* main prog */ \ if r1 != 0 goto l0_%=; \ call pointers_for_lookup_hash_array__1; \ goto l1_%=; \ l0_%=: call pointers_for_lookup_hash_array__2; \ l1_%=: r1 = r0; \ r2 = 0; \ *(u64*)(r10 - 8) = r2; \ r2 = r10; \ r2 += -8; \ call %[bpf_map_lookup_elem]; \ if r0 == 0 goto l2_%=; \ r1 = %[test_val_foo]; \ *(u64*)(r0 + 0) = r1; \ r0 = 1; \ l2_%=: exit; \ " : : __imm(bpf_map_lookup_elem), __imm_const(test_val_foo, offsetof(struct test_val, foo)) : __clobber_all); } static __naked __noinline __attribute__((used)) void pointers_for_lookup_hash_array__1(void) { asm volatile (" \ r0 = %[map_hash_48b] ll; \ exit; \ " : : __imm_addr(map_hash_48b) : __clobber_all); } static __naked __noinline __attribute__((used)) void pointers_for_lookup_hash_array__2(void) { asm volatile (" \ r0 = %[map_array_48b] ll; \ exit; \ " : : __imm_addr(map_array_48b) : __clobber_all); } SEC("tc") __description("calls: two calls returning different map pointers for lookup (hash, map in map)") __failure __msg("only read 
from bpf_array is supported") __naked void lookup_hash_map_in_map(void) { asm volatile (" \ /* main prog */ \ if r1 != 0 goto l0_%=; \ call lookup_hash_map_in_map__1; \ goto l1_%=; \ l0_%=: call lookup_hash_map_in_map__2; \ l1_%=: r1 = r0; \ r2 = 0; \ *(u64*)(r10 - 8) = r2; \ r2 = r10; \ r2 += -8; \ call %[bpf_map_lookup_elem]; \ if r0 == 0 goto l2_%=; \ r1 = %[test_val_foo]; \ *(u64*)(r0 + 0) = r1; \ r0 = 1; \ l2_%=: exit; \ " : : __imm(bpf_map_lookup_elem), __imm_const(test_val_foo, offsetof(struct test_val, foo)) : __clobber_all); } static __naked __noinline __attribute__((used)) void lookup_hash_map_in_map__1(void) { asm volatile (" \ r0 = %[map_array_48b] ll; \ exit; \ " : : __imm_addr(map_array_48b) : __clobber_all); } static __naked __noinline __attribute__((used)) void lookup_hash_map_in_map__2(void) { asm volatile (" \ r0 = %[map_in_map] ll; \ exit; \ " : : __imm_addr(map_in_map) : __clobber_all); } SEC("socket") __description("cond: two branches returning different map pointers for lookup (tail, tail)") __success __failure_unpriv __msg_unpriv("tail_call abusing map_ptr") __retval(42) __naked void pointers_for_lookup_tail_tail_1(void) { asm volatile (" \ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ if r6 != 0 goto l0_%=; \ r2 = %[map_prog2_socket] ll; \ goto l1_%=; \ l0_%=: r2 = %[map_prog1_socket] ll; \ l1_%=: r3 = 7; \ call %[bpf_tail_call]; \ r0 = 1; \ exit; \ " : : __imm(bpf_tail_call), __imm_addr(map_prog1_socket), __imm_addr(map_prog2_socket), __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) : __clobber_all); } SEC("socket") __description("cond: two branches returning same map pointers for lookup (tail, tail)") __success __success_unpriv __retval(42) __naked void pointers_for_lookup_tail_tail_2(void) { asm volatile (" \ r6 = *(u32*)(r1 + %[__sk_buff_mark]); \ if r6 == 0 goto l0_%=; \ r2 = %[map_prog2_socket] ll; \ goto l1_%=; \ l0_%=: r2 = %[map_prog2_socket] ll; \ l1_%=: r3 = 7; \ call %[bpf_tail_call]; \ r0 = 1; \ exit; \ " : : __imm(bpf_tail_call), __imm_addr(map_prog2_socket), __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)) : __clobber_all); } char _license[] SEC("license") = "GPL";
// SPDX-License-Identifier: GPL-2.0-or-later /* * Device tree source for the Emerson/Artesyn MVME7100 * * Copyright 2016 Elettra-Sincrotrone Trieste S.C.p.A. * * Author: Alessio Igor Bogani <[email protected]> */ /include/ "mpc8641si-pre.dtsi" / { model = "MVME7100"; compatible = "artesyn,MVME7100"; memory { device_type = "memory"; reg = <0x00000000 0x80000000>; }; soc: soc@f1000000 { ranges = <0x00000000 0xf1000000 0x00100000>; i2c@3000 { hwmon@4c { compatible = "dallas,max6649"; reg = <0x4c>; }; rtc@68 { status = "disabled"; }; }; enet0: ethernet@24000 { phy-handle = <&phy0>; phy-connection-type = "rgmii-id"; }; mdio@24520 { phy0: ethernet-phy@1 { reg = <1>; }; phy1: ethernet-phy@2 { reg = <2>; }; phy2: ethernet-phy@3 { reg = <3>; }; phy3: ethernet-phy@4 { reg = <4>; }; }; enet1: ethernet@25000 { phy-handle = <&phy1>; phy-connection-type = "rgmii-id"; }; mdio@25520 { status = "disabled"; }; enet2: ethernet@26000 { phy-handle = <&phy2>; phy-connection-type = "rgmii-id"; }; mdio@26520 { status = "disabled"; }; enet3: ethernet@27000 { phy-handle = <&phy3>; phy-connection-type = "rgmii-id"; }; mdio@27520 { status = "disabled"; }; serial1: serial@4600 { status = "disabled"; }; }; lbc: localbus@f1005000 { reg = <0xf1005000 0x1000>; ranges = <0 0 0xf8000000 0x08000000 // NOR Flash (128MB) 2 0 0xf2030000 0x00010000 // NAND Flash (8GB) 3 0 0xf2400000 0x00080000 // MRAM (512KB) 4 0 0xf2000000 0x00010000 // BCSR 5 0 0xf2010000 0x00010000>; // QUART bcsr@4,0 { compatible = "artesyn,mvme7100-bcsr"; reg = <4 0 0x10000>; }; serial@5,1000 { device_type = "serial"; compatible = "ns16550"; reg = <5 0x1000 0x100>; clock-frequency = <1843200>; interrupts = <11 1 0 0>; }; serial@5,2000 { device_type = "serial"; compatible = "ns16550"; reg = <5 0x2000 0x100>; clock-frequency = <1843200>; interrupts = <11 1 0 0>; }; serial@5,3000 { device_type = "serial"; compatible = "ns16550"; reg = <5 0x3000 0x100>; clock-frequency = <1843200>; interrupts = <11 1 0 0>; }; serial@5,4000 { device_type = "serial"; compatible = "ns16550"; reg = <5 0x4000 0x100>; clock-frequency = <1843200>; interrupts = <11 1 0 0>; }; }; pci0: pcie@f1008000 { status = "disabled"; }; pci1: pcie@f1009000 { status = "disabled"; }; chosen { stdout-path = &serial0; }; }; /include/ "mpc8641si-post.dtsi"
// SPDX-License-Identifier: GPL-2.0 /* * Functions related to io context handling */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/slab.h> #include <linux/security.h> #include <linux/sched/task.h> #include "blk.h" #include "blk-mq-sched.h" /* * For io context allocations */ static struct kmem_cache *iocontext_cachep; #ifdef CONFIG_BLK_ICQ /** * get_io_context - increment reference count to io_context * @ioc: io_context to get * * Increment reference count to @ioc. */ static void get_io_context(struct io_context *ioc) { BUG_ON(atomic_long_read(&ioc->refcount) <= 0); atomic_long_inc(&ioc->refcount); } /* * Exit an icq. Called with ioc locked for blk-mq, and with both ioc * and queue locked for legacy. */ static void ioc_exit_icq(struct io_cq *icq) { struct elevator_type *et = icq->q->elevator->type; if (icq->flags & ICQ_EXITED) return; if (et->ops.exit_icq) et->ops.exit_icq(icq); icq->flags |= ICQ_EXITED; } static void ioc_exit_icqs(struct io_context *ioc) { struct io_cq *icq; spin_lock_irq(&ioc->lock); hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) ioc_exit_icq(icq); spin_unlock_irq(&ioc->lock); } /* * Release an icq. Called with ioc locked for blk-mq, and with both ioc * and queue locked for legacy. */ static void ioc_destroy_icq(struct io_cq *icq) { struct io_context *ioc = icq->ioc; struct request_queue *q = icq->q; struct elevator_type *et = q->elevator->type; lockdep_assert_held(&ioc->lock); lockdep_assert_held(&q->queue_lock); if (icq->flags & ICQ_DESTROYED) return; radix_tree_delete(&ioc->icq_tree, icq->q->id); hlist_del_init(&icq->ioc_node); list_del_init(&icq->q_node); /* * Both setting lookup hint to and clearing it from @icq are done * under queue_lock. If it's not pointing to @icq now, it never * will. Hint assignment itself can race safely. */ if (rcu_access_pointer(ioc->icq_hint) == icq) rcu_assign_pointer(ioc->icq_hint, NULL); ioc_exit_icq(icq); /* * @icq->q might have gone away by the time RCU callback runs * making it impossible to determine icq_cache. Record it in @icq. */ icq->__rcu_icq_cache = et->icq_cache; icq->flags |= ICQ_DESTROYED; kfree_rcu(icq, __rcu_head); } /* * Slow path for ioc release in put_io_context(). Performs double-lock * dancing to unlink all icq's and then frees ioc. */ static void ioc_release_fn(struct work_struct *work) { struct io_context *ioc = container_of(work, struct io_context, release_work); spin_lock_irq(&ioc->lock); while (!hlist_empty(&ioc->icq_list)) { struct io_cq *icq = hlist_entry(ioc->icq_list.first, struct io_cq, ioc_node); struct request_queue *q = icq->q; if (spin_trylock(&q->queue_lock)) { ioc_destroy_icq(icq); spin_unlock(&q->queue_lock); } else { /* Make sure q and icq cannot be freed. */ rcu_read_lock(); /* Re-acquire the locks in the correct order. */ spin_unlock(&ioc->lock); spin_lock(&q->queue_lock); spin_lock(&ioc->lock); ioc_destroy_icq(icq); spin_unlock(&q->queue_lock); rcu_read_unlock(); } } spin_unlock_irq(&ioc->lock); kmem_cache_free(iocontext_cachep, ioc); } /* * Releasing icqs requires reverse order double locking and we may already be * holding a queue_lock. Do it asynchronously from a workqueue. 
*/ static bool ioc_delay_free(struct io_context *ioc) { unsigned long flags; spin_lock_irqsave(&ioc->lock, flags); if (!hlist_empty(&ioc->icq_list)) { queue_work(system_power_efficient_wq, &ioc->release_work); spin_unlock_irqrestore(&ioc->lock, flags); return true; } spin_unlock_irqrestore(&ioc->lock, flags); return false; } /** * ioc_clear_queue - break any ioc association with the specified queue * @q: request_queue being cleared * * Walk @q->icq_list and exit all io_cq's. */ void ioc_clear_queue(struct request_queue *q) { spin_lock_irq(&q->queue_lock); while (!list_empty(&q->icq_list)) { struct io_cq *icq = list_first_entry(&q->icq_list, struct io_cq, q_node); /* * Other context won't hold ioc lock to wait for queue_lock, see * details in ioc_release_fn(). */ spin_lock(&icq->ioc->lock); ioc_destroy_icq(icq); spin_unlock(&icq->ioc->lock); } spin_unlock_irq(&q->queue_lock); } #else /* CONFIG_BLK_ICQ */ static inline void ioc_exit_icqs(struct io_context *ioc) { } static inline bool ioc_delay_free(struct io_context *ioc) { return false; } #endif /* CONFIG_BLK_ICQ */ /** * put_io_context - put a reference of io_context * @ioc: io_context to put * * Decrement reference count of @ioc and release it if the count reaches * zero. */ void put_io_context(struct io_context *ioc) { BUG_ON(atomic_long_read(&ioc->refcount) <= 0); if (atomic_long_dec_and_test(&ioc->refcount) && !ioc_delay_free(ioc)) kmem_cache_free(iocontext_cachep, ioc); } EXPORT_SYMBOL_GPL(put_io_context); /* Called by the exiting task */ void exit_io_context(struct task_struct *task) { struct io_context *ioc; task_lock(task); ioc = task->io_context; task->io_context = NULL; task_unlock(task); if (atomic_dec_and_test(&ioc->active_ref)) { ioc_exit_icqs(ioc); put_io_context(ioc); } } static struct io_context *alloc_io_context(gfp_t gfp_flags, int node) { struct io_context *ioc; ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO, node); if (unlikely(!ioc)) return NULL; atomic_long_set(&ioc->refcount, 1); atomic_set(&ioc->active_ref, 1); #ifdef CONFIG_BLK_ICQ spin_lock_init(&ioc->lock); INIT_RADIX_TREE(&ioc->icq_tree, GFP_ATOMIC); INIT_HLIST_HEAD(&ioc->icq_list); INIT_WORK(&ioc->release_work, ioc_release_fn); #endif ioc->ioprio = IOPRIO_DEFAULT; return ioc; } int set_task_ioprio(struct task_struct *task, int ioprio) { int err; const struct cred *cred = current_cred(), *tcred; rcu_read_lock(); tcred = __task_cred(task); if (!uid_eq(tcred->uid, cred->euid) && !uid_eq(tcred->uid, cred->uid) && !capable(CAP_SYS_NICE)) { rcu_read_unlock(); return -EPERM; } rcu_read_unlock(); err = security_task_setioprio(task, ioprio); if (err) return err; task_lock(task); if (unlikely(!task->io_context)) { struct io_context *ioc; task_unlock(task); ioc = alloc_io_context(GFP_ATOMIC, NUMA_NO_NODE); if (!ioc) return -ENOMEM; task_lock(task); if (task->flags & PF_EXITING) { kmem_cache_free(iocontext_cachep, ioc); goto out; } if (task->io_context) kmem_cache_free(iocontext_cachep, ioc); else task->io_context = ioc; } task->io_context->ioprio = ioprio; out: task_unlock(task); return 0; } EXPORT_SYMBOL_GPL(set_task_ioprio); int __copy_io(unsigned long clone_flags, struct task_struct *tsk) { struct io_context *ioc = current->io_context; /* * Share io context with parent, if CLONE_IO is set */ if (clone_flags & CLONE_IO) { atomic_inc(&ioc->active_ref); tsk->io_context = ioc; } else if (ioprio_valid(ioc->ioprio)) { tsk->io_context = alloc_io_context(GFP_KERNEL, NUMA_NO_NODE); if (!tsk->io_context) return -ENOMEM; tsk->io_context->ioprio = 
ioc->ioprio; } return 0; } #ifdef CONFIG_BLK_ICQ /** * ioc_lookup_icq - lookup io_cq from ioc * @q: the associated request_queue * * Look up io_cq associated with @ioc - @q pair from @ioc. Must be called * with @q->queue_lock held. */ struct io_cq *ioc_lookup_icq(struct request_queue *q) { struct io_context *ioc = current->io_context; struct io_cq *icq; lockdep_assert_held(&q->queue_lock); /* * icq's are indexed from @ioc using radix tree and hint pointer, * both of which are protected with RCU. All removals are done * holding both q and ioc locks, and we're holding q lock - if we * find a icq which points to us, it's guaranteed to be valid. */ rcu_read_lock(); icq = rcu_dereference(ioc->icq_hint); if (icq && icq->q == q) goto out; icq = radix_tree_lookup(&ioc->icq_tree, q->id); if (icq && icq->q == q) rcu_assign_pointer(ioc->icq_hint, icq); /* allowed to race */ else icq = NULL; out: rcu_read_unlock(); return icq; } EXPORT_SYMBOL(ioc_lookup_icq); /** * ioc_create_icq - create and link io_cq * @q: request_queue of interest * * Make sure io_cq linking @ioc and @q exists. If icq doesn't exist, they * will be created using @gfp_mask. * * The caller is responsible for ensuring @ioc won't go away and @q is * alive and will stay alive until this function returns. */ static struct io_cq *ioc_create_icq(struct request_queue *q) { struct io_context *ioc = current->io_context; struct elevator_type *et = q->elevator->type; struct io_cq *icq; /* allocate stuff */ icq = kmem_cache_alloc_node(et->icq_cache, GFP_ATOMIC | __GFP_ZERO, q->node); if (!icq) return NULL; if (radix_tree_maybe_preload(GFP_ATOMIC) < 0) { kmem_cache_free(et->icq_cache, icq); return NULL; } icq->ioc = ioc; icq->q = q; INIT_LIST_HEAD(&icq->q_node); INIT_HLIST_NODE(&icq->ioc_node); /* lock both q and ioc and try to link @icq */ spin_lock_irq(&q->queue_lock); spin_lock(&ioc->lock); if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) { hlist_add_head(&icq->ioc_node, &ioc->icq_list); list_add(&icq->q_node, &q->icq_list); if (et->ops.init_icq) et->ops.init_icq(icq); } else { kmem_cache_free(et->icq_cache, icq); icq = ioc_lookup_icq(q); if (!icq) printk(KERN_ERR "cfq: icq link failed!\n"); } spin_unlock(&ioc->lock); spin_unlock_irq(&q->queue_lock); radix_tree_preload_end(); return icq; } struct io_cq *ioc_find_get_icq(struct request_queue *q) { struct io_context *ioc = current->io_context; struct io_cq *icq = NULL; if (unlikely(!ioc)) { ioc = alloc_io_context(GFP_ATOMIC, q->node); if (!ioc) return NULL; task_lock(current); if (current->io_context) { kmem_cache_free(iocontext_cachep, ioc); ioc = current->io_context; } else { current->io_context = ioc; } get_io_context(ioc); task_unlock(current); } else { get_io_context(ioc); spin_lock_irq(&q->queue_lock); icq = ioc_lookup_icq(q); spin_unlock_irq(&q->queue_lock); } if (!icq) { icq = ioc_create_icq(q); if (!icq) { put_io_context(ioc); return NULL; } } return icq; } EXPORT_SYMBOL_GPL(ioc_find_get_icq); #endif /* CONFIG_BLK_ICQ */ static int __init blk_ioc_init(void) { iocontext_cachep = kmem_cache_create("blkdev_ioc", sizeof(struct io_context), 0, SLAB_PANIC, NULL); return 0; } subsys_initcall(blk_ioc_init);
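/*
 * Added illustrative sketch (not part of blk-ioc.c above, and assuming
 * CONFIG_BLK_ICQ plus the declarations from block/blk.h): how an I/O
 * scheduler typically consumes the icq API.  The elevator's icq_cache is
 * sized for the scheduler's own structure, which must embed struct io_cq
 * as its first member because the code above allocates and frees the
 * object through the io_cq pointer.  "example_icq" and example_get_icq()
 * are hypothetical names; BFQ follows the same pattern with its
 * struct bfq_io_cq.
 */
struct example_icq {
	struct io_cq icq;	/* must be first: allocated from et->icq_cache */
	int private_state;	/* hypothetical per-(task, queue) scheduler data */
};

static inline struct example_icq *example_get_icq(struct request_queue *q)
{
	/* Takes a reference on current->io_context, see ioc_find_get_icq(). */
	struct io_cq *icq = ioc_find_get_icq(q);

	return icq ? container_of(icq, struct example_icq, icq) : NULL;
}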
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. */ #ifndef _ENIC_RES_H_ #define _ENIC_RES_H_ #include "wq_enet_desc.h" #include "rq_enet_desc.h" #include "vnic_wq.h" #include "vnic_rq.h" #define ENIC_MIN_WQ_DESCS 64 #define ENIC_MAX_WQ_DESCS 4096 #define ENIC_MIN_RQ_DESCS 64 #define ENIC_MAX_RQ_DESCS 4096 #define ENIC_MIN_MTU ETH_MIN_MTU #define ENIC_MAX_MTU 9000 #define ENIC_MULTICAST_PERFECT_FILTERS 32 #define ENIC_UNICAST_PERFECT_FILTERS 32 #define ENIC_NON_TSO_MAX_DESC 16 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss_or_csum_offset, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int offload_mode, int cq_entry, int sop, int eop, int loopback) { struct wq_enet_desc *desc = vnic_wq_next_desc(wq); u8 desc_skip_cnt = 1; u8 compressed_send = 0; u64 wrid = 0; wq_enet_desc_enc(desc, (u64)dma_addr | VNIC_PADDR_TARGET, (u16)len, (u16)mss_or_csum_offset, (u16)hdr_len, (u8)offload_mode, (u8)eop, (u8)cq_entry, 0, /* fcoe_encap */ (u8)vlan_tag_insert, (u16)vlan_tag, (u8)loopback); vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, (u8)cq_entry, compressed_send, wrid); } static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 0, 0, 0, 0, 0, eop, 0 /* !SOP */, eop, loopback); } static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, 0, 0, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, int ip_csum, int tcpudp_csum, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, (ip_csum ? 1 : 0) + (tcpudp_csum ? 2 : 0), 0, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_CSUM, eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int csum_offset, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, csum_offset, hdr_len, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_CSUM_L4, eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, void *os_buf, dma_addr_t dma_addr, unsigned int len, unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) { enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, mss, hdr_len, vlan_tag_insert, vlan_tag, WQ_ENET_OFFLOAD_MODE_TSO, eop, 1 /* SOP */, eop, loopback); } static inline void enic_queue_rq_desc(struct vnic_rq *rq, void *os_buf, unsigned int os_buf_index, dma_addr_t dma_addr, unsigned int len) { struct rq_enet_desc *desc = vnic_rq_next_desc(rq); u64 wrid = 0; u8 type = os_buf_index ? 
RQ_ENET_TYPE_NOT_SOP : RQ_ENET_TYPE_ONLY_SOP; rq_enet_desc_enc(desc, (u64)dma_addr | VNIC_PADDR_TARGET, type, (u16)len); vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid); } struct enic; int enic_get_vnic_config(struct enic *); int enic_add_vlan(struct enic *enic, u16 vlanid); int enic_del_vlan(struct enic *enic, u16 vlanid); int enic_set_nic_cfg(struct enic *enic, u8 rss_default_cpu, u8 rss_hash_type, u8 rss_hash_bits, u8 rss_base_cpu, u8 rss_enable, u8 tso_ipid_split_en, u8 ig_vlan_strip_en); int enic_set_rss_key(struct enic *enic, dma_addr_t key_pa, u64 len); int enic_set_rss_cpu(struct enic *enic, dma_addr_t cpu_pa, u64 len); void enic_get_res_counts(struct enic *enic); void enic_init_vnic_resources(struct enic *enic); int enic_alloc_vnic_resources(struct enic *); void enic_free_vnic_resources(struct enic *); #endif /* _ENIC_RES_H_ */
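/*
 * Added illustrative sketch (not part of the header above): posting one
 * receive buffer with enic_queue_rq_desc().  The driver's own RX refill
 * code does the real work; this helper and its "netdev"/"dma_dev"
 * parameters are assumptions kept only to make the example
 * self-contained (it also assumes linux/netdevice.h, linux/skbuff.h and
 * linux/dma-mapping.h are in scope).
 */
static inline int example_post_rx_buf(struct vnic_rq *rq,
				      struct net_device *netdev,
				      struct device *dma_dev,
				      unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
	dma_addr_t dma_addr;

	if (!skb)
		return -ENOMEM;

	dma_addr = dma_map_single(dma_dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_dev, dma_addr)) {
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	/* os_buf_index 0 -> RQ_ENET_TYPE_ONLY_SOP, i.e. a single-buffer packet */
	enic_queue_rq_desc(rq, skb, 0, dma_addr, len);
	return 0;
}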
// SPDX-License-Identifier: GPL-2.0+ /* * Copyright (C) 2019-2020 Icenowy Zheng <[email protected]> */ #include <linux/gpio/consumer.h> #include <linux/delay.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/regulator/consumer.h> #include <drm/drm_mipi_dsi.h> #include <drm/drm_modes.h> #include <drm/drm_panel.h> #define K101_IM2BA02_INIT_CMD_LEN 2 static const char * const regulator_names[] = { "dvdd", "avdd", "cvdd" }; struct k101_im2ba02 { struct drm_panel panel; struct mipi_dsi_device *dsi; struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)]; struct gpio_desc *reset; }; static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel) { return container_of(panel, struct k101_im2ba02, panel); } struct k101_im2ba02_init_cmd { u8 data[K101_IM2BA02_INIT_CMD_LEN]; }; static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = { /* Switch to page 0 */ { .data = { 0xE0, 0x00 } }, /* Seems to be some password */ { .data = { 0xE1, 0x93} }, { .data = { 0xE2, 0x65 } }, { .data = { 0xE3, 0xF8 } }, /* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */ { .data = { 0x80, 0x03 } }, /* Sequence control */ { .data = { 0x70, 0x02 } }, { .data = { 0x71, 0x23 } }, { .data = { 0x72, 0x06 } }, /* Switch to page 1 */ { .data = { 0xE0, 0x01 } }, /* Set VCOM */ { .data = { 0x00, 0x00 } }, { .data = { 0x01, 0x66 } }, /* Set VCOM_Reverse */ { .data = { 0x03, 0x00 } }, { .data = { 0x04, 0x25 } }, /* Set Gamma Power, VG[MS][PN] */ { .data = { 0x17, 0x00 } }, { .data = { 0x18, 0x6D } }, { .data = { 0x19, 0x00 } }, { .data = { 0x1A, 0x00 } }, { .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */ { .data = { 0x1C, 0x00 } }, /* Set Gate Power */ { .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */ { .data = { 0x20, 0x28 } }, /* VGL_R = -11V */ { .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */ { .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */ /* Set Panel */ { .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */ /* Set RGBCYC */ { .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */ { .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */ { .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */ { .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */ { .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */ { .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */ { .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */ /* Set TCON parameter */ { .data = { 0x40, 0x06 } }, /* RSO = 800 points */ { .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */ /* Set power voltage */ { .data = { 0x55, 0x0F } }, /* DCDCM */ { .data = { 0x56, 0x01 } }, { .data = { 0x57, 0x69 } }, { .data = { 0x58, 0x0A } }, { .data = { 0x59, 0x0A } }, { .data = { 0x5A, 0x45 } }, { .data = { 0x5B, 0x15 } }, /* Set gamma */ { .data = { 0x5D, 0x7C } }, { .data = { 0x5E, 0x65 } }, { .data = { 0x5F, 0x55 } }, { .data = { 0x60, 0x49 } }, { .data = { 0x61, 0x44 } }, { .data = { 0x62, 0x35 } }, { .data = { 0x63, 0x3A } }, { .data = { 0x64, 0x23 } }, { .data = { 0x65, 0x3D } }, { .data = { 0x66, 0x3C } }, { .data = { 0x67, 0x3D } }, { .data = { 0x68, 0x5D } }, { .data = { 0x69, 0x4D } }, { .data = { 0x6A, 0x56 } }, { .data = { 0x6B, 0x48 } }, { .data = { 0x6C, 0x45 } }, { .data = { 0x6D, 0x38 } }, { .data = { 0x6E, 0x25 } }, { .data = { 0x6F, 0x00 } }, { .data = { 0x70, 0x7C } }, { .data = { 0x71, 0x65 } }, { .data = { 0x72, 0x55 } }, { .data = { 0x73, 0x49 } }, { .data = { 0x74, 0x44 } }, { .data = { 0x75, 0x35 } }, { .data = { 0x76, 0x3A } }, { .data = { 0x77, 0x23 } }, { .data = { 0x78, 0x3D } }, { .data = { 0x79, 0x3C } }, { .data = { 0x7A, 0x3D } }, { .data = { 
0x7B, 0x5D } }, { .data = { 0x7C, 0x4D } }, { .data = { 0x7D, 0x56 } }, { .data = { 0x7E, 0x48 } }, { .data = { 0x7F, 0x45 } }, { .data = { 0x80, 0x38 } }, { .data = { 0x81, 0x25 } }, { .data = { 0x82, 0x00 } }, /* Switch to page 2, for GIP */ { .data = { 0xE0, 0x02 } }, { .data = { 0x00, 0x1E } }, { .data = { 0x01, 0x1E } }, { .data = { 0x02, 0x41 } }, { .data = { 0x03, 0x41 } }, { .data = { 0x04, 0x43 } }, { .data = { 0x05, 0x43 } }, { .data = { 0x06, 0x1F } }, { .data = { 0x07, 0x1F } }, { .data = { 0x08, 0x1F } }, { .data = { 0x09, 0x1F } }, { .data = { 0x0A, 0x1E } }, { .data = { 0x0B, 0x1E } }, { .data = { 0x0C, 0x1F } }, { .data = { 0x0D, 0x47 } }, { .data = { 0x0E, 0x47 } }, { .data = { 0x0F, 0x45 } }, { .data = { 0x10, 0x45 } }, { .data = { 0x11, 0x4B } }, { .data = { 0x12, 0x4B } }, { .data = { 0x13, 0x49 } }, { .data = { 0x14, 0x49 } }, { .data = { 0x15, 0x1F } }, { .data = { 0x16, 0x1E } }, { .data = { 0x17, 0x1E } }, { .data = { 0x18, 0x40 } }, { .data = { 0x19, 0x40 } }, { .data = { 0x1A, 0x42 } }, { .data = { 0x1B, 0x42 } }, { .data = { 0x1C, 0x1F } }, { .data = { 0x1D, 0x1F } }, { .data = { 0x1E, 0x1F } }, { .data = { 0x1F, 0x1f } }, { .data = { 0x20, 0x1E } }, { .data = { 0x21, 0x1E } }, { .data = { 0x22, 0x1f } }, { .data = { 0x23, 0x46 } }, { .data = { 0x24, 0x46 } }, { .data = { 0x25, 0x44 } }, { .data = { 0x26, 0x44 } }, { .data = { 0x27, 0x4A } }, { .data = { 0x28, 0x4A } }, { .data = { 0x29, 0x48 } }, { .data = { 0x2A, 0x48 } }, { .data = { 0x2B, 0x1f } }, { .data = { 0x2C, 0x1F } }, { .data = { 0x2D, 0x1F } }, { .data = { 0x2E, 0x42 } }, { .data = { 0x2F, 0x42 } }, { .data = { 0x30, 0x40 } }, { .data = { 0x31, 0x40 } }, { .data = { 0x32, 0x1E } }, { .data = { 0x33, 0x1E } }, { .data = { 0x34, 0x1F } }, { .data = { 0x35, 0x1F } }, { .data = { 0x36, 0x1E } }, { .data = { 0x37, 0x1E } }, { .data = { 0x38, 0x1F } }, { .data = { 0x39, 0x48 } }, { .data = { 0x3A, 0x48 } }, { .data = { 0x3B, 0x4A } }, { .data = { 0x3C, 0x4A } }, { .data = { 0x3D, 0x44 } }, { .data = { 0x3E, 0x44 } }, { .data = { 0x3F, 0x46 } }, { .data = { 0x40, 0x46 } }, { .data = { 0x41, 0x1F } }, { .data = { 0x42, 0x1F } }, { .data = { 0x43, 0x1F } }, { .data = { 0x44, 0x43 } }, { .data = { 0x45, 0x43 } }, { .data = { 0x46, 0x41 } }, { .data = { 0x47, 0x41 } }, { .data = { 0x48, 0x1E } }, { .data = { 0x49, 0x1E } }, { .data = { 0x4A, 0x1E } }, { .data = { 0x4B, 0x1F } }, { .data = { 0x4C, 0x1E } }, { .data = { 0x4D, 0x1E } }, { .data = { 0x4E, 0x1F } }, { .data = { 0x4F, 0x49 } }, { .data = { 0x50, 0x49 } }, { .data = { 0x51, 0x4B } }, { .data = { 0x52, 0x4B } }, { .data = { 0x53, 0x45 } }, { .data = { 0x54, 0x45 } }, { .data = { 0x55, 0x47 } }, { .data = { 0x56, 0x47 } }, { .data = { 0x57, 0x1F } }, { .data = { 0x58, 0x10 } }, { .data = { 0x59, 0x00 } }, { .data = { 0x5A, 0x00 } }, { .data = { 0x5B, 0x30 } }, { .data = { 0x5C, 0x02 } }, { .data = { 0x5D, 0x40 } }, { .data = { 0x5E, 0x01 } }, { .data = { 0x5F, 0x02 } }, { .data = { 0x60, 0x30 } }, { .data = { 0x61, 0x01 } }, { .data = { 0x62, 0x02 } }, { .data = { 0x63, 0x6A } }, { .data = { 0x64, 0x6A } }, { .data = { 0x65, 0x05 } }, { .data = { 0x66, 0x12 } }, { .data = { 0x67, 0x74 } }, { .data = { 0x68, 0x04 } }, { .data = { 0x69, 0x6A } }, { .data = { 0x6A, 0x6A } }, { .data = { 0x6B, 0x08 } }, { .data = { 0x6C, 0x00 } }, { .data = { 0x6D, 0x04 } }, { .data = { 0x6E, 0x04 } }, { .data = { 0x6F, 0x88 } }, { .data = { 0x70, 0x00 } }, { .data = { 0x71, 0x00 } }, { .data = { 0x72, 0x06 } }, { .data = { 0x73, 0x7B } }, { .data = { 0x74, 0x00 } }, { 
.data = { 0x75, 0x07 } }, { .data = { 0x76, 0x00 } }, { .data = { 0x77, 0x5D } }, { .data = { 0x78, 0x17 } }, { .data = { 0x79, 0x1F } }, { .data = { 0x7A, 0x00 } }, { .data = { 0x7B, 0x00 } }, { .data = { 0x7C, 0x00 } }, { .data = { 0x7D, 0x03 } }, { .data = { 0x7E, 0x7B } }, { .data = { 0xE0, 0x04 } }, { .data = { 0x2B, 0x2B } }, { .data = { 0x2E, 0x44 } }, { .data = { 0xE0, 0x01 } }, { .data = { 0x0E, 0x01 } }, { .data = { 0xE0, 0x03 } }, { .data = { 0x98, 0x2F } }, { .data = { 0xE0, 0x00 } }, { .data = { 0xE6, 0x02 } }, { .data = { 0xE7, 0x02 } }, { .data = { 0x11, 0x00 } }, }; static const struct k101_im2ba02_init_cmd timed_cmds[] = { { .data = { 0x29, 0x00 } }, { .data = { 0x35, 0x00 } }, }; static int k101_im2ba02_prepare(struct drm_panel *panel) { struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel); struct mipi_dsi_device *dsi = ctx->dsi; unsigned int i; int ret; ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret) return ret; msleep(30); gpiod_set_value(ctx->reset, 1); msleep(50); gpiod_set_value(ctx->reset, 0); msleep(50); gpiod_set_value(ctx->reset, 1); msleep(200); for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) { const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i]; ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN); if (ret < 0) goto powerdown; } return 0; powerdown: gpiod_set_value(ctx->reset, 0); msleep(50); return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); } static int k101_im2ba02_enable(struct drm_panel *panel) { struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel); const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1]; int ret; msleep(150); ret = mipi_dsi_dcs_set_display_on(ctx->dsi); if (ret < 0) return ret; msleep(50); return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN); } static int k101_im2ba02_disable(struct drm_panel *panel) { struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel); return mipi_dsi_dcs_set_display_off(ctx->dsi); } static int k101_im2ba02_unprepare(struct drm_panel *panel) { struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel); int ret; ret = mipi_dsi_dcs_set_display_off(ctx->dsi); if (ret < 0) dev_err(panel->dev, "failed to set display off: %d\n", ret); ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi); if (ret < 0) dev_err(panel->dev, "failed to enter sleep mode: %d\n", ret); msleep(200); gpiod_set_value(ctx->reset, 0); msleep(20); return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); } static const struct drm_display_mode k101_im2ba02_default_mode = { .clock = 70000, .hdisplay = 800, .hsync_start = 800 + 20, .hsync_end = 800 + 20 + 20, .htotal = 800 + 20 + 20 + 20, .vdisplay = 1280, .vsync_start = 1280 + 16, .vsync_end = 1280 + 16 + 4, .vtotal = 1280 + 16 + 4 + 4, .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, .width_mm = 136, .height_mm = 217, }; static int k101_im2ba02_get_modes(struct drm_panel *panel, struct drm_connector *connector) { struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel); struct drm_display_mode *mode; mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode); if (!mode) { dev_err(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n", k101_im2ba02_default_mode.hdisplay, k101_im2ba02_default_mode.vdisplay, drm_mode_vrefresh(&k101_im2ba02_default_mode)); return -ENOMEM; } drm_mode_set_name(mode); mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; connector->display_info.width_mm = mode->width_mm; connector->display_info.height_mm = 
mode->height_mm; drm_mode_probed_add(connector, mode); return 1; } static const struct drm_panel_funcs k101_im2ba02_funcs = { .disable = k101_im2ba02_disable, .unprepare = k101_im2ba02_unprepare, .prepare = k101_im2ba02_prepare, .enable = k101_im2ba02_enable, .get_modes = k101_im2ba02_get_modes, }; static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi) { struct k101_im2ba02 *ctx; unsigned int i; int ret; ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; mipi_dsi_set_drvdata(dsi, ctx); ctx->dsi = dsi; for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++) ctx->supplies[i].supply = regulator_names[i]; ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies), ctx->supplies); if (ret < 0) return dev_err_probe(&dsi->dev, ret, "Couldn't get regulators\n"); ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(ctx->reset)) return dev_err_probe(&dsi->dev, PTR_ERR(ctx->reset), "Couldn't get our reset GPIO\n"); drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs, DRM_MODE_CONNECTOR_DSI); ret = drm_panel_of_backlight(&ctx->panel); if (ret) return ret; drm_panel_add(&ctx->panel); dsi->mode_flags = MIPI_DSI_MODE_VIDEO; dsi->format = MIPI_DSI_FMT_RGB888; dsi->lanes = 4; ret = mipi_dsi_attach(dsi); if (ret < 0) { drm_panel_remove(&ctx->panel); return ret; } return 0; } static void k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi) { struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi); mipi_dsi_detach(dsi); drm_panel_remove(&ctx->panel); } static const struct of_device_id k101_im2ba02_of_match[] = { { .compatible = "feixin,k101-im2ba02", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match); static struct mipi_dsi_driver k101_im2ba02_driver = { .probe = k101_im2ba02_dsi_probe, .remove = k101_im2ba02_dsi_remove, .driver = { .name = "feixin-k101-im2ba02", .of_match_table = k101_im2ba02_of_match, }, }; module_mipi_dsi_driver(k101_im2ba02_driver); MODULE_AUTHOR("Icenowy Zheng <[email protected]>"); MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel"); MODULE_LICENSE("GPL");
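/*
 * Added note (not part of the driver above): the fixed mode in
 * k101_im2ba02_default_mode gives htotal = 800 + 20 + 20 + 20 = 860 and
 * vtotal = 1280 + 16 + 4 + 4 = 1304, so with the 70 MHz pixel clock the
 * panel refreshes at about 70,000,000 / (860 * 1304) ~= 62 Hz, which is
 * what drm_mode_vrefresh() reports for this mode.
 */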
// SPDX-License-Identifier: GPL-2.0-or-later /* L2TPv3 IP encapsulation support for IPv6 * * Copyright (c) 2012 Katalix Systems Ltd */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/icmp.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/random.h> #include <linux/socket.h> #include <linux/l2tp.h> #include <linux/in.h> #include <linux/in6.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/udp.h> #include <net/inet_common.h> #include <net/tcp_states.h> #include <net/protocol.h> #include <net/xfrm.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <net/transp_v6.h> #include <net/addrconf.h> #include <net/ip6_route.h> #include "l2tp_core.h" /* per-net private data for this module */ static unsigned int l2tp_ip6_net_id; struct l2tp_ip6_net { rwlock_t l2tp_ip6_lock; struct hlist_head l2tp_ip6_table; struct hlist_head l2tp_ip6_bind_table; }; struct l2tp_ip6_sock { /* inet_sock has to be the first member of l2tp_ip6_sock */ struct inet_sock inet; u32 conn_id; u32 peer_conn_id; struct ipv6_pinfo inet6; }; static struct l2tp_ip6_sock *l2tp_ip6_sk(const struct sock *sk) { return (struct l2tp_ip6_sock *)sk; } static struct l2tp_ip6_net *l2tp_ip6_pernet(const struct net *net) { return net_generic(net, l2tp_ip6_net_id); } static struct sock *__l2tp_ip6_bind_lookup(const struct net *net, const struct in6_addr *laddr, const struct in6_addr *raddr, int dif, u32 tunnel_id) { struct l2tp_ip6_net *pn = l2tp_ip6_pernet(net); struct sock *sk; sk_for_each_bound(sk, &pn->l2tp_ip6_bind_table) { const struct in6_addr *sk_laddr = inet6_rcv_saddr(sk); const struct in6_addr *sk_raddr = &sk->sk_v6_daddr; const struct l2tp_ip6_sock *l2tp = l2tp_ip6_sk(sk); int bound_dev_if; if (!net_eq(sock_net(sk), net)) continue; bound_dev_if = READ_ONCE(sk->sk_bound_dev_if); if (bound_dev_if && dif && bound_dev_if != dif) continue; if (sk_laddr && !ipv6_addr_any(sk_laddr) && !ipv6_addr_any(laddr) && !ipv6_addr_equal(sk_laddr, laddr)) continue; if (!ipv6_addr_any(sk_raddr) && raddr && !ipv6_addr_any(raddr) && !ipv6_addr_equal(sk_raddr, raddr)) continue; if (l2tp->conn_id != tunnel_id) continue; goto found; } sk = NULL; found: return sk; } /* When processing receive frames, there are two cases to * consider. Data frames consist of a non-zero session-id and an * optional cookie. Control frames consist of a regular L2TP header * preceded by 32-bits of zeros. * * L2TPv3 Session Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Session ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Cookie (optional, maximum 64 bits)... * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * L2TPv3 Control Message Header Over IP * * 0 1 2 3 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | (32 bits of zeros) | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * |T|L|x|x|S|x|x|x|x|x|x|x| Ver | Length | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Control Connection ID | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * | Ns | Nr | * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ * * All control frames are passed to userspace. 
*/ static int l2tp_ip6_recv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); struct l2tp_ip6_net *pn; struct sock *sk; u32 session_id; u32 tunnel_id; unsigned char *ptr, *optr; struct l2tp_session *session; struct l2tp_tunnel *tunnel = NULL; struct ipv6hdr *iph; pn = l2tp_ip6_pernet(net); if (!pskb_may_pull(skb, 4)) goto discard; /* Point to L2TP header */ optr = skb->data; ptr = skb->data; session_id = ntohl(*((__be32 *)ptr)); ptr += 4; /* RFC3931: L2TP/IP packets have the first 4 bytes containing * the session_id. If it is 0, the packet is a L2TP control * frame and the session_id value can be discarded. */ if (session_id == 0) { __skb_pull(skb, 4); goto pass_up; } /* Ok, this is a data packet. Lookup the session. */ session = l2tp_v3_session_get(net, NULL, session_id); if (!session) goto discard; tunnel = session->tunnel; if (!tunnel) goto discard_sess; if (l2tp_v3_ensure_opt_in_linear(session, skb, &ptr, &optr)) goto discard_sess; l2tp_recv_common(session, skb, ptr, optr, 0, skb->len); l2tp_session_put(session); return 0; pass_up: /* Get the tunnel_id from the L2TP header */ if (!pskb_may_pull(skb, 12)) goto discard; if ((skb->data[0] & 0xc0) != 0xc0) goto discard; tunnel_id = ntohl(*(__be32 *)&skb->data[4]); iph = ipv6_hdr(skb); read_lock_bh(&pn->l2tp_ip6_lock); sk = __l2tp_ip6_bind_lookup(net, &iph->daddr, &iph->saddr, inet6_iif(skb), tunnel_id); if (!sk) { read_unlock_bh(&pn->l2tp_ip6_lock); goto discard; } sock_hold(sk); read_unlock_bh(&pn->l2tp_ip6_lock); if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_put; nf_reset_ct(skb); return sk_receive_skb(sk, skb, 1); discard_sess: l2tp_session_put(session); goto discard; discard_put: sock_put(sk); discard: kfree_skb(skb); return 0; } static int l2tp_ip6_hash(struct sock *sk) { struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk)); if (sk_unhashed(sk)) { write_lock_bh(&pn->l2tp_ip6_lock); sk_add_node(sk, &pn->l2tp_ip6_table); write_unlock_bh(&pn->l2tp_ip6_lock); } return 0; } static void l2tp_ip6_unhash(struct sock *sk) { struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk)); if (sk_unhashed(sk)) return; write_lock_bh(&pn->l2tp_ip6_lock); sk_del_node_init(sk); write_unlock_bh(&pn->l2tp_ip6_lock); } static int l2tp_ip6_open(struct sock *sk) { /* Prevent autobind. We don't have ports. 
*/ inet_sk(sk)->inet_num = IPPROTO_L2TP; l2tp_ip6_hash(sk); return 0; } static void l2tp_ip6_close(struct sock *sk, long timeout) { struct l2tp_ip6_net *pn = l2tp_ip6_pernet(sock_net(sk)); write_lock_bh(&pn->l2tp_ip6_lock); hlist_del_init(&sk->sk_bind_node); sk_del_node_init(sk); write_unlock_bh(&pn->l2tp_ip6_lock); sk_common_release(sk); } static void l2tp_ip6_destroy_sock(struct sock *sk) { struct l2tp_tunnel *tunnel; lock_sock(sk); ip6_flush_pending_frames(sk); release_sock(sk); tunnel = l2tp_sk_to_tunnel(sk); if (tunnel) { l2tp_tunnel_delete(tunnel); l2tp_tunnel_put(tunnel); } } static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct inet_sock *inet = inet_sk(sk); struct ipv6_pinfo *np = inet6_sk(sk); struct sockaddr_l2tpip6 *addr = (struct sockaddr_l2tpip6 *)uaddr; struct net *net = sock_net(sk); struct l2tp_ip6_net *pn; __be32 v4addr = 0; int bound_dev_if; int addr_type; int err; pn = l2tp_ip6_pernet(net); if (addr->l2tp_family != AF_INET6) return -EINVAL; if (addr_len < sizeof(*addr)) return -EINVAL; addr_type = ipv6_addr_type(&addr->l2tp_addr); /* l2tp_ip6 sockets are IPv6 only */ if (addr_type == IPV6_ADDR_MAPPED) return -EADDRNOTAVAIL; /* L2TP is point-point, not multicast */ if (addr_type & IPV6_ADDR_MULTICAST) return -EADDRNOTAVAIL; lock_sock(sk); err = -EINVAL; if (!sock_flag(sk, SOCK_ZAPPED)) goto out_unlock; if (sk->sk_state != TCP_CLOSE) goto out_unlock; bound_dev_if = sk->sk_bound_dev_if; /* Check if the address belongs to the host. */ rcu_read_lock(); if (addr_type != IPV6_ADDR_ANY) { struct net_device *dev = NULL; if (addr_type & IPV6_ADDR_LINKLOCAL) { if (addr->l2tp_scope_id) bound_dev_if = addr->l2tp_scope_id; /* Binding to link-local address requires an * interface. */ if (!bound_dev_if) goto out_unlock_rcu; err = -ENODEV; dev = dev_get_by_index_rcu(sock_net(sk), bound_dev_if); if (!dev) goto out_unlock_rcu; } /* ipv4 addr of the socket is invalid. Only the * unspecified and mapped address have a v4 equivalent. 
*/ v4addr = LOOPBACK4_IPV6; err = -EADDRNOTAVAIL; if (!ipv6_chk_addr(sock_net(sk), &addr->l2tp_addr, dev, 0)) goto out_unlock_rcu; } rcu_read_unlock(); write_lock_bh(&pn->l2tp_ip6_lock); if (__l2tp_ip6_bind_lookup(net, &addr->l2tp_addr, NULL, bound_dev_if, addr->l2tp_conn_id)) { write_unlock_bh(&pn->l2tp_ip6_lock); err = -EADDRINUSE; goto out_unlock; } inet->inet_saddr = v4addr; inet->inet_rcv_saddr = v4addr; sk->sk_bound_dev_if = bound_dev_if; sk->sk_v6_rcv_saddr = addr->l2tp_addr; np->saddr = addr->l2tp_addr; l2tp_ip6_sk(sk)->conn_id = addr->l2tp_conn_id; sk_add_bind_node(sk, &pn->l2tp_ip6_bind_table); sk_del_node_init(sk); write_unlock_bh(&pn->l2tp_ip6_lock); sock_reset_flag(sk, SOCK_ZAPPED); release_sock(sk); return 0; out_unlock_rcu: rcu_read_unlock(); out_unlock: release_sock(sk); return err; } static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; struct sockaddr_in6 *usin = (struct sockaddr_in6 *)uaddr; struct in6_addr *daddr; int addr_type; int rc; struct l2tp_ip6_net *pn; if (addr_len < sizeof(*lsa)) return -EINVAL; if (usin->sin6_family != AF_INET6) return -EINVAL; addr_type = ipv6_addr_type(&usin->sin6_addr); if (addr_type & IPV6_ADDR_MULTICAST) return -EINVAL; if (addr_type & IPV6_ADDR_MAPPED) { daddr = &usin->sin6_addr; if (ipv4_is_multicast(daddr->s6_addr32[3])) return -EINVAL; } lock_sock(sk); /* Must bind first - autobinding does not work */ if (sock_flag(sk, SOCK_ZAPPED)) { rc = -EINVAL; goto out_sk; } rc = __ip6_datagram_connect(sk, uaddr, addr_len); if (rc < 0) goto out_sk; l2tp_ip6_sk(sk)->peer_conn_id = lsa->l2tp_conn_id; pn = l2tp_ip6_pernet(sock_net(sk)); write_lock_bh(&pn->l2tp_ip6_lock); hlist_del_init(&sk->sk_bind_node); sk_add_bind_node(sk, &pn->l2tp_ip6_bind_table); write_unlock_bh(&pn->l2tp_ip6_lock); out_sk: release_sock(sk); return rc; } static int l2tp_ip6_disconnect(struct sock *sk, int flags) { if (sock_flag(sk, SOCK_ZAPPED)) return 0; return __udp_disconnect(sk, flags); } static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, int peer) { struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)uaddr; struct sock *sk = sock->sk; struct ipv6_pinfo *np = inet6_sk(sk); struct l2tp_ip6_sock *lsk = l2tp_ip6_sk(sk); lsa->l2tp_family = AF_INET6; lsa->l2tp_flowinfo = 0; lsa->l2tp_scope_id = 0; lsa->l2tp_unused = 0; if (peer) { if (!lsk->peer_conn_id) return -ENOTCONN; lsa->l2tp_conn_id = lsk->peer_conn_id; lsa->l2tp_addr = sk->sk_v6_daddr; if (inet6_test_bit(SNDFLOW, sk)) lsa->l2tp_flowinfo = np->flow_label; } else { if (ipv6_addr_any(&sk->sk_v6_rcv_saddr)) lsa->l2tp_addr = np->saddr; else lsa->l2tp_addr = sk->sk_v6_rcv_saddr; lsa->l2tp_conn_id = lsk->conn_id; } if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) lsa->l2tp_scope_id = READ_ONCE(sk->sk_bound_dev_if); return sizeof(*lsa); } static int l2tp_ip6_backlog_recv(struct sock *sk, struct sk_buff *skb) { int rc; /* Charge it to the socket, dropping if the queue is full. 
*/ rc = sock_queue_rcv_skb(sk, skb); if (rc < 0) goto drop; return 0; drop: IP_INC_STATS(sock_net(sk), IPSTATS_MIB_INDISCARDS); kfree_skb(skb); return -1; } static int l2tp_ip6_push_pending_frames(struct sock *sk) { struct sk_buff *skb; __be32 *transhdr = NULL; int err = 0; skb = skb_peek(&sk->sk_write_queue); if (!skb) goto out; transhdr = (__be32 *)skb_transport_header(skb); *transhdr = 0; err = ip6_push_pending_frames(sk); out: return err; } /* Userspace will call sendmsg() on the tunnel socket to send L2TP * control frames. */ static int l2tp_ip6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) { struct ipv6_txoptions opt_space; DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); struct in6_addr *daddr, *final_p, final; struct ipv6_pinfo *np = inet6_sk(sk); struct ipv6_txoptions *opt_to_free = NULL; struct ipv6_txoptions *opt = NULL; struct ip6_flowlabel *flowlabel = NULL; struct dst_entry *dst = NULL; struct flowi6 fl6; struct ipcm6_cookie ipc6; int addr_len = msg->msg_namelen; int transhdrlen = 4; /* zero session-id */ int ulen; int err; /* Rough check on arithmetic overflow, * better check is made in ip6_append_data(). */ if (len > INT_MAX - transhdrlen) return -EMSGSIZE; /* Mirror BSD error message compatibility */ if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* Get and verify the address */ memset(&fl6, 0, sizeof(fl6)); fl6.flowi6_mark = READ_ONCE(sk->sk_mark); fl6.flowi6_uid = sk->sk_uid; ipcm6_init(&ipc6); if (lsa) { if (addr_len < SIN6_LEN_RFC2133) return -EINVAL; if (lsa->l2tp_family && lsa->l2tp_family != AF_INET6) return -EAFNOSUPPORT; daddr = &lsa->l2tp_addr; if (inet6_test_bit(SNDFLOW, sk)) { fl6.flowlabel = lsa->l2tp_flowinfo & IPV6_FLOWINFO_MASK; if (fl6.flowlabel & IPV6_FLOWLABEL_MASK) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (IS_ERR(flowlabel)) return -EINVAL; } } /* Otherwise it will be difficult to maintain * sk->sk_dst_cache. 
*/ if (sk->sk_state == TCP_ESTABLISHED && ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) daddr = &sk->sk_v6_daddr; if (addr_len >= sizeof(struct sockaddr_in6) && lsa->l2tp_scope_id && ipv6_addr_type(daddr) & IPV6_ADDR_LINKLOCAL) fl6.flowi6_oif = lsa->l2tp_scope_id; } else { if (sk->sk_state != TCP_ESTABLISHED) return -EDESTADDRREQ; daddr = &sk->sk_v6_daddr; fl6.flowlabel = np->flow_label; } if (fl6.flowi6_oif == 0) fl6.flowi6_oif = READ_ONCE(sk->sk_bound_dev_if); if (msg->msg_controllen) { opt = &opt_space; memset(opt, 0, sizeof(struct ipv6_txoptions)); opt->tot_len = sizeof(struct ipv6_txoptions); ipc6.opt = opt; err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); if (err < 0) { fl6_sock_release(flowlabel); return err; } if ((fl6.flowlabel & IPV6_FLOWLABEL_MASK) && !flowlabel) { flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); if (IS_ERR(flowlabel)) return -EINVAL; } if (!(opt->opt_nflen | opt->opt_flen)) opt = NULL; } if (!opt) { opt = txopt_get(np); opt_to_free = opt; } if (flowlabel) opt = fl6_merge_options(&opt_space, flowlabel, opt); opt = ipv6_fixup_options(&opt_space, opt); ipc6.opt = opt; fl6.flowi6_proto = sk->sk_protocol; if (!ipv6_addr_any(daddr)) fl6.daddr = *daddr; else fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) fl6.saddr = np->saddr; final_p = fl6_update_dst(&fl6, opt, &final); if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) fl6.flowi6_oif = READ_ONCE(np->mcast_oif); else if (!fl6.flowi6_oif) fl6.flowi6_oif = READ_ONCE(np->ucast_oif); security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); if (ipc6.tclass < 0) ipc6.tclass = np->tclass; fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); if (IS_ERR(dst)) { err = PTR_ERR(dst); goto out; } if (ipc6.hlimit < 0) ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); if (ipc6.dontfrag < 0) ipc6.dontfrag = inet6_test_bit(DONTFRAG, sk); if (msg->msg_flags & MSG_CONFIRM) goto do_confirm; back_from_confirm: lock_sock(sk); ulen = len + (skb_queue_empty(&sk->sk_write_queue) ? transhdrlen : 0); err = ip6_append_data(sk, ip_generic_getfrag, msg, ulen, transhdrlen, &ipc6, &fl6, dst_rt6_info(dst), msg->msg_flags); if (err) ip6_flush_pending_frames(sk); else if (!(msg->msg_flags & MSG_MORE)) err = l2tp_ip6_push_pending_frames(sk); release_sock(sk); done: dst_release(dst); out: fl6_sock_release(flowlabel); txopt_put(opt_to_free); return err < 0 ? err : len; do_confirm: if (msg->msg_flags & MSG_PROBE) dst_confirm_neigh(dst, &fl6.daddr); if (!(msg->msg_flags & MSG_PROBE) || len) goto back_from_confirm; err = 0; goto done; } static int l2tp_ip6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int flags, int *addr_len) { struct ipv6_pinfo *np = inet6_sk(sk); DECLARE_SOCKADDR(struct sockaddr_l2tpip6 *, lsa, msg->msg_name); size_t copied = 0; int err = -EOPNOTSUPP; struct sk_buff *skb; if (flags & MSG_OOB) goto out; if (flags & MSG_ERRQUEUE) return ipv6_recv_error(sk, msg, len, addr_len); skb = skb_recv_datagram(sk, flags, &err); if (!skb) goto out; copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; copied = len; } err = skb_copy_datagram_msg(skb, 0, msg, copied); if (err) goto done; sock_recv_timestamp(msg, sk, skb); /* Copy the address. 
*/ if (lsa) { lsa->l2tp_family = AF_INET6; lsa->l2tp_unused = 0; lsa->l2tp_addr = ipv6_hdr(skb)->saddr; lsa->l2tp_flowinfo = 0; lsa->l2tp_scope_id = 0; lsa->l2tp_conn_id = 0; if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL) lsa->l2tp_scope_id = inet6_iif(skb); *addr_len = sizeof(*lsa); } if (np->rxopt.all) ip6_datagram_recv_ctl(sk, msg, skb); if (flags & MSG_TRUNC) copied = skb->len; done: skb_free_datagram(sk, skb); out: return err ? err : copied; } static struct proto l2tp_ip6_prot = { .name = "L2TP/IPv6", .owner = THIS_MODULE, .init = l2tp_ip6_open, .close = l2tp_ip6_close, .bind = l2tp_ip6_bind, .connect = l2tp_ip6_connect, .disconnect = l2tp_ip6_disconnect, .ioctl = l2tp_ioctl, .destroy = l2tp_ip6_destroy_sock, .setsockopt = ipv6_setsockopt, .getsockopt = ipv6_getsockopt, .sendmsg = l2tp_ip6_sendmsg, .recvmsg = l2tp_ip6_recvmsg, .backlog_rcv = l2tp_ip6_backlog_recv, .hash = l2tp_ip6_hash, .unhash = l2tp_ip6_unhash, .obj_size = sizeof(struct l2tp_ip6_sock), .ipv6_pinfo_offset = offsetof(struct l2tp_ip6_sock, inet6), }; static const struct proto_ops l2tp_ip6_ops = { .family = PF_INET6, .owner = THIS_MODULE, .release = inet6_release, .bind = inet6_bind, .connect = inet_dgram_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = l2tp_ip6_getname, .poll = datagram_poll, .ioctl = inet6_ioctl, .gettstamp = sock_gettstamp, .listen = sock_no_listen, .shutdown = inet_shutdown, .setsockopt = sock_common_setsockopt, .getsockopt = sock_common_getsockopt, .sendmsg = inet_sendmsg, .recvmsg = sock_common_recvmsg, .mmap = sock_no_mmap, #ifdef CONFIG_COMPAT .compat_ioctl = inet6_compat_ioctl, #endif }; static struct inet_protosw l2tp_ip6_protosw = { .type = SOCK_DGRAM, .protocol = IPPROTO_L2TP, .prot = &l2tp_ip6_prot, .ops = &l2tp_ip6_ops, }; static struct inet6_protocol l2tp_ip6_protocol __read_mostly = { .handler = l2tp_ip6_recv, }; static __net_init int l2tp_ip6_init_net(struct net *net) { struct l2tp_ip6_net *pn = net_generic(net, l2tp_ip6_net_id); rwlock_init(&pn->l2tp_ip6_lock); INIT_HLIST_HEAD(&pn->l2tp_ip6_table); INIT_HLIST_HEAD(&pn->l2tp_ip6_bind_table); return 0; } static __net_exit void l2tp_ip6_exit_net(struct net *net) { struct l2tp_ip6_net *pn = l2tp_ip6_pernet(net); write_lock_bh(&pn->l2tp_ip6_lock); WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip6_table) != 0); WARN_ON_ONCE(hlist_count_nodes(&pn->l2tp_ip6_bind_table) != 0); write_unlock_bh(&pn->l2tp_ip6_lock); } static struct pernet_operations l2tp_ip6_net_ops = { .init = l2tp_ip6_init_net, .exit = l2tp_ip6_exit_net, .id = &l2tp_ip6_net_id, .size = sizeof(struct l2tp_ip6_net), }; static int __init l2tp_ip6_init(void) { int err; pr_info("L2TP IP encapsulation support for IPv6 (L2TPv3)\n"); err = register_pernet_device(&l2tp_ip6_net_ops); if (err) goto out; err = proto_register(&l2tp_ip6_prot, 1); if (err != 0) goto out1; err = inet6_add_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP); if (err) goto out2; inet6_register_protosw(&l2tp_ip6_protosw); return 0; out2: proto_unregister(&l2tp_ip6_prot); out1: unregister_pernet_device(&l2tp_ip6_net_ops); out: return err; } static void __exit l2tp_ip6_exit(void) { inet6_unregister_protosw(&l2tp_ip6_protosw); inet6_del_protocol(&l2tp_ip6_protocol, IPPROTO_L2TP); proto_unregister(&l2tp_ip6_prot); unregister_pernet_device(&l2tp_ip6_net_ops); } module_init(l2tp_ip6_init); module_exit(l2tp_ip6_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Chris Elston <[email protected]>"); MODULE_DESCRIPTION("L2TP IP encapsulation for IPv6"); MODULE_VERSION("1.0"); /* Use the values of 
SOCK_DGRAM (2) as type and IPPROTO_L2TP (115) as protocol, * because __stringify doesn't like enums */ MODULE_ALIAS_NET_PF_PROTO_TYPE(PF_INET6, 115, 2); MODULE_ALIAS_NET_PF_PROTO(PF_INET6, 115);
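/*
 * Added illustrative sketch (not part of l2tp_ip6.c above): how userspace
 * would open and bind the unmanaged L2TPv3 IP-encapsulation socket this
 * module implements.  It is plain userspace C against the UAPI headers,
 * kept under "#if 0" so it is never built here; the connection ID is a
 * made-up example value and error handling is minimal.
 */
#if 0	/* userspace example only */
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/l2tp.h>

static int open_l2tp_ip6_socket(uint32_t local_conn_id)
{
	struct sockaddr_l2tpip6 addr;
	int fd = socket(AF_INET6, SOCK_DGRAM, IPPROTO_L2TP);	/* IPPROTO_L2TP == 115 */

	if (fd < 0)
		return -1;

	memset(&addr, 0, sizeof(addr));
	addr.l2tp_family = AF_INET6;
	addr.l2tp_addr = in6addr_any;		/* compared against laddr in __l2tp_ip6_bind_lookup() */
	addr.l2tp_conn_id = local_conn_id;	/* tunnel (connection) ID this socket receives on */

	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return -1;

	return fd;
}
#endif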
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/debugfs.h>
#include <linux/string_helpers.h>

#include <drm/drm_print.h>

#include "gt/intel_gt_debugfs.h"
#include "intel_guc_debugfs.h"
#include "intel_gsc_uc_debugfs.h"
#include "intel_huc_debugfs.h"
#include "intel_uc.h"
#include "intel_uc_debugfs.h"

static int uc_usage_show(struct seq_file *m, void *data)
{
	struct intel_uc *uc = m->private;
	struct drm_printer p = drm_seq_file_printer(m);

	drm_printf(&p, "[guc] supported:%s wanted:%s used:%s\n",
		   str_yes_no(intel_uc_supports_guc(uc)),
		   str_yes_no(intel_uc_wants_guc(uc)),
		   str_yes_no(intel_uc_uses_guc(uc)));
	drm_printf(&p, "[huc] supported:%s wanted:%s used:%s\n",
		   str_yes_no(intel_uc_supports_huc(uc)),
		   str_yes_no(intel_uc_wants_huc(uc)),
		   str_yes_no(intel_uc_uses_huc(uc)));
	drm_printf(&p, "[submission] supported:%s wanted:%s used:%s\n",
		   str_yes_no(intel_uc_supports_guc_submission(uc)),
		   str_yes_no(intel_uc_wants_guc_submission(uc)),
		   str_yes_no(intel_uc_uses_guc_submission(uc)));

	return 0;
}
DEFINE_INTEL_GT_DEBUGFS_ATTRIBUTE(uc_usage);

void intel_uc_debugfs_register(struct intel_uc *uc, struct dentry *gt_root)
{
	static const struct intel_gt_debugfs_file files[] = {
		{ "usage", &uc_usage_fops, NULL },
	};
	struct dentry *root;

	if (!gt_root)
		return;

	/* GuC and HuC go always in pair, no need to check both */
	if (!intel_uc_supports_guc(uc))
		return;

	root = debugfs_create_dir("uc", gt_root);
	if (IS_ERR(root))
		return;

	uc->guc.dbgfs_node = root;

	intel_gt_debugfs_register_files(root, files, ARRAY_SIZE(files), uc);

	intel_gsc_uc_debugfs_register(&uc->gsc, root);
	intel_guc_debugfs_register(&uc->guc, root);
	intel_huc_debugfs_register(&uc->huc, root);
}
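/*
 * Added note (not part of the file above): reading the "usage" file that
 * intel_uc_debugfs_register() creates under the GT's "uc" debugfs
 * directory (e.g. <debugfs>/dri/<card>/gt<N>/uc/usage; the exact parent
 * path depends on the kernel version) produces three lines of the form:
 *
 *	[guc] supported:yes wanted:yes used:yes
 *	[huc] supported:yes wanted:yes used:yes
 *	[submission] supported:yes wanted:yes used:yes
 *
 * with each value rendered by str_yes_no() from the corresponding
 * intel_uc_*() predicate in uc_usage_show().
 */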
/* * Copyright 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #ifndef __AMDGPU_GFX_H__ #define __AMDGPU_GFX_H__ /* * GFX stuff */ #include "clearstate_defs.h" #include "amdgpu_ring.h" #include "amdgpu_rlc.h" #include "amdgpu_imu.h" #include "soc15.h" #include "amdgpu_ras.h" #include "amdgpu_ring_mux.h" #include "amdgpu_xcp.h" /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L #define AMDGPU_GFX_SAFE_MODE 0x00000001L #define AMDGPU_GFX_PG_DISABLED_MODE 0x00000002L #define AMDGPU_GFX_CG_DISABLED_MODE 0x00000004L #define AMDGPU_GFX_LBPW_DISABLED_MODE 0x00000008L #define AMDGPU_MAX_GC_INSTANCES 8 #define AMDGPU_MAX_QUEUES 128 #define AMDGPU_MAX_GFX_QUEUES AMDGPU_MAX_QUEUES #define AMDGPU_MAX_COMPUTE_QUEUES AMDGPU_MAX_QUEUES enum amdgpu_gfx_pipe_priority { AMDGPU_GFX_PIPE_PRIO_NORMAL = AMDGPU_RING_PRIO_1, AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2 }; #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 enum amdgpu_gfx_partition { AMDGPU_SPX_PARTITION_MODE = 0, AMDGPU_DPX_PARTITION_MODE = 1, AMDGPU_TPX_PARTITION_MODE = 2, AMDGPU_QPX_PARTITION_MODE = 3, AMDGPU_CPX_PARTITION_MODE = 4, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE = -1, /* Automatically choose the right mode */ AMDGPU_AUTO_COMPUTE_PARTITION_MODE = -2, }; #define NUM_XCC(x) hweight16(x) enum amdgpu_gfx_ras_mem_id_type { AMDGPU_GFX_CP_MEM = 0, AMDGPU_GFX_GCEA_MEM, AMDGPU_GFX_GC_CANE_MEM, AMDGPU_GFX_GCUTCL2_MEM, AMDGPU_GFX_GDS_MEM, AMDGPU_GFX_LDS_MEM, AMDGPU_GFX_RLC_MEM, AMDGPU_GFX_SP_MEM, AMDGPU_GFX_SPI_MEM, AMDGPU_GFX_SQC_MEM, AMDGPU_GFX_SQ_MEM, AMDGPU_GFX_TA_MEM, AMDGPU_GFX_TCC_MEM, AMDGPU_GFX_TCA_MEM, AMDGPU_GFX_TCI_MEM, AMDGPU_GFX_TCP_MEM, AMDGPU_GFX_TD_MEM, AMDGPU_GFX_TCX_MEM, AMDGPU_GFX_ATC_L2_MEM, AMDGPU_GFX_UTCL2_MEM, AMDGPU_GFX_VML2_MEM, AMDGPU_GFX_VML2_WALKER_MEM, AMDGPU_GFX_MEM_TYPE_NUM }; struct amdgpu_mec { struct amdgpu_bo *hpd_eop_obj; u64 hpd_eop_gpu_addr; struct amdgpu_bo *mec_fw_obj; u64 mec_fw_gpu_addr; struct amdgpu_bo *mec_fw_data_obj; u64 mec_fw_data_gpu_addr; u32 num_mec; u32 num_pipe_per_mec; u32 num_queue_per_pipe; void *mqd_backup[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES]; }; struct amdgpu_mec_bitmap { /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); }; enum amdgpu_unmap_queues_action { PREEMPT_QUEUES = 0, RESET_QUEUES, DISABLE_PROCESS_QUEUES, PREEMPT_QUEUES_NO_UNMAP, }; struct kiq_pm4_funcs { /* Support ASIC-specific kiq pm4 packets*/ void 
(*kiq_set_resources)(struct amdgpu_ring *kiq_ring, uint64_t queue_mask); void (*kiq_map_queues)(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring); void (*kiq_unmap_queues)(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring, enum amdgpu_unmap_queues_action action, u64 gpu_addr, u64 seq); void (*kiq_query_status)(struct amdgpu_ring *kiq_ring, struct amdgpu_ring *ring, u64 addr, u64 seq); void (*kiq_invalidate_tlbs)(struct amdgpu_ring *kiq_ring, uint16_t pasid, uint32_t flush_type, bool all_hub); void (*kiq_reset_hw_queue)(struct amdgpu_ring *kiq_ring, uint32_t queue_type, uint32_t me_id, uint32_t pipe_id, uint32_t queue_id, uint32_t xcc_id, uint32_t vmid); /* Packet sizes */ int set_resources_size; int map_queues_size; int unmap_queues_size; int query_status_size; int invalidate_tlbs_size; }; struct amdgpu_kiq { u64 eop_gpu_addr; struct amdgpu_bo *eop_obj; spinlock_t ring_lock; struct amdgpu_ring ring; struct amdgpu_irq_src irq; const struct kiq_pm4_funcs *pmf; void *mqd_backup; }; /* * GFX configurations */ #define AMDGPU_GFX_MAX_SE 4 #define AMDGPU_GFX_MAX_SH_PER_SE 2 struct amdgpu_rb_config { uint32_t rb_backend_disable; uint32_t user_rb_backend_disable; uint32_t raster_config; uint32_t raster_config_1; }; struct gb_addr_config { uint16_t pipe_interleave_size; uint8_t num_pipes; uint8_t max_compress_frags; uint8_t num_banks; uint8_t num_se; uint8_t num_rb_per_se; uint8_t num_pkrs; }; struct amdgpu_gfx_config { unsigned max_shader_engines; unsigned max_tile_pipes; unsigned max_cu_per_sh; unsigned max_sh_per_se; unsigned max_backends_per_se; unsigned max_texture_channel_caches; unsigned max_gprs; unsigned max_gs_threads; unsigned max_hw_contexts; unsigned sc_prim_fifo_size_frontend; unsigned sc_prim_fifo_size_backend; unsigned sc_hiz_tile_fifo_size; unsigned sc_earlyz_tile_fifo_size; unsigned num_tile_pipes; unsigned backend_enable_mask; unsigned mem_max_burst_length_bytes; unsigned mem_row_size_in_kb; unsigned shader_engine_tile_size; unsigned num_gpus; unsigned multi_gpu_tile_size; unsigned mc_arb_ramcfg; unsigned num_banks; unsigned num_ranks; unsigned gb_addr_config; unsigned num_rbs; unsigned gs_vgt_table_depth; unsigned gs_prim_buffer_depth; uint32_t tile_mode_array[32]; uint32_t macrotile_mode_array[16]; struct gb_addr_config gb_addr_config_fields; struct amdgpu_rb_config rb_config[AMDGPU_GFX_MAX_SE][AMDGPU_GFX_MAX_SH_PER_SE]; /* gfx configure feature */ uint32_t double_offchip_lds_buf; /* cached value of DB_DEBUG2 */ uint32_t db_debug2; /* gfx10 specific config */ uint32_t num_sc_per_sh; uint32_t num_packer_per_sc; uint32_t pa_sc_tile_steering_override; /* Whether texture coordinate truncation is conformant. 
*/ bool ta_cntl2_truncate_coord_mode; uint64_t tcc_disabled_mask; uint32_t gc_num_tcp_per_sa; uint32_t gc_num_sdp_interface; uint32_t gc_num_tcps; uint32_t gc_num_tcp_per_wpg; uint32_t gc_tcp_l1_size; uint32_t gc_num_sqc_per_wgp; uint32_t gc_l1_instruction_cache_size_per_sqc; uint32_t gc_l1_data_cache_size_per_sqc; uint32_t gc_gl1c_per_sa; uint32_t gc_gl1c_size_per_instance; uint32_t gc_gl2c_per_gpu; uint32_t gc_tcp_size_per_cu; uint32_t gc_num_cu_per_sqc; uint32_t gc_tcc_size; uint32_t gc_tcp_cache_line_size; uint32_t gc_instruction_cache_size_per_sqc; uint32_t gc_instruction_cache_line_size; uint32_t gc_scalar_data_cache_size_per_sqc; uint32_t gc_scalar_data_cache_line_size; uint32_t gc_tcc_cache_line_size; }; struct amdgpu_cu_info { uint32_t simd_per_cu; uint32_t max_waves_per_simd; uint32_t wave_front_size; uint32_t max_scratch_slots_per_cu; uint32_t lds_size; /* total active CU number */ uint32_t number; uint32_t ao_cu_mask; uint32_t ao_cu_bitmap[4][4]; uint32_t bitmap[AMDGPU_MAX_GC_INSTANCES][4][4]; }; struct amdgpu_gfx_ras { struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); int (*rlc_gc_fed_irq)(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); int (*poison_consumption_handler)(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); }; struct amdgpu_gfx_shadow_info { u32 shadow_size; u32 shadow_alignment; u32 csa_size; u32 csa_alignment; }; struct amdgpu_gfx_funcs { /* get the gpu clock counter */ uint64_t (*get_gpu_clock_counter)(struct amdgpu_device *adev); void (*select_se_sh)(struct amdgpu_device *adev, u32 se_num, u32 sh_num, u32 instance, int xcc_id); void (*read_wave_data)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields); void (*read_wave_vgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t thread, uint32_t start, uint32_t size, uint32_t *dst); void (*read_wave_sgprs)(struct amdgpu_device *adev, uint32_t xcc_id, uint32_t simd, uint32_t wave, uint32_t start, uint32_t size, uint32_t *dst); void (*select_me_pipe_q)(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid, u32 xcc_id); void (*init_spm_golden)(struct amdgpu_device *adev); void (*update_perfmon_mgcg)(struct amdgpu_device *adev, bool enable); int (*get_gfx_shadow_info)(struct amdgpu_device *adev, struct amdgpu_gfx_shadow_info *shadow_info); enum amdgpu_gfx_partition (*query_partition_mode)(struct amdgpu_device *adev); int (*switch_partition_mode)(struct amdgpu_device *adev, int num_xccs_per_xcp); int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node); int (*get_xccs_per_xcp)(struct amdgpu_device *adev); }; struct sq_work { struct work_struct work; unsigned ih_data; }; struct amdgpu_pfp { struct amdgpu_bo *pfp_fw_obj; uint64_t pfp_fw_gpu_addr; uint32_t *pfp_fw_ptr; struct amdgpu_bo *pfp_fw_data_obj; uint64_t pfp_fw_data_gpu_addr; uint32_t *pfp_fw_data_ptr; }; struct amdgpu_ce { struct amdgpu_bo *ce_fw_obj; uint64_t ce_fw_gpu_addr; uint32_t *ce_fw_ptr; }; struct amdgpu_me { struct amdgpu_bo *me_fw_obj; uint64_t me_fw_gpu_addr; uint32_t *me_fw_ptr; struct amdgpu_bo *me_fw_data_obj; uint64_t me_fw_data_gpu_addr; uint32_t *me_fw_data_ptr; uint32_t num_me; uint32_t num_pipe_per_me; uint32_t num_queue_per_pipe; void *mqd_backup[AMDGPU_MAX_GFX_RINGS]; /* These are the resources for which amdgpu takes ownership */ DECLARE_BITMAP(queue_bitmap, AMDGPU_MAX_GFX_QUEUES); }; struct amdgpu_isolation_work { struct 
amdgpu_device *adev; u32 xcp_id; struct delayed_work work; }; struct amdgpu_gfx { struct mutex gpu_clock_mutex; struct amdgpu_gfx_config config; struct amdgpu_rlc rlc; struct amdgpu_pfp pfp; struct amdgpu_ce ce; struct amdgpu_me me; struct amdgpu_mec mec; struct amdgpu_mec_bitmap mec_bitmap[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_kiq kiq[AMDGPU_MAX_GC_INSTANCES]; struct amdgpu_imu imu; bool rs64_enable; /* firmware format */ const struct firmware *me_fw; /* ME firmware */ uint32_t me_fw_version; const struct firmware *pfp_fw; /* PFP firmware */ uint32_t pfp_fw_version; const struct firmware *ce_fw; /* CE firmware */ uint32_t ce_fw_version; const struct firmware *rlc_fw; /* RLC firmware */ uint32_t rlc_fw_version; const struct firmware *mec_fw; /* MEC firmware */ uint32_t mec_fw_version; const struct firmware *mec2_fw; /* MEC2 firmware */ uint32_t mec2_fw_version; const struct firmware *imu_fw; /* IMU firmware */ uint32_t imu_fw_version; uint32_t me_feature_version; uint32_t ce_feature_version; uint32_t pfp_feature_version; uint32_t rlc_feature_version; uint32_t rlc_srlc_fw_version; uint32_t rlc_srlc_feature_version; uint32_t rlc_srlg_fw_version; uint32_t rlc_srlg_feature_version; uint32_t rlc_srls_fw_version; uint32_t rlc_srls_feature_version; uint32_t rlcp_ucode_version; uint32_t rlcp_ucode_feature_version; uint32_t rlcv_ucode_version; uint32_t rlcv_ucode_feature_version; uint32_t mec_feature_version; uint32_t mec2_feature_version; bool mec_fw_write_wait; bool me_fw_write_wait; bool cp_fw_write_wait; struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; unsigned num_gfx_rings; struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS * AMDGPU_MAX_GC_INSTANCES]; unsigned num_compute_rings; struct amdgpu_irq_src eop_irq; struct amdgpu_irq_src priv_reg_irq; struct amdgpu_irq_src priv_inst_irq; struct amdgpu_irq_src bad_op_irq; struct amdgpu_irq_src cp_ecc_error_irq; struct amdgpu_irq_src sq_irq; struct amdgpu_irq_src rlc_gc_fed_irq; struct sq_work sq_work; /* gfx status */ uint32_t gfx_current_status; /* ce ram size*/ unsigned ce_ram_size; struct amdgpu_cu_info cu_info; const struct amdgpu_gfx_funcs *funcs; /* reset mask */ uint32_t grbm_soft_reset; uint32_t srbm_soft_reset; uint32_t gfx_supported_reset; uint32_t compute_supported_reset; /* gfx off */ bool gfx_off_state; /* true: enabled, false: disabled */ struct mutex gfx_off_mutex; /* mutex to change gfxoff state */ uint32_t gfx_off_req_count; /* default 1, enable gfx off: dec 1, disable gfx off: add 1 */ struct delayed_work gfx_off_delay_work; /* async work to set gfx block off */ uint32_t gfx_off_residency; /* last logged residency */ uint64_t gfx_off_entrycount; /* count of times GPU has get into GFXOFF state */ /* pipe reservation */ struct mutex pipe_reserve_mutex; DECLARE_BITMAP (pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES); /*ras */ struct ras_common_if *ras_if; struct amdgpu_gfx_ras *ras; bool is_poweron; struct amdgpu_ring sw_gfx_ring[AMDGPU_MAX_SW_GFX_RINGS]; struct amdgpu_ring_mux muxer; bool cp_gfx_shadow; /* for gfx11 */ uint16_t xcc_mask; uint32_t num_xcc_per_xcp; struct mutex partition_mutex; bool mcbp; /* mid command buffer preemption */ /* IP reg dump */ uint32_t *ip_dump_core; uint32_t *ip_dump_compute_queues; uint32_t *ip_dump_gfx_queues; struct mutex reset_sem_mutex; /* cleaner shader */ struct amdgpu_bo *cleaner_shader_obj; unsigned int cleaner_shader_size; u64 cleaner_shader_gpu_addr; void *cleaner_shader_cpu_ptr; const void *cleaner_shader_ptr; bool enable_cleaner_shader; struct amdgpu_isolation_work 
enforce_isolation[MAX_XCP]; /* Mutex for synchronizing KFD scheduler operations */ struct mutex kfd_sch_mutex; u64 kfd_sch_req_count[MAX_XCP]; bool kfd_sch_inactive[MAX_XCP]; unsigned long enforce_isolation_jiffies[MAX_XCP]; unsigned long enforce_isolation_time[MAX_XCP]; }; struct amdgpu_gfx_ras_reg_entry { struct amdgpu_ras_err_status_reg_entry reg_entry; enum amdgpu_gfx_ras_mem_id_type mem_id_type; uint32_t se_num; }; struct amdgpu_gfx_ras_mem_id_entry { const struct amdgpu_ras_memory_id_entry *mem_id_ent; uint32_t size; }; #define AMDGPU_GFX_MEMID_ENT(x) {(x), ARRAY_SIZE(x)}, #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) #define amdgpu_gfx_select_se_sh(adev, se, sh, instance, xcc_id) ((adev)->gfx.funcs->select_se_sh((adev), (se), (sh), (instance), (xcc_id))) #define amdgpu_gfx_select_me_pipe_q(adev, me, pipe, q, vmid, xcc_id) ((adev)->gfx.funcs->select_me_pipe_q((adev), (me), (pipe), (q), (vmid), (xcc_id))) #define amdgpu_gfx_init_spm_golden(adev) (adev)->gfx.funcs->init_spm_golden((adev)) #define amdgpu_gfx_get_gfx_shadow_info(adev, si) ((adev)->gfx.funcs->get_gfx_shadow_info((adev), (si))) /** * amdgpu_gfx_create_bitmask - create a bitmask * * @bit_width: length of the mask * * create a variable length bit mask. * Returns the bitmask. */ static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width) { return (u32)((1ULL << bit_width) - 1); } void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh); int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring); void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_kiq_init(struct amdgpu_device *adev, unsigned hpd_size, int xcc_id); int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev, unsigned mqd_size, int xcc_id); void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_disable_kgq(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_enable_kgq(struct amdgpu_device *adev, int xcc_id); void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev); void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev); int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec, int pipe, int queue); void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit, int *mec, int *pipe, int *queue); bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int xcc_id, int mec, int pipe, int queue); bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); bool amdgpu_gfx_is_high_priority_graphics_queue(struct amdgpu_device *adev, struct amdgpu_ring *ring); int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me, int pipe, int queue); bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, struct ras_common_if *ras_block); void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); int amdgpu_get_gfx_off_entrycount(struct amdgpu_device *adev, u64 *value); int amdgpu_get_gfx_off_residency(struct amdgpu_device *adev, u32 *residency); int amdgpu_set_gfx_off_residency(struct amdgpu_device *adev, bool value); int 
amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, struct amdgpu_iv_entry *entry); int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg, uint32_t xcc_id); void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t xcc_id); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); void amdgpu_gfx_cp_init_microcode(struct amdgpu_device *adev, uint32_t ucode_id); int amdgpu_gfx_ras_sw_init(struct amdgpu_device *adev); int amdgpu_gfx_poison_consumption_handler(struct amdgpu_device *adev, struct amdgpu_iv_entry *entry); bool amdgpu_gfx_is_master_xcc(struct amdgpu_device *adev, int xcc_id); int amdgpu_gfx_sysfs_init(struct amdgpu_device *adev); void amdgpu_gfx_sysfs_fini(struct amdgpu_device *adev); void amdgpu_gfx_ras_error_func(struct amdgpu_device *adev, void *ras_error_status, void (*func)(struct amdgpu_device *adev, void *ras_error_status, int xcc_id)); int amdgpu_gfx_cleaner_shader_sw_init(struct amdgpu_device *adev, unsigned int cleaner_shader_size); void amdgpu_gfx_cleaner_shader_sw_fini(struct amdgpu_device *adev); void amdgpu_gfx_cleaner_shader_init(struct amdgpu_device *adev, unsigned int cleaner_shader_size, const void *cleaner_shader_ptr); void amdgpu_gfx_enforce_isolation_handler(struct work_struct *work); void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring); void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring); void amdgpu_debugfs_gfx_sched_mask_init(struct amdgpu_device *adev); void amdgpu_debugfs_compute_sched_mask_init(struct amdgpu_device *adev); static inline const char *amdgpu_gfx_compute_mode_desc(int mode) { switch (mode) { case AMDGPU_SPX_PARTITION_MODE: return "SPX"; case AMDGPU_DPX_PARTITION_MODE: return "DPX"; case AMDGPU_TPX_PARTITION_MODE: return "TPX"; case AMDGPU_QPX_PARTITION_MODE: return "QPX"; case AMDGPU_CPX_PARTITION_MODE: return "CPX"; default: return "UNKNOWN"; } } #endif
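/*
 * A minimal, user-space sketch (not amdgpu code) of what the
 * amdgpu_gfx_create_bitmask() helper declared above evaluates to: the mask
 * is built in 64-bit arithmetic so that a bit_width of 32 still yields
 * 0xffffffff without shifting a 32-bit value by 32 (undefined behaviour).
 * All names below are local to this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t create_bitmask(uint32_t bit_width)
{
	/* same expression as amdgpu_gfx_create_bitmask() above */
	return (uint32_t)((1ULL << bit_width) - 1);
}

int main(void)
{
	printf("%08x %08x %08x\n",
	       (unsigned)create_bitmask(0),	/* 00000000 */
	       (unsigned)create_bitmask(10),	/* 000003ff */
	       (unsigned)create_bitmask(32));	/* ffffffff */
	return 0;
}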
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _VIDEO_ATAFB_UTILS_H #define _VIDEO_ATAFB_UTILS_H /* ================================================================= */ /* Utility Assembler Functions */ /* ================================================================= */ /* ====================================================================== */ /* Those of a delicate disposition might like to skip the next couple of * pages. * * These functions are drop in replacements for memmove and * memset(_, 0, _). However their five instances add at least a kilobyte * to the object file. You have been warned. * * Not a great fan of assembler for the sake of it, but I think * that these routines are at least 10 times faster than their C * equivalents for large blits, and that's important to the lowest level of * a graphics driver. Question is whether some scheme with the blitter * would be faster. I suspect not for simple text system - not much * asynchrony. * * Code is very simple, just gruesome expansion. Basic strategy is to * increase data moved/cleared at each step to 16 bytes to reduce * instruction per data move overhead. movem might be faster still * For more than 15 bytes, we try to align the write direction on a * longword boundary to get maximum speed. This is even more gruesome. * Unaligned read/write used requires 68020+ - think this is a problem? * * Sorry! */ /* ++roman: I've optimized Robert's original versions in some minor * aspects, e.g. moveq instead of movel, let gcc choose the registers, * use movem in some places... * For other modes than 1 plane, lots of more such assembler functions * were needed (e.g. the ones using movep or expanding color values). */ /* ++andreas: more optimizations: subl #65536,d0 replaced by clrw d0; subql #1,d0 for dbcc addal is faster than addaw movep is rather expensive compared to ordinary move's some functions rewritten in C for clarity, no speed loss */ static inline void *fb_memclear_small(void *s, size_t count) { if (!count) return 0; asm volatile ("\n" " lsr.l #1,%1 ; jcc 1f ; move.b %2,-(%0)\n" "1: lsr.l #1,%1 ; jcc 1f ; move.w %2,-(%0)\n" "1: lsr.l #1,%1 ; jcc 1f ; move.l %2,-(%0)\n" "1: lsr.l #1,%1 ; jcc 1f ; move.l %2,-(%0) ; move.l %2,-(%0)\n" "1:" : "=a" (s), "=d" (count) : "d" (0), "0" ((char *)s + count), "1" (count)); asm volatile ("\n" " subq.l #1,%1\n" " jcs 3f\n" " move.l %2,%%d4; move.l %2,%%d5; move.l %2,%%d6\n" "2: movem.l %2/%%d4/%%d5/%%d6,-(%0)\n" " dbra %1,2b\n" "3:" : "=a" (s), "=d" (count) : "d" (0), "0" (s), "1" (count) : "d4", "d5", "d6" ); return 0; } static inline void *fb_memclear(void *s, size_t count) { if (!count) return 0; if (count < 16) { asm volatile ("\n" " lsr.l #1,%1 ; jcc 1f ; clr.b (%0)+\n" "1: lsr.l #1,%1 ; jcc 1f ; clr.w (%0)+\n" "1: lsr.l #1,%1 ; jcc 1f ; clr.l (%0)+\n" "1: lsr.l #1,%1 ; jcc 1f ; clr.l (%0)+ ; clr.l (%0)+\n" "1:" : "=a" (s), "=d" (count) : "0" (s), "1" (count)); } else { long tmp; asm volatile ("\n" " move.l %1,%2\n" " lsr.l #1,%2 ; jcc 1f ; clr.b (%0)+ ; subq.w #1,%1\n" " lsr.l #1,%2 ; jcs 2f\n" /* %0 increased=>bit 2 switched*/ " clr.w (%0)+ ; subq.w #2,%1 ; jra 2f\n" "1: lsr.l #1,%2 ; jcc 2f\n" " clr.w (%0)+ ; subq.w #2,%1\n" "2: move.w %1,%2; lsr.l #2,%1 ; jeq 6f\n" " lsr.l #1,%1 ; jcc 3f ; clr.l (%0)+\n" "3: lsr.l #1,%1 ; jcc 4f ; clr.l (%0)+ ; clr.l (%0)+\n" "4: subq.l #1,%1 ; jcs 6f\n" "5: clr.l (%0)+; clr.l (%0)+ ; clr.l (%0)+ ; clr.l (%0)+\n" " dbra %1,5b ; clr.w %1; subq.l #1,%1; jcc 5b\n" "6: move.w %2,%1; btst #1,%1 ; jeq 7f ; clr.w (%0)+\n" "7: btst #0,%1 ; jeq 8f ; clr.b 
(%0)+\n" "8:" : "=a" (s), "=d" (count), "=d" (tmp) : "0" (s), "1" (count)); } return 0; } static inline void *fb_memset255(void *s, size_t count) { if (!count) return 0; asm volatile ("\n" " lsr.l #1,%1 ; jcc 1f ; move.b %2,-(%0)\n" "1: lsr.l #1,%1 ; jcc 1f ; move.w %2,-(%0)\n" "1: lsr.l #1,%1 ; jcc 1f ; move.l %2,-(%0)\n" "1: lsr.l #1,%1 ; jcc 1f ; move.l %2,-(%0) ; move.l %2,-(%0)\n" "1:" : "=a" (s), "=d" (count) : "d" (-1), "0" ((char *)s+count), "1" (count)); asm volatile ("\n" " subq.l #1,%1 ; jcs 3f\n" " move.l %2,%%d4; move.l %2,%%d5; move.l %2,%%d6\n" "2: movem.l %2/%%d4/%%d5/%%d6,-(%0)\n" " dbra %1,2b\n" "3:" : "=a" (s), "=d" (count) : "d" (-1), "0" (s), "1" (count) : "d4", "d5", "d6"); return 0; } static inline void *fb_memmove(void *d, const void *s, size_t count) { if (d < s) { if (count < 16) { asm volatile ("\n" " lsr.l #1,%2 ; jcc 1f ; move.b (%1)+,(%0)+\n" "1: lsr.l #1,%2 ; jcc 1f ; move.w (%1)+,(%0)+\n" "1: lsr.l #1,%2 ; jcc 1f ; move.l (%1)+,(%0)+\n" "1: lsr.l #1,%2 ; jcc 1f ; move.l (%1)+,(%0)+ ; move.l (%1)+,(%0)+\n" "1:" : "=a" (d), "=a" (s), "=d" (count) : "0" (d), "1" (s), "2" (count)); } else { long tmp; asm volatile ("\n" " move.l %0,%3\n" " lsr.l #1,%3 ; jcc 1f ; move.b (%1)+,(%0)+ ; subqw #1,%2\n" " lsr.l #1,%3 ; jcs 2f\n" /* %0 increased=>bit 2 switched*/ " move.w (%1)+,(%0)+ ; subqw #2,%2 ; jra 2f\n" "1: lsr.l #1,%3 ; jcc 2f\n" " move.w (%1)+,(%0)+ ; subqw #2,%2\n" "2: move.w %2,%-; lsr.l #2,%2 ; jeq 6f\n" " lsr.l #1,%2 ; jcc 3f ; move.l (%1)+,(%0)+\n" "3: lsr.l #1,%2 ; jcc 4f ; move.l (%1)+,(%0)+ ; move.l (%1)+,(%0)+\n" "4: subq.l #1,%2 ; jcs 6f\n" "5: move.l (%1)+,(%0)+; move.l (%1)+,(%0)+\n" " move.l (%1)+,(%0)+; move.l (%1)+,(%0)+\n" " dbra %2,5b ; clr.w %2; subq.l #1,%2; jcc 5b\n" "6: move.w %+,%2; btst #1,%2 ; jeq 7f ; move.w (%1)+,(%0)+\n" "7: btst #0,%2 ; jeq 8f ; move.b (%1)+,(%0)+\n" "8:" : "=a" (d), "=a" (s), "=d" (count), "=d" (tmp) : "0" (d), "1" (s), "2" (count)); } } else { if (count < 16) { asm volatile ("\n" " lsr.l #1,%2 ; jcc 1f ; move.b -(%1),-(%0)\n" "1: lsr.l #1,%2 ; jcc 1f ; move.w -(%1),-(%0)\n" "1: lsr.l #1,%2 ; jcc 1f ; move.l -(%1),-(%0)\n" "1: lsr.l #1,%2 ; jcc 1f ; move.l -(%1),-(%0) ; move.l -(%1),-(%0)\n" "1:" : "=a" (d), "=a" (s), "=d" (count) : "0" ((char *) d + count), "1" ((char *) s + count), "2" (count)); } else { long tmp; asm volatile ("\n" " move.l %0,%3\n" " lsr.l #1,%3 ; jcc 1f ; move.b -(%1),-(%0) ; subqw #1,%2\n" " lsr.l #1,%3 ; jcs 2f\n" /* %0 increased=>bit 2 switched*/ " move.w -(%1),-(%0) ; subqw #2,%2 ; jra 2f\n" "1: lsr.l #1,%3 ; jcc 2f\n" " move.w -(%1),-(%0) ; subqw #2,%2\n" "2: move.w %2,%-; lsr.l #2,%2 ; jeq 6f\n" " lsr.l #1,%2 ; jcc 3f ; move.l -(%1),-(%0)\n" "3: lsr.l #1,%2 ; jcc 4f ; move.l -(%1),-(%0) ; move.l -(%1),-(%0)\n" "4: subq.l #1,%2 ; jcs 6f\n" "5: move.l -(%1),-(%0); move.l -(%1),-(%0)\n" " move.l -(%1),-(%0); move.l -(%1),-(%0)\n" " dbra %2,5b ; clr.w %2; subq.l #1,%2; jcc 5b\n" "6: move.w %+,%2; btst #1,%2 ; jeq 7f ; move.w -(%1),-(%0)\n" "7: btst #0,%2 ; jeq 8f ; move.b -(%1),-(%0)\n" "8:" : "=a" (d), "=a" (s), "=d" (count), "=d" (tmp) : "0" ((char *) d + count), "1" ((char *) s + count), "2" (count)); } } return 0; } /* ++andreas: Simple and fast version of memmove, assumes size is divisible by 16, suitable for moving the whole screen bitplane */ static inline void fast_memmove(char *dst, const char *src, size_t size) { if (!size) return; if (dst < src) asm volatile ("\n" "1: movem.l (%0)+,%%d0/%%d1/%%a0/%%a1\n" " movem.l %%d0/%%d1/%%a0/%%a1,%1@\n" " addq.l #8,%1; addq.l #8,%1\n" " dbra 
%2,1b\n" " clr.w %2; subq.l #1,%2\n" " jcc 1b" : "=a" (src), "=a" (dst), "=d" (size) : "0" (src), "1" (dst), "2" (size / 16 - 1) : "d0", "d1", "a0", "a1", "memory"); else asm volatile ("\n" "1: subq.l #8,%0; subq.l #8,%0\n" " movem.l %0@,%%d0/%%d1/%%a0/%%a1\n" " movem.l %%d0/%%d1/%%a0/%%a1,-(%1)\n" " dbra %2,1b\n" " clr.w %2; subq.l #1,%2\n" " jcc 1b" : "=a" (src), "=a" (dst), "=d" (size) : "0" (src + size), "1" (dst + size), "2" (size / 16 - 1) : "d0", "d1", "a0", "a1", "memory"); } #ifdef BPL /* * This expands a up to 8 bit color into two longs * for movel operations. */ static const u32 four2long[] = { 0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff, 0x00ff0000, 0x00ff00ff, 0x00ffff00, 0x00ffffff, 0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff, 0xffff0000, 0xffff00ff, 0xffffff00, 0xffffffff, }; static inline void expand8_col2mask(u8 c, u32 m[]) { m[0] = four2long[c & 15]; #if BPL > 4 m[1] = four2long[c >> 4]; #endif } static inline void expand8_2col2mask(u8 fg, u8 bg, u32 fgm[], u32 bgm[]) { fgm[0] = four2long[fg & 15] ^ (bgm[0] = four2long[bg & 15]); #if BPL > 4 fgm[1] = four2long[fg >> 4] ^ (bgm[1] = four2long[bg >> 4]); #endif } /* * set an 8bit value to a color */ static inline void fill8_col(u8 *dst, u32 m[]) { u32 tmp = m[0]; dst[0] = tmp; dst[2] = (tmp >>= 8); #if BPL > 2 dst[4] = (tmp >>= 8); dst[6] = tmp >> 8; #endif #if BPL > 4 tmp = m[1]; dst[8] = tmp; dst[10] = (tmp >>= 8); dst[12] = (tmp >>= 8); dst[14] = tmp >> 8; #endif } /* * set an 8bit value according to foreground/background color */ static inline void fill8_2col(u8 *dst, u8 fg, u8 bg, u32 mask) { u32 fgm[2], bgm[2], tmp; expand8_2col2mask(fg, bg, fgm, bgm); mask |= mask << 8; #if BPL > 2 mask |= mask << 16; #endif tmp = (mask & fgm[0]) ^ bgm[0]; dst[0] = tmp; dst[2] = (tmp >>= 8); #if BPL > 2 dst[4] = (tmp >>= 8); dst[6] = tmp >> 8; #endif #if BPL > 4 tmp = (mask & fgm[1]) ^ bgm[1]; dst[8] = tmp; dst[10] = (tmp >>= 8); dst[12] = (tmp >>= 8); dst[14] = tmp >> 8; #endif } static const u32 two2word[] = { 0x00000000, 0xffff0000, 0x0000ffff, 0xffffffff }; static inline void expand16_col2mask(u8 c, u32 m[]) { m[0] = two2word[c & 3]; #if BPL > 2 m[1] = two2word[(c >> 2) & 3]; #endif #if BPL > 4 m[2] = two2word[(c >> 4) & 3]; m[3] = two2word[c >> 6]; #endif } static inline void expand16_2col2mask(u8 fg, u8 bg, u32 fgm[], u32 bgm[]) { bgm[0] = two2word[bg & 3]; fgm[0] = two2word[fg & 3] ^ bgm[0]; #if BPL > 2 bgm[1] = two2word[(bg >> 2) & 3]; fgm[1] = two2word[(fg >> 2) & 3] ^ bgm[1]; #endif #if BPL > 4 bgm[2] = two2word[(bg >> 4) & 3]; fgm[2] = two2word[(fg >> 4) & 3] ^ bgm[2]; bgm[3] = two2word[bg >> 6]; fgm[3] = two2word[fg >> 6] ^ bgm[3]; #endif } static inline u32 *fill16_col(u32 *dst, int rows, u32 m[]) { while (rows) { *dst++ = m[0]; #if BPL > 2 *dst++ = m[1]; #endif #if BPL > 4 *dst++ = m[2]; *dst++ = m[3]; #endif rows--; } return dst; } static inline void memmove32_col(void *dst, void *src, u32 mask, u32 h, u32 bytes) { u32 *s, *d, v; s = src; d = dst; do { v = (*s++ & mask) | (*d & ~mask); *d++ = v; #if BPL > 2 v = (*s++ & mask) | (*d & ~mask); *d++ = v; #endif #if BPL > 4 v = (*s++ & mask) | (*d & ~mask); *d++ = v; v = (*s++ & mask) | (*d & ~mask); *d++ = v; #endif d = (u32 *)((u8 *)d + bytes); s = (u32 *)((u8 *)s + bytes); } while (--h); } #endif #endif /* _VIDEO_ATAFB_UTILS_H */
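/*
 * A small stand-alone sketch (not part of the driver code above) of the
 * expansion encoded in the four2long[] table: each of the four low bits of
 * a colour value selects whether the corresponding byte of the 32-bit mask
 * is 0x00 or 0xff, i.e. one byte per bitplane, which is what lets
 * fill8_col() peel the mask apart byte by byte.  Names are local to this
 * sketch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t expand_nibble(uint8_t c)
{
	uint32_t m = 0;
	int plane;

	for (plane = 0; plane < 4; plane++)
		if (c & (1u << plane))
			m |= 0xffu << (8 * plane);
	return m;
}

int main(void)
{
	/* colour 0x5 sets planes 0 and 2 -> 0x00ff00ff, matching four2long[5] */
	printf("%08x\n", (unsigned)expand_nibble(0x5));
	return 0;
}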
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (C) 2023 Arm Ltd. */ /dts-v1/; #include "sun50i-h616.dtsi" #include "sun50i-h616-cpu-opp.dtsi" #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/leds/common.h> / { model = "OrangePi Zero 2W"; compatible = "xunlong,orangepi-zero2w", "allwinner,sun50i-h618"; aliases { serial0 = &uart0; }; chosen { stdout-path = "serial0:115200n8"; }; leds { compatible = "gpio-leds"; led-0 { function = LED_FUNCTION_STATUS; color = <LED_COLOR_ID_GREEN>; gpios = <&pio 2 13 GPIO_ACTIVE_HIGH>; /* PC13 */ }; }; reg_vcc5v: vcc5v { /* board wide 5V supply directly from the USB-C socket */ compatible = "regulator-fixed"; regulator-name = "vcc-5v"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; regulator-always-on; }; reg_vcc3v3: vcc3v3 { /* SY8089 DC/DC converter */ compatible = "regulator-fixed"; regulator-name = "vcc-3v3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; vin-supply = <&reg_vcc5v>; regulator-always-on; }; }; &codec { allwinner,audio-routing = "Line Out", "LINEOUT"; status = "okay"; }; &cpu0 { cpu-supply = <&reg_dcdc2>; }; &ehci1 { status = "okay"; }; /* USB 2 & 3 are on the FPC connector (or the expansion board) */ &mmc0 { cd-gpios = <&pio 5 6 GPIO_ACTIVE_LOW>; /* PF6 */ bus-width = <4>; vmmc-supply = <&reg_vcc3v3>; status = "okay"; }; &ohci1 { status = "okay"; }; &pio { vcc-pc-supply = <&reg_dldo1>; vcc-pf-supply = <&reg_dldo1>; /* internally via VCC-IO */ vcc-pg-supply = <&reg_aldo1>; vcc-ph-supply = <&reg_dldo1>; /* internally via VCC-IO */ vcc-pi-supply = <&reg_dldo1>; }; &r_i2c { status = "okay"; axp313: pmic@36 { compatible = "x-powers,axp313a"; reg = <0x36>; #interrupt-cells = <1>; interrupt-controller; interrupt-parent = <&pio>; interrupts = <2 9 IRQ_TYPE_LEVEL_LOW>; /* PC9 */ vin1-supply = <&reg_vcc5v>; vin2-supply = <&reg_vcc5v>; vin3-supply = <&reg_vcc5v>; regulators { /* Supplies VCC-PLL and DRAM */ reg_aldo1: aldo1 { regulator-always-on; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-name = "vcc1v8"; }; /* Supplies VCC-IO, so needs to be always on. */ reg_dldo1: dldo1 { regulator-always-on; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; regulator-name = "vcc3v3"; }; reg_dcdc1: dcdc1 { regulator-always-on; regulator-min-microvolt = <810000>; regulator-max-microvolt = <990000>; regulator-name = "vdd-gpu-sys"; }; reg_dcdc2: dcdc2 { regulator-always-on; regulator-min-microvolt = <810000>; regulator-max-microvolt = <1100000>; regulator-name = "vdd-cpu"; }; reg_dcdc3: dcdc3 { regulator-always-on; regulator-min-microvolt = <1100000>; regulator-max-microvolt = <1100000>; regulator-name = "vdd-dram"; }; }; }; }; &spi0 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>, <&spi0_cs0_pin>; flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <40000000>; }; }; &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_ph_pins>; status = "okay"; }; &usbotg { /* * PHY0 pins are connected to a USB-C socket, but a role switch * is not implemented: both CC pins are pulled to GND. * The VBUS pins power the device, so a fixed peripheral mode * is the best choice. * The board can be powered via GPIOs, in this case port0 *can* * act as a host (with a cable/adapter ignoring CC), as VBUS is * then provided by the GPIOs.
Any user of this setup would * need to adjust the DT accordingly: dr_mode set to "host", * enabling OHCI0 and EHCI0. */ dr_mode = "peripheral"; status = "okay"; }; &usbphy { usb1_vbus-supply = <&reg_vcc5v>; status = "okay"; };
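/*
 * Hedged illustration only, not part of the shipped board DT: the
 * adjustment described in the &usbotg comment above would look roughly like
 * the fragment below, assuming the ehci0/ohci0 labels provided by
 * sun50i-h616.dtsi and VBUS being supplied externally via the GPIO header.
 */
&ehci0 {
	status = "okay";
};

&ohci0 {
	status = "okay";
};

&usbotg {
	dr_mode = "host";
};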
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Device driver for the SYMBIOS/LSILOGIC 53C8XX and 53C1010 family * of PCI-SCSI IO processors. * * Copyright (C) 1999-2001 Gerard Roudier <[email protected]> * * This driver is derived from the Linux sym53c8xx driver. * Copyright (C) 1998-2000 Gerard Roudier * * The sym53c8xx driver is derived from the ncr53c8xx driver that had been * a port of the FreeBSD ncr driver to Linux-1.2.13. * * The original ncr driver has been written for 386bsd and FreeBSD by * Wolfgang Stanglmeier <[email protected]> * Stefan Esser <[email protected]> * Copyright (C) 1994 Wolfgang Stanglmeier * * Other major contributions: * * NVRAM detection and reading. * Copyright (C) 1997 Richard Waltham <[email protected]> * *----------------------------------------------------------------------------- */ #ifndef SYM_MISC_H #define SYM_MISC_H /* * A la VMS/CAM-3 queue management. */ typedef struct sym_quehead { struct sym_quehead *flink; /* Forward pointer */ struct sym_quehead *blink; /* Backward pointer */ } SYM_QUEHEAD; #define sym_que_init(ptr) do { \ (ptr)->flink = (ptr); (ptr)->blink = (ptr); \ } while (0) static inline struct sym_quehead *sym_que_first(struct sym_quehead *head) { return (head->flink == head) ? 0 : head->flink; } static inline struct sym_quehead *sym_que_last(struct sym_quehead *head) { return (head->blink == head) ? 0 : head->blink; } static inline void __sym_que_add(struct sym_quehead * new, struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = new; new->flink = flink; new->blink = blink; blink->flink = new; } static inline void __sym_que_del(struct sym_quehead * blink, struct sym_quehead * flink) { flink->blink = blink; blink->flink = flink; } static inline int sym_que_empty(struct sym_quehead *head) { return head->flink == head; } static inline void sym_que_splice(struct sym_quehead *list, struct sym_quehead *head) { struct sym_quehead *first = list->flink; if (first != list) { struct sym_quehead *last = list->blink; struct sym_quehead *at = head->flink; first->blink = head; head->flink = first; last->flink = at; at->blink = last; } } static inline void sym_que_move(struct sym_quehead *orig, struct sym_quehead *dest) { struct sym_quehead *first, *last; first = orig->flink; if (first != orig) { first->blink = dest; dest->flink = first; last = orig->blink; last->flink = dest; dest->blink = last; orig->flink = orig; orig->blink = orig; } else { dest->flink = dest; dest->blink = dest; } } #define sym_que_entry(ptr, type, member) container_of(ptr, type, member) #define sym_insque(new, pos) __sym_que_add(new, pos, (pos)->flink) #define sym_remque(el) __sym_que_del((el)->blink, (el)->flink) #define sym_insque_head(new, head) __sym_que_add(new, head, (head)->flink) static inline struct sym_quehead *sym_remque_head(struct sym_quehead *head) { struct sym_quehead *elem = head->flink; if (elem != head) __sym_que_del(head, elem->flink); else elem = NULL; return elem; } #define sym_insque_tail(new, head) __sym_que_add(new, (head)->blink, head) static inline struct sym_quehead *sym_remque_tail(struct sym_quehead *head) { struct sym_quehead *elem = head->blink; if (elem != head) __sym_que_del(elem->blink, head); else elem = 0; return elem; } /* * This one may be useful. */ #define FOR_EACH_QUEUED_ELEMENT(head, qp) \ for (qp = (head)->flink; qp != (head); qp = qp->flink) /* * FreeBSD does not offer our kind of queue in the CAM CCB. * So, we have to cast. */ #define sym_qptr(p) ((struct sym_quehead *) (p)) /* * Simple bitmap operations. 
*/ #define sym_set_bit(p, n) (((u32 *)(p))[(n)>>5] |= (1<<((n)&0x1f))) #define sym_clr_bit(p, n) (((u32 *)(p))[(n)>>5] &= ~(1<<((n)&0x1f))) #define sym_is_bit(p, n) (((u32 *)(p))[(n)>>5] & (1<<((n)&0x1f))) /* * The below round up/down macros are to be used with a constant * as argument (sizeof(...) for example), for the compiler to * optimize the whole thing. */ #define _U_(a,m) (a)<=(1<<m)?m: /* * Round up logarithm to base 2 of a 16 bit constant. */ #define _LGRU16_(a) \ ( \ _U_(a, 0)_U_(a, 1)_U_(a, 2)_U_(a, 3)_U_(a, 4)_U_(a, 5)_U_(a, 6)_U_(a, 7) \ _U_(a, 8)_U_(a, 9)_U_(a,10)_U_(a,11)_U_(a,12)_U_(a,13)_U_(a,14)_U_(a,15) \ 16) #endif /* SYM_MISC_H */
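/*
 * Stand-alone check (not driver code) of what the _LGRU16_() macro above
 * expands to: a chain of conditionals that picks the smallest m with
 * a <= (1 << m), i.e. a rounded-up base-2 logarithm, folded to a constant
 * at compile time when the argument is a constant expression.  The macros
 * below are renamed local stand-ins for _U_() and _LGRU16_().
 */
#include <stdio.h>

#define LOG2_U_(a, m)	(a) <= (1 << m) ? m :
#define LOG2_ROUND_UP_16(a) \
	(LOG2_U_(a, 0)LOG2_U_(a, 1)LOG2_U_(a, 2)LOG2_U_(a, 3) \
	 LOG2_U_(a, 4)LOG2_U_(a, 5)LOG2_U_(a, 6)LOG2_U_(a, 7) \
	 LOG2_U_(a, 8)LOG2_U_(a, 9)LOG2_U_(a, 10)LOG2_U_(a, 11) \
	 LOG2_U_(a, 12)LOG2_U_(a, 13)LOG2_U_(a, 14)LOG2_U_(a, 15) \
	 16)

int main(void)
{
	/* prints "0 5 8": 1 fits in 2^0, 24 rounds up to 2^5, 256 to 2^8 */
	printf("%d %d %d\n",
	       LOG2_ROUND_UP_16(1), LOG2_ROUND_UP_16(24), LOG2_ROUND_UP_16(256));
	return 0;
}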
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) ST-Ericsson SA 2010 * * Author: Arun R Murthy <[email protected]> * Datasheet: https://web.archive.org/web/20130614115108/http://www.stericsson.com/developers/CD00291561_UM1031_AB8500_user_manual-rev5_CTDS_public.pdf */ #include <linux/err.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/pwm.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/module.h> /* * PWM Out generators * Bank: 0x10 */ #define AB8500_PWM_OUT_CTRL1_REG 0x60 #define AB8500_PWM_OUT_CTRL2_REG 0x61 #define AB8500_PWM_OUT_CTRL7_REG 0x66 #define AB8500_PWM_CLKRATE 9600000 struct ab8500_pwm_chip { unsigned int hwid; }; static struct ab8500_pwm_chip *ab8500_pwm_from_chip(struct pwm_chip *chip) { return pwmchip_get_drvdata(chip); } static int ab8500_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { int ret; u8 reg; u8 higher_val, lower_val; unsigned int duty_steps, div; struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip); if (state->polarity != PWM_POLARITY_NORMAL) return -EINVAL; if (state->enabled) { /* * A time quantum is * q = (32 - FreqPWMOutx[3:0]) / AB8500_PWM_CLKRATE * The period is always 1024 q, duty_cycle is between 1q and 1024q. * * FreqPWMOutx[3:0] | output frequency | output frequency | 1024q = period * | (from manual) | (1 / 1024q) | = 1 / freq * -----------------+------------------+------------------+-------------- * b0000 | 293 Hz | 292.968750 Hz | 3413333.33 ns * b0001 | 302 Hz | 302.419355 Hz | 3306666.66 ns * b0010 | 312 Hz | 312.500000 Hz | 3200000 ns * b0011 | 323 Hz | 323.275862 Hz | 3093333.33 ns * b0100 | 334 Hz | 334.821429 Hz | 2986666.66 ns * b0101 | 347 Hz | 347.222222 Hz | 2880000 ns * b0110 | 360 Hz | 360.576923 Hz | 2773333.33 ns * b0111 | 375 Hz | 375.000000 Hz | 2666666.66 ns * b1000 | 390 Hz | 390.625000 Hz | 2560000 ns * b1001 | 407 Hz | 407.608696 Hz | 2453333.33 ns * b1010 | 426 Hz | 426.136364 Hz | 2346666.66 ns * b1011 | 446 Hz | 446.428571 Hz | 2240000 ns * b1100 | 468 Hz | 468.750000 Hz | 2133333.33 ns * b1101 | 493 Hz | 493.421053 Hz | 2026666.66 ns * b1110 | 520 Hz | 520.833333 Hz | 1920000 ns * b1111 | 551 Hz | 551.470588 Hz | 1813333.33 ns * * * AB8500_PWM_CLKRATE is a multiple of 1024, so the division by * 1024 can be done in this factor without loss of precision. */ div = min_t(u64, mul_u64_u64_div_u64(state->period, AB8500_PWM_CLKRATE >> 10, NSEC_PER_SEC), 32); /* 32 - FreqPWMOutx[3:0] */ if (div <= 16) /* requested period < 1813333.33 ns */ return -EINVAL; duty_steps = min_t(u64, mul_u64_u64_div_u64(state->duty_cycle, AB8500_PWM_CLKRATE, (u64)NSEC_PER_SEC * div), 1024); } /* * The hardware doesn't support duty_steps = 0 explicitly, but emits low * when disabled. */ if (!state->enabled || duty_steps == 0) { ret = abx500_mask_and_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, 1 << ab8500->hwid, 0); if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to disable PWM, Error %d\n", pwm->label, ret); return ret; } /* * The lower 8 bits of duty_steps is written to ... * AB8500_PWM_OUT_CTRL1_REG[0:7] */ lower_val = (duty_steps - 1) & 0x00ff; /* * The two remaining high bits to * AB8500_PWM_OUT_CTRL2_REG[0:1]; together with FreqPWMOutx.
*/ higher_val = ((duty_steps - 1) & 0x0300) >> 8 | (32 - div) << 4; reg = AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2); ret = abx500_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC, reg, lower_val); if (ret < 0) return ret; ret = abx500_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC, (reg + 1), higher_val); if (ret < 0) return ret; /* enable */ ret = abx500_mask_and_set_register_interruptible(pwmchip_parent(chip), AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, 1 << ab8500->hwid, 1 << ab8500->hwid); if (ret < 0) dev_err(pwmchip_parent(chip), "%s: Failed to enable PWM, Error %d\n", pwm->label, ret); return ret; } static int ab8500_pwm_get_state(struct pwm_chip *chip, struct pwm_device *pwm, struct pwm_state *state) { u8 ctrl7, lower_val, higher_val; int ret; struct ab8500_pwm_chip *ab8500 = ab8500_pwm_from_chip(chip); unsigned int div, duty_steps; ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC, AB8500_PWM_OUT_CTRL7_REG, &ctrl7); if (ret) return ret; state->polarity = PWM_POLARITY_NORMAL; if (!(ctrl7 & 1 << ab8500->hwid)) { state->enabled = false; return 0; } ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC, AB8500_PWM_OUT_CTRL1_REG + (ab8500->hwid * 2), &lower_val); if (ret) return ret; ret = abx500_get_register_interruptible(pwmchip_parent(chip), AB8500_MISC, AB8500_PWM_OUT_CTRL2_REG + (ab8500->hwid * 2), &higher_val); if (ret) return ret; div = 32 - ((higher_val & 0xf0) >> 4); duty_steps = ((higher_val & 3) << 8 | lower_val) + 1; state->period = DIV64_U64_ROUND_UP((u64)div << 10, AB8500_PWM_CLKRATE); state->duty_cycle = DIV64_U64_ROUND_UP((u64)div * duty_steps, AB8500_PWM_CLKRATE); return 0; } static const struct pwm_ops ab8500_pwm_ops = { .apply = ab8500_pwm_apply, .get_state = ab8500_pwm_get_state, }; static int ab8500_pwm_probe(struct platform_device *pdev) { struct pwm_chip *chip; struct ab8500_pwm_chip *ab8500; int err; if (pdev->id < 1 || pdev->id > 31) return dev_err_probe(&pdev->dev, -EINVAL, "Invalid device id %d\n", pdev->id); /* * Nothing to be done in probe, this is required to get the * device which is required for ab8500 read and write */ chip = devm_pwmchip_alloc(&pdev->dev, 1, sizeof(*ab8500)); if (IS_ERR(chip)) return PTR_ERR(chip); ab8500 = ab8500_pwm_from_chip(chip); chip->ops = &ab8500_pwm_ops; ab8500->hwid = pdev->id - 1; err = devm_pwmchip_add(&pdev->dev, chip); if (err < 0) return dev_err_probe(&pdev->dev, err, "Failed to add pwm chip\n"); dev_dbg(&pdev->dev, "pwm probe successful\n"); return 0; } static struct platform_driver ab8500_pwm_driver = { .driver = { .name = "ab8500-pwm", }, .probe = ab8500_pwm_probe, }; module_platform_driver(ab8500_pwm_driver); MODULE_AUTHOR("Arun MURTHY <[email protected]>"); MODULE_DESCRIPTION("AB8500 Pulse Width Modulation Driver"); MODULE_ALIAS("platform:ab8500-pwm"); MODULE_LICENSE("GPL v2");
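/*
 * User-space sketch of the quantisation done in ab8500_pwm_apply() above,
 * using plain 64-bit arithmetic in place of the kernel's
 * mul_u64_u64_div_u64()/min_t() helpers.  The register constants mirror the
 * driver; the requested period and duty cycle are only example values.
 */
#include <stdint.h>
#include <stdio.h>

#define AB8500_PWM_CLKRATE	9600000
#define NSEC_PER_SEC		1000000000ULL

int main(void)
{
	uint64_t period_ns = 3200000;	/* the 312.5 Hz row of the table above */
	uint64_t duty_ns = 800000;	/* 25 % duty cycle */
	uint64_t div, duty_steps;

	/* div = 32 - FreqPWMOutx[3:0], clamped to at most 32 */
	div = period_ns * (AB8500_PWM_CLKRATE >> 10) / NSEC_PER_SEC;
	if (div > 32)
		div = 32;
	if (div <= 16) {
		fprintf(stderr, "period shorter than the hardware minimum\n");
		return 1;
	}

	/* duty cycle expressed in 1024ths of the period, clamped to 1024 */
	duty_steps = duty_ns * AB8500_PWM_CLKRATE / (NSEC_PER_SEC * div);
	if (duty_steps > 1024)
		duty_steps = 1024;

	/* expected output: div=30 FreqPWMOutx=2 duty_steps=256 */
	printf("div=%llu FreqPWMOutx=%llu duty_steps=%llu\n",
	       (unsigned long long)div,
	       (unsigned long long)(32 - div),
	       (unsigned long long)duty_steps);
	return 0;
}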
// SPDX-License-Identifier: GPL-2.0 /* * ISP1362 HCD (Host Controller Driver) for USB. * * Copyright (C) 2005 Lothar Wassmann <[email protected]> * * Derived from the SL811 HCD, rewritten for ISP116x. * Copyright (C) 2005 Olav Kongas <[email protected]> * * Portions: * Copyright (C) 2004 Psion Teklogix (for NetBook PRO) * Copyright (C) 2004 David Brownell */ /* * The ISP1362 chip requires a large delay (300ns and 462ns) between * accesses to the address and data register. * The following timing options exist: * * 1. Configure your memory controller to add such delays if it can (the best) * 2. Implement platform-specific delay function possibly * combined with configuring the memory controller; see * include/linux/usb_isp1362.h for more info. * 3. Use ndelay (easiest, poorest). * * Use the corresponding macros USE_PLATFORM_DELAY and USE_NDELAY in the * platform specific section of isp1362.h to select the appropriate variant. * * Also note that according to the Philips "ISP1362 Errata" document * Rev 1.00 from 27 May data corruption may occur when the #WR signal * is reasserted (even with #CS deasserted) within 132ns after a * write cycle to any controller register. If the hardware doesn't * implement the recommended fix (gating the #WR with #CS) software * must ensure that no further write cycle (not necessarily to the chip!) * is issued by the CPU within this interval. * For PXA25x this can be ensured by using VLIO with the maximum * recovery time (MSCx = 0x7f8c) with a memory clock of 99.53 MHz. */ #undef ISP1362_DEBUG /* * The PXA255 UDC apparently doesn't handle GET_STATUS, GET_CONFIG and * GET_INTERFACE requests correctly when the SETUP and DATA stages of the * requests are carried out in separate frames. This will delay any SETUP * packets until the start of the next frame so that this situation is * unlikely to occur (and makes usbtest happy running with a PXA255 target * device). */ #undef BUGGY_PXA2XX_UDC_USBTEST #undef PTD_TRACE #undef URB_TRACE #undef VERBOSE #undef REGISTERS /* This enables a memory test on the ISP1362 chip memory to make sure the * chip access timing is correct. */ #undef CHIP_BUFFER_TEST #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <linux/usb/isp1362.h> #include <linux/usb/hcd.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/io.h> #include <linux/bitmap.h> #include <linux/prefetch.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <asm/irq.h> #include <asm/byteorder.h> #include <linux/unaligned.h> static int dbg_level; #ifdef ISP1362_DEBUG module_param(dbg_level, int, 0644); #else module_param(dbg_level, int, 0); #endif #include "../core/usb.h" #include "isp1362.h" #define DRIVER_VERSION "2005-04-04" #define DRIVER_DESC "ISP1362 USB Host Controller Driver" MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char hcd_name[] = "isp1362-hcd"; static void isp1362_hc_stop(struct usb_hcd *hcd); static int isp1362_hc_start(struct usb_hcd *hcd); /*-------------------------------------------------------------------------*/ /* * When called from the interrupthandler only isp1362_hcd->irqenb is modified, * since the interrupt handler will write isp1362_hcd->irqenb to HCuPINT upon * completion. 
* We don't need a 'disable' counterpart, since interrupts will be disabled * only by the interrupt handler. */ static inline void isp1362_enable_int(struct isp1362_hcd *isp1362_hcd, u16 mask) { if ((isp1362_hcd->irqenb | mask) == isp1362_hcd->irqenb) return; if (mask & ~isp1362_hcd->irqenb) isp1362_write_reg16(isp1362_hcd, HCuPINT, mask & ~isp1362_hcd->irqenb); isp1362_hcd->irqenb |= mask; if (isp1362_hcd->irq_active) return; isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); } /*-------------------------------------------------------------------------*/ static inline struct isp1362_ep_queue *get_ptd_queue(struct isp1362_hcd *isp1362_hcd, u16 offset) { struct isp1362_ep_queue *epq = NULL; if (offset < isp1362_hcd->istl_queue[1].buf_start) epq = &isp1362_hcd->istl_queue[0]; else if (offset < isp1362_hcd->intl_queue.buf_start) epq = &isp1362_hcd->istl_queue[1]; else if (offset < isp1362_hcd->atl_queue.buf_start) epq = &isp1362_hcd->intl_queue; else if (offset < isp1362_hcd->atl_queue.buf_start + isp1362_hcd->atl_queue.buf_size) epq = &isp1362_hcd->atl_queue; if (epq) DBG(1, "%s: PTD $%04x is on %s queue\n", __func__, offset, epq->name); else pr_warn("%s: invalid PTD $%04x\n", __func__, offset); return epq; } static inline int get_ptd_offset(struct isp1362_ep_queue *epq, u8 index) { int offset; if (index * epq->blk_size > epq->buf_size) { pr_warn("%s: Bad %s index %d(%d)\n", __func__, epq->name, index, epq->buf_size / epq->blk_size); return -EINVAL; } offset = epq->buf_start + index * epq->blk_size; DBG(3, "%s: %s PTD[%02x] # %04x\n", __func__, epq->name, index, offset); return offset; } /*-------------------------------------------------------------------------*/ static inline u16 max_transfer_size(struct isp1362_ep_queue *epq, size_t size, int mps) { u16 xfer_size = min_t(size_t, MAX_XFER_SIZE, size); xfer_size = min_t(size_t, xfer_size, epq->buf_avail * epq->blk_size - PTD_HEADER_SIZE); if (xfer_size < size && xfer_size % mps) xfer_size -= xfer_size % mps; return xfer_size; } static int claim_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep, u16 len) { int ptd_offset = -EINVAL; int num_ptds = ((len + PTD_HEADER_SIZE - 1) / epq->blk_size) + 1; int found; BUG_ON(len > epq->buf_size); if (!epq->buf_avail) return -ENOMEM; if (ep->num_ptds) pr_err("%s: %s len %d/%d num_ptds %d buf_map %08lx skip_map %08lx\n", __func__, epq->name, len, epq->blk_size, num_ptds, epq->buf_map, epq->skip_map); BUG_ON(ep->num_ptds != 0); found = bitmap_find_next_zero_area(&epq->buf_map, epq->buf_count, 0, num_ptds, 0); if (found >= epq->buf_count) return -EOVERFLOW; DBG(1, "%s: Found %d PTDs[%d] for %d/%d byte\n", __func__, num_ptds, found, len, (int)(epq->blk_size - PTD_HEADER_SIZE)); ptd_offset = get_ptd_offset(epq, found); WARN_ON(ptd_offset < 0); ep->ptd_offset = ptd_offset; ep->num_ptds += num_ptds; epq->buf_avail -= num_ptds; BUG_ON(epq->buf_avail > epq->buf_count); ep->ptd_index = found; bitmap_set(&epq->buf_map, found, num_ptds); DBG(1, "%s: Done %s PTD[%d] $%04x, avail %d count %d claimed %d %08lx:%08lx\n", __func__, epq->name, ep->ptd_index, ep->ptd_offset, epq->buf_avail, epq->buf_count, num_ptds, epq->buf_map, epq->skip_map); return found; } static inline void release_ptd_buffers(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) { int last = ep->ptd_index + ep->num_ptds; if (last > epq->buf_count) pr_err("%s: ep %p req %d len %d %s PTD[%d] $%04x num_ptds %d buf_count %d buf_avail %d buf_map %08lx skip_map %08lx\n", __func__, ep, ep->num_req, ep->length, epq->name, 
ep->ptd_index, ep->ptd_offset, ep->num_ptds, epq->buf_count, epq->buf_avail, epq->buf_map, epq->skip_map); BUG_ON(last > epq->buf_count); bitmap_clear(&epq->buf_map, ep->ptd_index, ep->num_ptds); bitmap_set(&epq->skip_map, ep->ptd_index, ep->num_ptds); epq->buf_avail += ep->num_ptds; epq->ptd_count--; BUG_ON(epq->buf_avail > epq->buf_count); BUG_ON(epq->ptd_count > epq->buf_count); DBG(1, "%s: Done %s PTDs $%04x released %d avail %d count %d\n", __func__, epq->name, ep->ptd_offset, ep->num_ptds, epq->buf_avail, epq->buf_count); DBG(1, "%s: buf_map %08lx skip_map %08lx\n", __func__, epq->buf_map, epq->skip_map); ep->num_ptds = 0; ep->ptd_offset = -EINVAL; ep->ptd_index = -EINVAL; } /*-------------------------------------------------------------------------*/ /* Set up PTD's. */ static void prepare_ptd(struct isp1362_hcd *isp1362_hcd, struct urb *urb, struct isp1362_ep *ep, struct isp1362_ep_queue *epq, u16 fno) { struct ptd *ptd; int toggle; int dir; u16 len; size_t buf_len = urb->transfer_buffer_length - urb->actual_length; DBG(3, "%s: %s ep %p\n", __func__, epq->name, ep); ptd = &ep->ptd; ep->data = (unsigned char *)urb->transfer_buffer + urb->actual_length; switch (ep->nextpid) { case USB_PID_IN: toggle = usb_gettoggle(urb->dev, ep->epnum, 0); dir = PTD_DIR_IN; if (usb_pipecontrol(urb->pipe)) { len = min_t(size_t, ep->maxpacket, buf_len); } else if (usb_pipeisoc(urb->pipe)) { len = min_t(size_t, urb->iso_frame_desc[fno].length, MAX_XFER_SIZE); ep->data = urb->transfer_buffer + urb->iso_frame_desc[fno].offset; } else len = max_transfer_size(epq, buf_len, ep->maxpacket); DBG(1, "%s: IN len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, (int)buf_len); break; case USB_PID_OUT: toggle = usb_gettoggle(urb->dev, ep->epnum, 1); dir = PTD_DIR_OUT; if (usb_pipecontrol(urb->pipe)) len = min_t(size_t, ep->maxpacket, buf_len); else if (usb_pipeisoc(urb->pipe)) len = min_t(size_t, urb->iso_frame_desc[0].length, MAX_XFER_SIZE); else len = max_transfer_size(epq, buf_len, ep->maxpacket); if (len == 0) pr_info("%s: Sending ZERO packet: %d\n", __func__, urb->transfer_flags & URB_ZERO_PACKET); DBG(1, "%s: OUT len %d/%d/%d from URB\n", __func__, len, ep->maxpacket, (int)buf_len); break; case USB_PID_SETUP: toggle = 0; dir = PTD_DIR_SETUP; len = sizeof(struct usb_ctrlrequest); DBG(1, "%s: SETUP len %d\n", __func__, len); ep->data = urb->setup_packet; break; case USB_PID_ACK: toggle = 1; len = 0; dir = (urb->transfer_buffer_length && usb_pipein(urb->pipe)) ? PTD_DIR_OUT : PTD_DIR_IN; DBG(1, "%s: ACK len %d\n", __func__, len); break; default: toggle = dir = len = 0; pr_err("%s@%d: ep->nextpid %02x\n", __func__, __LINE__, ep->nextpid); BUG_ON(1); } ep->length = len; if (!len) ep->data = NULL; ptd->count = PTD_CC_MSK | PTD_ACTIVE_MSK | PTD_TOGGLE(toggle); ptd->mps = PTD_MPS(ep->maxpacket) | PTD_SPD(urb->dev->speed == USB_SPEED_LOW) | PTD_EP(ep->epnum); ptd->len = PTD_LEN(len) | PTD_DIR(dir); ptd->faddr = PTD_FA(usb_pipedevice(urb->pipe)); if (usb_pipeint(urb->pipe)) { ptd->faddr |= PTD_SF_INT(ep->branch); ptd->faddr |= PTD_PR(ep->interval ? __ffs(ep->interval) : 0); } if (usb_pipeisoc(urb->pipe)) ptd->faddr |= PTD_SF_ISO(fno); DBG(1, "%s: Finished\n", __func__); } static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) { struct ptd *ptd = &ep->ptd; int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 
0 : ep->length; prefetch(ptd); isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); if (len) isp1362_write_buffer(isp1362_hcd, ep->data, ep->ptd_offset + PTD_HEADER_SIZE, len); dump_ptd(ptd); dump_ptd_out_data(ptd, ep->data); } static void isp1362_read_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) { struct ptd *ptd = &ep->ptd; int act_len; WARN_ON(list_empty(&ep->active)); BUG_ON(ep->ptd_offset < 0); list_del_init(&ep->active); DBG(1, "%s: ep %p removed from active list %p\n", __func__, ep, &epq->active); prefetchw(ptd); isp1362_read_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE); dump_ptd(ptd); act_len = PTD_GET_COUNT(ptd); if (PTD_GET_DIR(ptd) != PTD_DIR_IN || act_len == 0) return; if (act_len > ep->length) pr_err("%s: ep %p PTD $%04x act_len %d ep->length %d\n", __func__, ep, ep->ptd_offset, act_len, ep->length); BUG_ON(act_len > ep->length); /* Only transfer the amount of data that has actually been overwritten * in the chip buffer. We don't want any data that doesn't belong to the * transfer to leak out of the chip to the callers transfer buffer! */ prefetchw(ep->data); isp1362_read_buffer(isp1362_hcd, ep->data, ep->ptd_offset + PTD_HEADER_SIZE, act_len); dump_ptd_in_data(ptd, ep->data); } /* * INT PTDs will stay in the chip until data is available. * This function will remove a PTD from the chip when the URB is dequeued. * Must be called with the spinlock held and IRQs disabled */ static void remove_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) { int index; struct isp1362_ep_queue *epq; DBG(1, "%s: ep %p PTD[%d] $%04x\n", __func__, ep, ep->ptd_index, ep->ptd_offset); BUG_ON(ep->ptd_offset < 0); epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); BUG_ON(!epq); /* put ep in remove_list for cleanup */ WARN_ON(!list_empty(&ep->remove_list)); list_add_tail(&ep->remove_list, &isp1362_hcd->remove_list); /* let SOF interrupt handle the cleanup */ isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); index = ep->ptd_index; if (index < 0) /* ISO queues don't have SKIP registers */ return; DBG(1, "%s: Disabling PTD[%02x] $%04x %08lx|%08x\n", __func__, index, ep->ptd_offset, epq->skip_map, 1 << index); /* prevent further processing of PTD (will be effective after next SOF) */ epq->skip_map |= 1 << index; if (epq == &isp1362_hcd->atl_queue) { DBG(2, "%s: ATLSKIP = %08x -> %08lx\n", __func__, isp1362_read_reg32(isp1362_hcd, HCATLSKIP), epq->skip_map); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, epq->skip_map); if (~epq->skip_map == 0) isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); } else if (epq == &isp1362_hcd->intl_queue) { DBG(2, "%s: INTLSKIP = %08x -> %08lx\n", __func__, isp1362_read_reg32(isp1362_hcd, HCINTLSKIP), epq->skip_map); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, epq->skip_map); if (~epq->skip_map == 0) isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); } } /* Take done or failed requests out of schedule. Give back processed urbs. */ static void finish_request(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep, struct urb *urb, int status) __releases(isp1362_hcd->lock) __acquires(isp1362_hcd->lock) { urb->hcpriv = NULL; ep->error_count = 0; if (usb_pipecontrol(urb->pipe)) ep->nextpid = USB_PID_SETUP; URB_DBG("%s: req %d FA %d ep%d%s %s: len %d/%d %s stat %d\n", __func__, ep->num_req, usb_pipedevice(urb->pipe), usb_pipeendpoint(urb->pipe), !usb_pipein(urb->pipe) ? "out" : "in", usb_pipecontrol(urb->pipe) ? "ctrl" : usb_pipeint(urb->pipe) ? 
"int" : usb_pipebulk(urb->pipe) ? "bulk" : "iso", urb->actual_length, urb->transfer_buffer_length, !(urb->transfer_flags & URB_SHORT_NOT_OK) ? "short_ok" : "", urb->status); usb_hcd_unlink_urb_from_ep(isp1362_hcd_to_hcd(isp1362_hcd), urb); spin_unlock(&isp1362_hcd->lock); usb_hcd_giveback_urb(isp1362_hcd_to_hcd(isp1362_hcd), urb, status); spin_lock(&isp1362_hcd->lock); /* take idle endpoints out of the schedule right away */ if (!list_empty(&ep->hep->urb_list)) return; /* async deschedule */ if (!list_empty(&ep->schedule)) { list_del_init(&ep->schedule); return; } if (ep->interval) { /* periodic deschedule */ DBG(1, "deschedule qh%d/%p branch %d load %d bandwidth %d -> %d\n", ep->interval, ep, ep->branch, ep->load, isp1362_hcd->load[ep->branch], isp1362_hcd->load[ep->branch] - ep->load); isp1362_hcd->load[ep->branch] -= ep->load; ep->branch = PERIODIC_SIZE; } } /* * Analyze transfer results, handle partial transfers and errors */ static void postproc_ep(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep) { struct urb *urb = get_urb(ep); struct usb_device *udev; struct ptd *ptd; int short_ok; u16 len; int urbstat = -EINPROGRESS; u8 cc; DBG(2, "%s: ep %p req %d\n", __func__, ep, ep->num_req); udev = urb->dev; ptd = &ep->ptd; cc = PTD_GET_CC(ptd); if (cc == PTD_NOTACCESSED) { pr_err("%s: req %d PTD %p Untouched by ISP1362\n", __func__, ep->num_req, ptd); cc = PTD_DEVNOTRESP; } short_ok = !(urb->transfer_flags & URB_SHORT_NOT_OK); len = urb->transfer_buffer_length - urb->actual_length; /* Data underrun is special. For allowed underrun we clear the error and continue as normal. For forbidden underrun we finish the DATA stage immediately while for control transfer, we do a STATUS stage. */ if (cc == PTD_DATAUNDERRUN) { if (short_ok) { DBG(1, "%s: req %d Allowed data underrun short_%sok %d/%d/%d byte\n", __func__, ep->num_req, short_ok ? "" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); cc = PTD_CC_NOERROR; urbstat = 0; } else { DBG(1, "%s: req %d Data Underrun %s nextpid %02x short_%sok %d/%d/%d byte\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", ep->nextpid, short_ok ? 
"" : "not_", PTD_GET_COUNT(ptd), ep->maxpacket, len); /* save the data underrun error code for later and * proceed with the status stage */ urb->actual_length += PTD_GET_COUNT(ptd); if (usb_pipecontrol(urb->pipe)) { ep->nextpid = USB_PID_ACK; BUG_ON(urb->actual_length > urb->transfer_buffer_length); if (urb->status == -EINPROGRESS) urb->status = cc_to_error[PTD_DATAUNDERRUN]; } else { usb_settoggle(udev, ep->epnum, ep->nextpid == USB_PID_OUT, PTD_GET_TOGGLE(ptd)); urbstat = cc_to_error[PTD_DATAUNDERRUN]; } goto out; } } if (cc != PTD_CC_NOERROR) { if (++ep->error_count >= 3 || cc == PTD_CC_STALL || cc == PTD_DATAOVERRUN) { urbstat = cc_to_error[cc]; DBG(1, "%s: req %d nextpid %02x, status %d, error %d, error_count %d\n", __func__, ep->num_req, ep->nextpid, urbstat, cc, ep->error_count); } goto out; } switch (ep->nextpid) { case USB_PID_OUT: if (PTD_GET_COUNT(ptd) != ep->length) pr_err("%s: count=%d len=%d\n", __func__, PTD_GET_COUNT(ptd), ep->length); BUG_ON(PTD_GET_COUNT(ptd) != ep->length); urb->actual_length += ep->length; BUG_ON(urb->actual_length > urb->transfer_buffer_length); usb_settoggle(udev, ep->epnum, 1, PTD_GET_TOGGLE(ptd)); if (urb->actual_length == urb->transfer_buffer_length) { DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, ep->num_req, len, ep->maxpacket, urbstat); if (usb_pipecontrol(urb->pipe)) { DBG(3, "%s: req %d %s Wait for ACK\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT"); ep->nextpid = USB_PID_ACK; } else { if (len % ep->maxpacket || !(urb->transfer_flags & URB_ZERO_PACKET)) { urbstat = 0; DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT", urbstat, len, ep->maxpacket, urb->actual_length); } } } break; case USB_PID_IN: len = PTD_GET_COUNT(ptd); BUG_ON(len > ep->length); urb->actual_length += len; BUG_ON(urb->actual_length > urb->transfer_buffer_length); usb_settoggle(udev, ep->epnum, 0, PTD_GET_TOGGLE(ptd)); /* if transfer completed or (allowed) data underrun */ if ((urb->transfer_buffer_length == urb->actual_length) || len % ep->maxpacket) { DBG(3, "%s: req %d xfer complete %d/%d status %d -> 0\n", __func__, ep->num_req, len, ep->maxpacket, urbstat); if (usb_pipecontrol(urb->pipe)) { DBG(3, "%s: req %d %s Wait for ACK\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? "IN" : "OUT"); ep->nextpid = USB_PID_ACK; } else { urbstat = 0; DBG(3, "%s: req %d URB %s status %d count %d/%d/%d\n", __func__, ep->num_req, usb_pipein(urb->pipe) ? 
"IN" : "OUT", urbstat, len, ep->maxpacket, urb->actual_length); } } break; case USB_PID_SETUP: if (urb->transfer_buffer_length == urb->actual_length) { ep->nextpid = USB_PID_ACK; } else if (usb_pipeout(urb->pipe)) { usb_settoggle(udev, 0, 1, 1); ep->nextpid = USB_PID_OUT; } else { usb_settoggle(udev, 0, 0, 1); ep->nextpid = USB_PID_IN; } break; case USB_PID_ACK: DBG(3, "%s: req %d got ACK %d -> 0\n", __func__, ep->num_req, urbstat); WARN_ON(urbstat != -EINPROGRESS); urbstat = 0; ep->nextpid = 0; break; default: BUG_ON(1); } out: if (urbstat != -EINPROGRESS) { DBG(2, "%s: Finishing ep %p req %d urb %p status %d\n", __func__, ep, ep->num_req, urb, urbstat); finish_request(isp1362_hcd, ep, urb, urbstat); } } static void finish_unlinks(struct isp1362_hcd *isp1362_hcd) { struct isp1362_ep *ep; struct isp1362_ep *tmp; list_for_each_entry_safe(ep, tmp, &isp1362_hcd->remove_list, remove_list) { struct isp1362_ep_queue *epq = get_ptd_queue(isp1362_hcd, ep->ptd_offset); int index = ep->ptd_index; BUG_ON(epq == NULL); if (index >= 0) { DBG(1, "%s: remove PTD[%d] $%04x\n", __func__, index, ep->ptd_offset); BUG_ON(ep->num_ptds == 0); release_ptd_buffers(epq, ep); } if (!list_empty(&ep->hep->urb_list)) { struct urb *urb = get_urb(ep); DBG(1, "%s: Finishing req %d ep %p from remove_list\n", __func__, ep->num_req, ep); finish_request(isp1362_hcd, ep, urb, -ESHUTDOWN); } WARN_ON(list_empty(&ep->active)); if (!list_empty(&ep->active)) { list_del_init(&ep->active); DBG(1, "%s: ep %p removed from active list\n", __func__, ep); } list_del_init(&ep->remove_list); DBG(1, "%s: ep %p removed from remove_list\n", __func__, ep); } DBG(1, "%s: Done\n", __func__); } static inline void enable_atl_transfers(struct isp1362_hcd *isp1362_hcd, int count) { if (count > 0) { if (count < isp1362_hcd->atl_queue.ptd_count) isp1362_write_reg16(isp1362_hcd, HCATLDTC, count); isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); } else isp1362_enable_int(isp1362_hcd, HCuPINT_SOF); } static inline void enable_intl_transfers(struct isp1362_hcd *isp1362_hcd) { isp1362_enable_int(isp1362_hcd, HCuPINT_INTL); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, isp1362_hcd->intl_queue.skip_map); } static inline void enable_istl_transfers(struct isp1362_hcd *isp1362_hcd, int flip) { isp1362_enable_int(isp1362_hcd, flip ? HCuPINT_ISTL1 : HCuPINT_ISTL0); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, flip ? 
HCBUFSTAT_ISTL1_FULL : HCBUFSTAT_ISTL0_FULL); } static int submit_req(struct isp1362_hcd *isp1362_hcd, struct urb *urb, struct isp1362_ep *ep, struct isp1362_ep_queue *epq) { int index; prepare_ptd(isp1362_hcd, urb, ep, epq, 0); index = claim_ptd_buffers(epq, ep, ep->length); if (index == -ENOMEM) { DBG(1, "%s: req %d No free %s PTD available: %d, %08lx:%08lx\n", __func__, ep->num_req, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); return index; } else if (index == -EOVERFLOW) { DBG(1, "%s: req %d Not enough space for %d byte %s PTD %d %08lx:%08lx\n", __func__, ep->num_req, ep->length, epq->name, ep->num_ptds, epq->buf_map, epq->skip_map); return index; } else BUG_ON(index < 0); list_add_tail(&ep->active, &epq->active); DBG(1, "%s: ep %p req %d len %d added to active list %p\n", __func__, ep, ep->num_req, ep->length, &epq->active); DBG(1, "%s: Submitting %s PTD $%04x for ep %p req %d\n", __func__, epq->name, ep->ptd_offset, ep, ep->num_req); isp1362_write_ptd(isp1362_hcd, ep, epq); __clear_bit(ep->ptd_index, &epq->skip_map); return 0; } static void start_atl_transfers(struct isp1362_hcd *isp1362_hcd) { int ptd_count = 0; struct isp1362_ep_queue *epq = &isp1362_hcd->atl_queue; struct isp1362_ep *ep; int defer = 0; if (atomic_read(&epq->finishing)) { DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); return; } list_for_each_entry(ep, &isp1362_hcd->async, schedule) { struct urb *urb = get_urb(ep); int ret; if (!list_empty(&ep->active)) { DBG(2, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep); continue; } DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, ep, ep->num_req); ret = submit_req(isp1362_hcd, urb, ep, epq); if (ret == -ENOMEM) { defer = 1; break; } else if (ret == -EOVERFLOW) { defer = 1; continue; } #ifdef BUGGY_PXA2XX_UDC_USBTEST defer = ep->nextpid == USB_PID_SETUP; #endif ptd_count++; } /* Avoid starving of endpoints */ if (isp1362_hcd->async.next != isp1362_hcd->async.prev) { DBG(2, "%s: Cycling ASYNC schedule %d\n", __func__, ptd_count); list_move(&isp1362_hcd->async, isp1362_hcd->async.next); } if (ptd_count || defer) enable_atl_transfers(isp1362_hcd, defer ? 
0 : ptd_count); epq->ptd_count += ptd_count; if (epq->ptd_count > epq->stat_maxptds) { epq->stat_maxptds = epq->ptd_count; DBG(0, "%s: max_ptds: %d\n", __func__, epq->stat_maxptds); } } static void start_intl_transfers(struct isp1362_hcd *isp1362_hcd) { int ptd_count = 0; struct isp1362_ep_queue *epq = &isp1362_hcd->intl_queue; struct isp1362_ep *ep; if (atomic_read(&epq->finishing)) { DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); return; } list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { struct urb *urb = get_urb(ep); int ret; if (!list_empty(&ep->active)) { DBG(1, "%s: Skipping active %s ep %p\n", __func__, epq->name, ep); continue; } DBG(1, "%s: Processing %s ep %p req %d\n", __func__, epq->name, ep, ep->num_req); ret = submit_req(isp1362_hcd, urb, ep, epq); if (ret == -ENOMEM) break; else if (ret == -EOVERFLOW) continue; ptd_count++; } if (ptd_count) { static int last_count; if (ptd_count != last_count) { DBG(0, "%s: ptd_count: %d\n", __func__, ptd_count); last_count = ptd_count; } enable_intl_transfers(isp1362_hcd); } epq->ptd_count += ptd_count; if (epq->ptd_count > epq->stat_maxptds) epq->stat_maxptds = epq->ptd_count; } static inline int next_ptd(struct isp1362_ep_queue *epq, struct isp1362_ep *ep) { u16 ptd_offset = ep->ptd_offset; int num_ptds = (ep->length + PTD_HEADER_SIZE + (epq->blk_size - 1)) / epq->blk_size; DBG(2, "%s: PTD offset $%04x + %04x => %d * %04x -> $%04x\n", __func__, ptd_offset, ep->length, num_ptds, epq->blk_size, ptd_offset + num_ptds * epq->blk_size); ptd_offset += num_ptds * epq->blk_size; if (ptd_offset < epq->buf_start + epq->buf_size) return ptd_offset; else return -ENOMEM; } static void start_iso_transfers(struct isp1362_hcd *isp1362_hcd) { int ptd_count = 0; int flip = isp1362_hcd->istl_flip; struct isp1362_ep_queue *epq; int ptd_offset; struct isp1362_ep *ep; struct isp1362_ep *tmp; u16 fno = isp1362_read_reg32(isp1362_hcd, HCFMNUM); fill2: epq = &isp1362_hcd->istl_queue[flip]; if (atomic_read(&epq->finishing)) { DBG(1, "%s: finish_transfers is active for %s\n", __func__, epq->name); return; } if (!list_empty(&epq->active)) return; ptd_offset = epq->buf_start; list_for_each_entry_safe(ep, tmp, &isp1362_hcd->isoc, schedule) { struct urb *urb = get_urb(ep); s16 diff = fno - (u16)urb->start_frame; DBG(1, "%s: Processing %s ep %p\n", __func__, epq->name, ep); if (diff > urb->number_of_packets) { /* time frame for this URB has elapsed */ finish_request(isp1362_hcd, ep, urb, -EOVERFLOW); continue; } else if (diff < -1) { /* URB is not due in this frame or the next one. 
* Comparing with '-1' instead of '0' accounts for double * buffering in the ISP1362 which enables us to queue the PTD * one frame ahead of time */ } else if (diff == -1) { /* submit PTD's that are due in the next frame */ prepare_ptd(isp1362_hcd, urb, ep, epq, fno); if (ptd_offset + PTD_HEADER_SIZE + ep->length > epq->buf_start + epq->buf_size) { pr_err("%s: Not enough ISO buffer space for %d byte PTD\n", __func__, ep->length); continue; } ep->ptd_offset = ptd_offset; list_add_tail(&ep->active, &epq->active); ptd_offset = next_ptd(epq, ep); if (ptd_offset < 0) { pr_warn("%s: req %d No more %s PTD buffers available\n", __func__, ep->num_req, epq->name); break; } } } list_for_each_entry(ep, &epq->active, active) { if (epq->active.next == &ep->active) ep->ptd.mps |= PTD_LAST_MSK; isp1362_write_ptd(isp1362_hcd, ep, epq); ptd_count++; } if (ptd_count) enable_istl_transfers(isp1362_hcd, flip); epq->ptd_count += ptd_count; if (epq->ptd_count > epq->stat_maxptds) epq->stat_maxptds = epq->ptd_count; /* check, whether the second ISTL buffer may also be filled */ if (!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & (flip ? HCBUFSTAT_ISTL0_FULL : HCBUFSTAT_ISTL1_FULL))) { fno++; ptd_count = 0; flip = 1 - flip; goto fill2; } } static void finish_transfers(struct isp1362_hcd *isp1362_hcd, unsigned long done_map, struct isp1362_ep_queue *epq) { struct isp1362_ep *ep; struct isp1362_ep *tmp; if (list_empty(&epq->active)) { DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); return; } DBG(1, "%s: Finishing %s transfers %08lx\n", __func__, epq->name, done_map); atomic_inc(&epq->finishing); list_for_each_entry_safe(ep, tmp, &epq->active, active) { int index = ep->ptd_index; DBG(1, "%s: Checking %s PTD[%02x] $%04x\n", __func__, epq->name, index, ep->ptd_offset); BUG_ON(index < 0); if (__test_and_clear_bit(index, &done_map)) { isp1362_read_ptd(isp1362_hcd, ep, epq); epq->free_ptd = index; BUG_ON(ep->num_ptds == 0); release_ptd_buffers(epq, ep); DBG(1, "%s: ep %p req %d removed from active list\n", __func__, ep, ep->num_req); if (!list_empty(&ep->remove_list)) { list_del_init(&ep->remove_list); DBG(1, "%s: ep %p removed from remove list\n", __func__, ep); } DBG(1, "%s: Postprocessing %s ep %p req %d\n", __func__, epq->name, ep, ep->num_req); postproc_ep(isp1362_hcd, ep); } if (!done_map) break; } if (done_map) pr_warn("%s: done_map not clear: %08lx:%08lx\n", __func__, done_map, epq->skip_map); atomic_dec(&epq->finishing); } static void finish_iso_transfers(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep_queue *epq) { struct isp1362_ep *ep; struct isp1362_ep *tmp; if (list_empty(&epq->active)) { DBG(1, "%s: Nothing to do for %s queue\n", __func__, epq->name); return; } DBG(1, "%s: Finishing %s transfers\n", __func__, epq->name); atomic_inc(&epq->finishing); list_for_each_entry_safe(ep, tmp, &epq->active, active) { DBG(1, "%s: Checking PTD $%04x\n", __func__, ep->ptd_offset); isp1362_read_ptd(isp1362_hcd, ep, epq); DBG(1, "%s: Postprocessing %s ep %p\n", __func__, epq->name, ep); postproc_ep(isp1362_hcd, ep); } WARN_ON(epq->blk_size != 0); atomic_dec(&epq->finishing); } static irqreturn_t isp1362_irq(struct usb_hcd *hcd) { int handled = 0; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); u16 irqstat; u16 svc_mask; spin_lock(&isp1362_hcd->lock); BUG_ON(isp1362_hcd->irq_active++); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); irqstat = isp1362_read_reg16(isp1362_hcd, HCuPINT); DBG(3, "%s: got IRQ %04x:%04x\n", __func__, irqstat, isp1362_hcd->irqenb); /* only handle interrupts that are 
currently enabled */ irqstat &= isp1362_hcd->irqenb; isp1362_write_reg16(isp1362_hcd, HCuPINT, irqstat); svc_mask = irqstat; if (irqstat & HCuPINT_SOF) { isp1362_hcd->irqenb &= ~HCuPINT_SOF; isp1362_hcd->irq_stat[ISP1362_INT_SOF]++; handled = 1; svc_mask &= ~HCuPINT_SOF; DBG(3, "%s: SOF\n", __func__); isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); if (!list_empty(&isp1362_hcd->remove_list)) finish_unlinks(isp1362_hcd); if (!list_empty(&isp1362_hcd->async) && !(irqstat & HCuPINT_ATL)) { if (list_empty(&isp1362_hcd->atl_queue.active)) { start_atl_transfers(isp1362_hcd); } else { isp1362_enable_int(isp1362_hcd, HCuPINT_ATL); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, isp1362_hcd->atl_queue.skip_map); isp1362_set_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); } } } if (irqstat & HCuPINT_ISTL0) { isp1362_hcd->irq_stat[ISP1362_INT_ISTL0]++; handled = 1; svc_mask &= ~HCuPINT_ISTL0; isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL0_FULL); DBG(1, "%s: ISTL0\n", __func__); WARN_ON((int)!!isp1362_hcd->istl_flip); WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL0_ACTIVE); WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL0_DONE)); isp1362_hcd->irqenb &= ~HCuPINT_ISTL0; } if (irqstat & HCuPINT_ISTL1) { isp1362_hcd->irq_stat[ISP1362_INT_ISTL1]++; handled = 1; svc_mask &= ~HCuPINT_ISTL1; isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ISTL1_FULL); DBG(1, "%s: ISTL1\n", __func__); WARN_ON(!(int)isp1362_hcd->istl_flip); WARN_ON(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL1_ACTIVE); WARN_ON(!(isp1362_read_reg16(isp1362_hcd, HCBUFSTAT) & HCBUFSTAT_ISTL1_DONE)); isp1362_hcd->irqenb &= ~HCuPINT_ISTL1; } if (irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) { WARN_ON((irqstat & (HCuPINT_ISTL0 | HCuPINT_ISTL1)) == (HCuPINT_ISTL0 | HCuPINT_ISTL1)); finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[isp1362_hcd->istl_flip]); start_iso_transfers(isp1362_hcd); isp1362_hcd->istl_flip = 1 - isp1362_hcd->istl_flip; } if (irqstat & HCuPINT_INTL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCINTLSKIP); isp1362_hcd->irq_stat[ISP1362_INT_INTL]++; DBG(2, "%s: INTL\n", __func__); svc_mask &= ~HCuPINT_INTL; isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, skip_map | done_map); if (~(done_map | skip_map) == 0) /* All PTDs are finished, disable INTL processing entirely */ isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_INTL_ACTIVE); handled = 1; WARN_ON(!done_map); if (done_map) { DBG(3, "%s: INTL done_map %08x\n", __func__, done_map); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); start_intl_transfers(isp1362_hcd); } } if (irqstat & HCuPINT_ATL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); u32 skip_map = isp1362_read_reg32(isp1362_hcd, HCATLSKIP); isp1362_hcd->irq_stat[ISP1362_INT_ATL]++; DBG(2, "%s: ATL\n", __func__); svc_mask &= ~HCuPINT_ATL; isp1362_write_reg32(isp1362_hcd, HCATLSKIP, skip_map | done_map); if (~(done_map | skip_map) == 0) isp1362_clr_mask16(isp1362_hcd, HCBUFSTAT, HCBUFSTAT_ATL_ACTIVE); if (done_map) { DBG(3, "%s: ATL done_map %08x\n", __func__, done_map); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); start_atl_transfers(isp1362_hcd); } handled = 1; } if (irqstat & HCuPINT_OPR) { u32 intstat = isp1362_read_reg32(isp1362_hcd, HCINTSTAT); isp1362_hcd->irq_stat[ISP1362_INT_OPR]++; svc_mask &= ~HCuPINT_OPR; DBG(2, "%s: OPR %08x:%08x\n", __func__, intstat, isp1362_hcd->intenb); intstat &= 
isp1362_hcd->intenb; if (intstat & OHCI_INTR_UE) { pr_err("Unrecoverable error\n"); /* FIXME: do here reset or cleanup or whatever */ } if (intstat & OHCI_INTR_RHSC) { isp1362_hcd->rhstatus = isp1362_read_reg32(isp1362_hcd, HCRHSTATUS); isp1362_hcd->rhport[0] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1); isp1362_hcd->rhport[1] = isp1362_read_reg32(isp1362_hcd, HCRHPORT2); } if (intstat & OHCI_INTR_RD) { pr_info("%s: RESUME DETECTED\n", __func__); isp1362_show_reg(isp1362_hcd, HCCONTROL); usb_hcd_resume_root_hub(hcd); } isp1362_write_reg32(isp1362_hcd, HCINTSTAT, intstat); irqstat &= ~HCuPINT_OPR; handled = 1; } if (irqstat & HCuPINT_SUSP) { isp1362_hcd->irq_stat[ISP1362_INT_SUSP]++; handled = 1; svc_mask &= ~HCuPINT_SUSP; pr_info("%s: SUSPEND IRQ\n", __func__); } if (irqstat & HCuPINT_CLKRDY) { isp1362_hcd->irq_stat[ISP1362_INT_CLKRDY]++; handled = 1; isp1362_hcd->irqenb &= ~HCuPINT_CLKRDY; svc_mask &= ~HCuPINT_CLKRDY; pr_info("%s: CLKRDY IRQ\n", __func__); } if (svc_mask) pr_err("%s: Unserviced interrupt(s) %04x\n", __func__, svc_mask); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); isp1362_hcd->irq_active--; spin_unlock(&isp1362_hcd->lock); return IRQ_RETVAL(handled); } /*-------------------------------------------------------------------------*/ #define MAX_PERIODIC_LOAD 900 /* out of 1000 usec */ static int balance(struct isp1362_hcd *isp1362_hcd, u16 interval, u16 load) { int i, branch = -ENOSPC; /* search for the least loaded schedule branch of that interval * which has enough bandwidth left unreserved. */ for (i = 0; i < interval; i++) { if (branch < 0 || isp1362_hcd->load[branch] > isp1362_hcd->load[i]) { int j; for (j = i; j < PERIODIC_SIZE; j += interval) { if ((isp1362_hcd->load[j] + load) > MAX_PERIODIC_LOAD) { pr_err("%s: new load %d load[%02x] %d max %d\n", __func__, load, j, isp1362_hcd->load[j], MAX_PERIODIC_LOAD); break; } } if (j < PERIODIC_SIZE) continue; branch = i; } } return branch; } /* NB! ALL the code above this point runs with isp1362_hcd->lock held, irqs off */ /*-------------------------------------------------------------------------*/ static int isp1362_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct usb_device *udev = urb->dev; unsigned int pipe = urb->pipe; int is_out = !usb_pipein(pipe); int type = usb_pipetype(pipe); int epnum = usb_pipeendpoint(pipe); struct usb_host_endpoint *hep = urb->ep; struct isp1362_ep *ep = NULL; unsigned long flags; int retval = 0; DBG(3, "%s: urb %p\n", __func__, urb); if (type == PIPE_ISOCHRONOUS) { pr_err("Isochronous transfers not supported\n"); return -ENOSPC; } URB_DBG("%s: FA %d ep%d%s %s: len %d %s%s\n", __func__, usb_pipedevice(pipe), epnum, is_out ? "out" : "in", usb_pipecontrol(pipe) ? "ctrl" : usb_pipeint(pipe) ? "int" : usb_pipebulk(pipe) ? "bulk" : "iso", urb->transfer_buffer_length, (urb->transfer_flags & URB_ZERO_PACKET) ? "ZERO_PACKET " : "", !(urb->transfer_flags & URB_SHORT_NOT_OK) ? 
"short_ok" : ""); /* avoid all allocations within spinlocks: request or endpoint */ if (!hep->hcpriv) { ep = kzalloc(sizeof *ep, mem_flags); if (!ep) return -ENOMEM; } spin_lock_irqsave(&isp1362_hcd->lock, flags); /* don't submit to a dead or disabled port */ if (!((isp1362_hcd->rhport[0] | isp1362_hcd->rhport[1]) & USB_PORT_STAT_ENABLE) || !HC_IS_RUNNING(hcd->state)) { kfree(ep); retval = -ENODEV; goto fail_not_linked; } retval = usb_hcd_link_urb_to_ep(hcd, urb); if (retval) { kfree(ep); goto fail_not_linked; } if (hep->hcpriv) { ep = hep->hcpriv; } else { INIT_LIST_HEAD(&ep->schedule); INIT_LIST_HEAD(&ep->active); INIT_LIST_HEAD(&ep->remove_list); ep->udev = usb_get_dev(udev); ep->hep = hep; ep->epnum = epnum; ep->maxpacket = usb_maxpacket(udev, urb->pipe); ep->ptd_offset = -EINVAL; ep->ptd_index = -EINVAL; usb_settoggle(udev, epnum, is_out, 0); if (type == PIPE_CONTROL) ep->nextpid = USB_PID_SETUP; else if (is_out) ep->nextpid = USB_PID_OUT; else ep->nextpid = USB_PID_IN; switch (type) { case PIPE_ISOCHRONOUS: case PIPE_INTERRUPT: if (urb->interval > PERIODIC_SIZE) urb->interval = PERIODIC_SIZE; ep->interval = urb->interval; ep->branch = PERIODIC_SIZE; ep->load = usb_calc_bus_time(udev->speed, !is_out, type == PIPE_ISOCHRONOUS, usb_maxpacket(udev, pipe)) / 1000; break; } hep->hcpriv = ep; } ep->num_req = isp1362_hcd->req_serial++; /* maybe put endpoint into schedule */ switch (type) { case PIPE_CONTROL: case PIPE_BULK: if (list_empty(&ep->schedule)) { DBG(1, "%s: Adding ep %p req %d to async schedule\n", __func__, ep, ep->num_req); list_add_tail(&ep->schedule, &isp1362_hcd->async); } break; case PIPE_ISOCHRONOUS: case PIPE_INTERRUPT: urb->interval = ep->interval; /* urb submitted for already existing EP */ if (ep->branch < PERIODIC_SIZE) break; retval = balance(isp1362_hcd, ep->interval, ep->load); if (retval < 0) { pr_err("%s: balance returned %d\n", __func__, retval); goto fail; } ep->branch = retval; retval = 0; isp1362_hcd->fmindex = isp1362_read_reg32(isp1362_hcd, HCFMNUM); DBG(1, "%s: Current frame %04x branch %02x start_frame %04x(%04x)\n", __func__, isp1362_hcd->fmindex, ep->branch, ((isp1362_hcd->fmindex + PERIODIC_SIZE - 1) & ~(PERIODIC_SIZE - 1)) + ep->branch, (isp1362_hcd->fmindex & (PERIODIC_SIZE - 1)) + ep->branch); if (list_empty(&ep->schedule)) { if (type == PIPE_ISOCHRONOUS) { u16 frame = isp1362_hcd->fmindex; frame += max_t(u16, 8, ep->interval); frame &= ~(ep->interval - 1); frame |= ep->branch; if (frame_before(frame, isp1362_hcd->fmindex)) frame += ep->interval; urb->start_frame = frame; DBG(1, "%s: Adding ep %p to isoc schedule\n", __func__, ep); list_add_tail(&ep->schedule, &isp1362_hcd->isoc); } else { DBG(1, "%s: Adding ep %p to periodic schedule\n", __func__, ep); list_add_tail(&ep->schedule, &isp1362_hcd->periodic); } } else DBG(1, "%s: ep %p already scheduled\n", __func__, ep); DBG(2, "%s: load %d bandwidth %d -> %d\n", __func__, ep->load / ep->interval, isp1362_hcd->load[ep->branch], isp1362_hcd->load[ep->branch] + ep->load); isp1362_hcd->load[ep->branch] += ep->load; } urb->hcpriv = hep; ALIGNSTAT(isp1362_hcd, urb->transfer_buffer); switch (type) { case PIPE_CONTROL: case PIPE_BULK: start_atl_transfers(isp1362_hcd); break; case PIPE_INTERRUPT: start_intl_transfers(isp1362_hcd); break; case PIPE_ISOCHRONOUS: start_iso_transfers(isp1362_hcd); break; default: BUG(); } fail: if (retval) usb_hcd_unlink_urb_from_ep(hcd, urb); fail_not_linked: spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (retval) DBG(0, "%s: urb %p failed with %d\n", __func__, urb, 
retval); return retval; } static int isp1362_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct usb_host_endpoint *hep; unsigned long flags; struct isp1362_ep *ep; int retval = 0; DBG(3, "%s: urb %p\n", __func__, urb); spin_lock_irqsave(&isp1362_hcd->lock, flags); retval = usb_hcd_check_unlink_urb(hcd, urb, status); if (retval) goto done; hep = urb->hcpriv; if (!hep) { spin_unlock_irqrestore(&isp1362_hcd->lock, flags); return -EIDRM; } ep = hep->hcpriv; if (ep) { /* In front of queue? */ if (ep->hep->urb_list.next == &urb->urb_list) { if (!list_empty(&ep->active)) { DBG(1, "%s: urb %p ep %p req %d active PTD[%d] $%04x\n", __func__, urb, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); /* disable processing and queue PTD for removal */ remove_ptd(isp1362_hcd, ep); urb = NULL; } } if (urb) { DBG(1, "%s: Finishing ep %p req %d\n", __func__, ep, ep->num_req); finish_request(isp1362_hcd, ep, urb, status); } else DBG(1, "%s: urb %p active; wait4irq\n", __func__, urb); } else { pr_warn("%s: No EP in URB %p\n", __func__, urb); retval = -EINVAL; } done: spin_unlock_irqrestore(&isp1362_hcd->lock, flags); DBG(3, "%s: exit\n", __func__); return retval; } static void isp1362_endpoint_disable(struct usb_hcd *hcd, struct usb_host_endpoint *hep) { struct isp1362_ep *ep = hep->hcpriv; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; DBG(1, "%s: ep %p\n", __func__, ep); if (!ep) return; spin_lock_irqsave(&isp1362_hcd->lock, flags); if (!list_empty(&hep->urb_list)) { if (!list_empty(&ep->active) && list_empty(&ep->remove_list)) { DBG(1, "%s: Removing ep %p req %d PTD[%d] $%04x\n", __func__, ep, ep->num_req, ep->ptd_index, ep->ptd_offset); remove_ptd(isp1362_hcd, ep); pr_info("%s: Waiting for Interrupt to clean up\n", __func__); } } spin_unlock_irqrestore(&isp1362_hcd->lock, flags); /* Wait for interrupt to clear out active list */ while (!list_empty(&ep->active)) msleep(1); DBG(1, "%s: Freeing EP %p\n", __func__, ep); usb_put_dev(ep->udev); kfree(ep); hep->hcpriv = NULL; } static int isp1362_get_frame(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); u32 fmnum; unsigned long flags; spin_lock_irqsave(&isp1362_hcd->lock, flags); fmnum = isp1362_read_reg32(isp1362_hcd, HCFMNUM); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); return (int)fmnum; } /*-------------------------------------------------------------------------*/ /* Adapted from ohci-hub.c */ static int isp1362_hub_status_data(struct usb_hcd *hcd, char *buf) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); int ports, i, changed = 0; unsigned long flags; if (!HC_IS_RUNNING(hcd->state)) return -ESHUTDOWN; /* Report no status change now, if we are scheduled to be called later */ if (timer_pending(&hcd->rh_timer)) return 0; ports = isp1362_hcd->rhdesca & RH_A_NDP; BUG_ON(ports > 2); spin_lock_irqsave(&isp1362_hcd->lock, flags); /* init status */ if (isp1362_hcd->rhstatus & (RH_HS_LPSC | RH_HS_OCIC)) buf[0] = changed = 1; else buf[0] = 0; for (i = 0; i < ports; i++) { u32 status = isp1362_hcd->rhport[i]; if (status & (RH_PS_CSC | RH_PS_PESC | RH_PS_PSSC | RH_PS_OCIC | RH_PS_PRSC)) { changed = 1; buf[0] |= 1 << (i + 1); continue; } if (!(status & RH_PS_CCS)) continue; } spin_unlock_irqrestore(&isp1362_hcd->lock, flags); return changed; } static void isp1362_hub_descriptor(struct isp1362_hcd *isp1362_hcd, struct usb_hub_descriptor *desc) { u32 reg = isp1362_hcd->rhdesca; DBG(3, "%s: enter\n", __func__); 
desc->bDescriptorType = USB_DT_HUB; desc->bDescLength = 9; desc->bHubContrCurrent = 0; desc->bNbrPorts = reg & 0x3; /* Power switching, device type, overcurrent. */ desc->wHubCharacteristics = cpu_to_le16((reg >> 8) & (HUB_CHAR_LPSM | HUB_CHAR_COMPOUND | HUB_CHAR_OCPM)); DBG(0, "%s: hubcharacteristics = %02x\n", __func__, desc->wHubCharacteristics); desc->bPwrOn2PwrGood = (reg >> 24) & 0xff; /* ports removable, and legacy PortPwrCtrlMask */ desc->u.hs.DeviceRemovable[0] = desc->bNbrPorts == 1 ? 1 << 1 : 3 << 1; desc->u.hs.DeviceRemovable[1] = ~0; DBG(3, "%s: exit\n", __func__); } /* Adapted from ohci-hub.c */ static int isp1362_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex, char *buf, u16 wLength) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); int retval = 0; unsigned long flags; unsigned long t1; int ports = isp1362_hcd->rhdesca & RH_A_NDP; u32 tmp = 0; switch (typeReq) { case ClearHubFeature: DBG(0, "ClearHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: DBG(0, "C_HUB_OVER_CURRENT\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_OCIC); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; case C_HUB_LOCAL_POWER: DBG(0, "C_HUB_LOCAL_POWER\n"); break; default: goto error; } break; case SetHubFeature: DBG(0, "SetHubFeature: "); switch (wValue) { case C_HUB_OVER_CURRENT: case C_HUB_LOCAL_POWER: DBG(0, "C_HUB_OVER_CURRENT or C_HUB_LOCAL_POWER\n"); break; default: goto error; } break; case GetHubDescriptor: DBG(0, "GetHubDescriptor\n"); isp1362_hub_descriptor(isp1362_hcd, (struct usb_hub_descriptor *)buf); break; case GetHubStatus: DBG(0, "GetHubStatus\n"); put_unaligned(cpu_to_le32(0), (__le32 *) buf); break; case GetPortStatus: #ifndef VERBOSE DBG(0, "GetPortStatus\n"); #endif if (!wIndex || wIndex > ports) goto error; tmp = isp1362_hcd->rhport[--wIndex]; put_unaligned(cpu_to_le32(tmp), (__le32 *) buf); break; case ClearPortFeature: DBG(0, "ClearPortFeature: "); if (!wIndex || wIndex > ports) goto error; wIndex--; switch (wValue) { case USB_PORT_FEAT_ENABLE: DBG(0, "USB_PORT_FEAT_ENABLE\n"); tmp = RH_PS_CCS; break; case USB_PORT_FEAT_C_ENABLE: DBG(0, "USB_PORT_FEAT_C_ENABLE\n"); tmp = RH_PS_PESC; break; case USB_PORT_FEAT_SUSPEND: DBG(0, "USB_PORT_FEAT_SUSPEND\n"); tmp = RH_PS_POCI; break; case USB_PORT_FEAT_C_SUSPEND: DBG(0, "USB_PORT_FEAT_C_SUSPEND\n"); tmp = RH_PS_PSSC; break; case USB_PORT_FEAT_POWER: DBG(0, "USB_PORT_FEAT_POWER\n"); tmp = RH_PS_LSDA; break; case USB_PORT_FEAT_C_CONNECTION: DBG(0, "USB_PORT_FEAT_C_CONNECTION\n"); tmp = RH_PS_CSC; break; case USB_PORT_FEAT_C_OVER_CURRENT: DBG(0, "USB_PORT_FEAT_C_OVER_CURRENT\n"); tmp = RH_PS_OCIC; break; case USB_PORT_FEAT_C_RESET: DBG(0, "USB_PORT_FEAT_C_RESET\n"); tmp = RH_PS_PRSC; break; default: goto error; } spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, tmp); isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; case SetPortFeature: DBG(0, "SetPortFeature: "); if (!wIndex || wIndex > ports) goto error; wIndex--; switch (wValue) { case USB_PORT_FEAT_SUSPEND: DBG(0, "USB_PORT_FEAT_SUSPEND\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PSS); isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; case USB_PORT_FEAT_POWER: DBG(0, 
"USB_PORT_FEAT_POWER\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, RH_PS_PPS); isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; case USB_PORT_FEAT_RESET: DBG(0, "USB_PORT_FEAT_RESET\n"); spin_lock_irqsave(&isp1362_hcd->lock, flags); t1 = jiffies + msecs_to_jiffies(USB_RESET_WIDTH); while (time_before(jiffies, t1)) { /* spin until any current reset finishes */ for (;;) { tmp = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); if (!(tmp & RH_PS_PRS)) break; udelay(500); } if (!(tmp & RH_PS_CCS)) break; /* Reset lasts 10ms (claims datasheet) */ isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + wIndex, (RH_PS_PRS)); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); msleep(10); spin_lock_irqsave(&isp1362_hcd->lock, flags); } isp1362_hcd->rhport[wIndex] = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + wIndex); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); break; default: goto error; } break; default: error: /* "protocol stall" on error */ DBG(0, "PROTOCOL STALL\n"); retval = -EPIPE; } return retval; } #ifdef CONFIG_PM static int isp1362_bus_suspend(struct usb_hcd *hcd) { int status = 0; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; if (time_before(jiffies, isp1362_hcd->next_statechange)) msleep(5); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_RESUME: DBG(0, "%s: resume/suspend?\n", __func__); isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; isp1362_hcd->hc_control |= OHCI_USB_RESET; isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); fallthrough; case OHCI_USB_RESET: status = -EBUSY; pr_warn("%s: needs reinit!\n", __func__); goto done; case OHCI_USB_SUSPEND: pr_warn("%s: already suspended?\n", __func__); goto done; } DBG(0, "%s: suspend root hub\n", __func__); /* First stop any processing */ hcd->state = HC_STATE_QUIESCING; if (!list_empty(&isp1362_hcd->atl_queue.active) || !list_empty(&isp1362_hcd->intl_queue.active) || !list_empty(&isp1362_hcd->istl_queue[0] .active) || !list_empty(&isp1362_hcd->istl_queue[1] .active)) { int limit; isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); isp1362_write_reg16(isp1362_hcd, HCBUFSTAT, 0); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); isp1362_write_reg32(isp1362_hcd, HCINTSTAT, OHCI_INTR_SF); DBG(0, "%s: stopping schedules ...\n", __func__); limit = 2000; while (limit > 0) { udelay(250); limit -= 250; if (isp1362_read_reg32(isp1362_hcd, HCINTSTAT) & OHCI_INTR_SF) break; } mdelay(7); if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ATL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCATLDONE); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->atl_queue); } if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_INTL) { u32 done_map = isp1362_read_reg32(isp1362_hcd, HCINTLDONE); finish_transfers(isp1362_hcd, done_map, &isp1362_hcd->intl_queue); } if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL0) finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[0]); if (isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_ISTL1) finish_iso_transfers(isp1362_hcd, &isp1362_hcd->istl_queue[1]); } DBG(0, "%s: HCINTSTAT: %08x\n", __func__, isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); isp1362_write_reg32(isp1362_hcd, HCINTSTAT, isp1362_read_reg32(isp1362_hcd, 
HCINTSTAT)); /* Suspend hub */ isp1362_hcd->hc_control = OHCI_USB_SUSPEND; isp1362_show_reg(isp1362_hcd, HCCONTROL); isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); isp1362_show_reg(isp1362_hcd, HCCONTROL); #if 1 isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); if ((isp1362_hcd->hc_control & OHCI_CTRL_HCFS) != OHCI_USB_SUSPEND) { pr_err("%s: controller won't suspend %08x\n", __func__, isp1362_hcd->hc_control); status = -EBUSY; } else #endif { /* no resumes until devices finish suspending */ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(5); } done: if (status == 0) { hcd->state = HC_STATE_SUSPENDED; DBG(0, "%s: HCD suspended: %08x\n", __func__, isp1362_read_reg32(isp1362_hcd, HCCONTROL)); } spin_unlock_irqrestore(&isp1362_hcd->lock, flags); return status; } static int isp1362_bus_resume(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); u32 port; unsigned long flags; int status = -EINPROGRESS; if (time_before(jiffies, isp1362_hcd->next_statechange)) msleep(5); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_hcd->hc_control = isp1362_read_reg32(isp1362_hcd, HCCONTROL); pr_info("%s: HCCONTROL: %08x\n", __func__, isp1362_hcd->hc_control); if (hcd->state == HC_STATE_RESUMING) { pr_warn("%s: duplicate resume\n", __func__); status = 0; } else switch (isp1362_hcd->hc_control & OHCI_CTRL_HCFS) { case OHCI_USB_SUSPEND: DBG(0, "%s: resume root hub\n", __func__); isp1362_hcd->hc_control &= ~OHCI_CTRL_HCFS; isp1362_hcd->hc_control |= OHCI_USB_RESUME; isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); break; case OHCI_USB_RESUME: /* HCFS changes sometime after INTR_RD */ DBG(0, "%s: remote wakeup\n", __func__); break; case OHCI_USB_OPER: DBG(0, "%s: odd resume\n", __func__); status = 0; hcd->self.root_hub->dev.power.power_state = PMSG_ON; break; default: /* RESET, we lost power */ DBG(0, "%s: root hub hardware reset\n", __func__); status = -EBUSY; } spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (status == -EBUSY) { DBG(0, "%s: Restarting HC\n", __func__); isp1362_hc_stop(hcd); return isp1362_hc_start(hcd); } if (status != -EINPROGRESS) return status; spin_lock_irqsave(&isp1362_hcd->lock, flags); port = isp1362_read_reg32(isp1362_hcd, HCRHDESCA) & RH_A_NDP; while (port--) { u32 stat = isp1362_read_reg32(isp1362_hcd, HCRHPORT1 + port); /* force global, not selective, resume */ if (!(stat & RH_PS_PSS)) { DBG(0, "%s: Not Resuming RH port %d\n", __func__, port); continue; } DBG(0, "%s: Resuming RH port %d\n", __func__, port); isp1362_write_reg32(isp1362_hcd, HCRHPORT1 + port, RH_PS_POCI); } spin_unlock_irqrestore(&isp1362_hcd->lock, flags); /* Some controllers (lucent) need extra-long delays */ hcd->state = HC_STATE_RESUMING; mdelay(20 /* usb 11.5.1.10 */ + 15); isp1362_hcd->hc_control = OHCI_USB_OPER; spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_show_reg(isp1362_hcd, HCCONTROL); isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); /* TRSMRCY */ msleep(10); /* keep it alive for ~5x suspend + resume costs */ isp1362_hcd->next_statechange = jiffies + msecs_to_jiffies(250); hcd->self.root_hub->dev.power.power_state = PMSG_ON; hcd->state = HC_STATE_RUNNING; return 0; } #else #define isp1362_bus_suspend NULL #define isp1362_bus_resume NULL #endif /*-------------------------------------------------------------------------*/ static void dump_irq(struct seq_file *s, char *label, u16 mask) { seq_printf(s, "%-15s 
%04x%s%s%s%s%s%s\n", label, mask, mask & HCuPINT_CLKRDY ? " clkrdy" : "", mask & HCuPINT_SUSP ? " susp" : "", mask & HCuPINT_OPR ? " opr" : "", mask & HCuPINT_EOT ? " eot" : "", mask & HCuPINT_ATL ? " atl" : "", mask & HCuPINT_SOF ? " sof" : ""); } static void dump_int(struct seq_file *s, char *label, u32 mask) { seq_printf(s, "%-15s %08x%s%s%s%s%s%s%s\n", label, mask, mask & OHCI_INTR_MIE ? " MIE" : "", mask & OHCI_INTR_RHSC ? " rhsc" : "", mask & OHCI_INTR_FNO ? " fno" : "", mask & OHCI_INTR_UE ? " ue" : "", mask & OHCI_INTR_RD ? " rd" : "", mask & OHCI_INTR_SF ? " sof" : "", mask & OHCI_INTR_SO ? " so" : ""); } static void dump_ctrl(struct seq_file *s, char *label, u32 mask) { seq_printf(s, "%-15s %08x%s%s%s\n", label, mask, mask & OHCI_CTRL_RWC ? " rwc" : "", mask & OHCI_CTRL_RWE ? " rwe" : "", ({ char *hcfs; switch (mask & OHCI_CTRL_HCFS) { case OHCI_USB_OPER: hcfs = " oper"; break; case OHCI_USB_RESET: hcfs = " reset"; break; case OHCI_USB_RESUME: hcfs = " resume"; break; case OHCI_USB_SUSPEND: hcfs = " suspend"; break; default: hcfs = " ?"; } hcfs; })); } static void dump_regs(struct seq_file *s, struct isp1362_hcd *isp1362_hcd) { seq_printf(s, "HCREVISION [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCREVISION), isp1362_read_reg32(isp1362_hcd, HCREVISION)); seq_printf(s, "HCCONTROL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCONTROL), isp1362_read_reg32(isp1362_hcd, HCCONTROL)); seq_printf(s, "HCCMDSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCCMDSTAT), isp1362_read_reg32(isp1362_hcd, HCCMDSTAT)); seq_printf(s, "HCINTSTAT [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTSTAT), isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); seq_printf(s, "HCINTENB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTENB), isp1362_read_reg32(isp1362_hcd, HCINTENB)); seq_printf(s, "HCFMINTVL [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMINTVL), isp1362_read_reg32(isp1362_hcd, HCFMINTVL)); seq_printf(s, "HCFMREM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMREM), isp1362_read_reg32(isp1362_hcd, HCFMREM)); seq_printf(s, "HCFMNUM [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCFMNUM), isp1362_read_reg32(isp1362_hcd, HCFMNUM)); seq_printf(s, "HCLSTHRESH [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCLSTHRESH), isp1362_read_reg32(isp1362_hcd, HCLSTHRESH)); seq_printf(s, "HCRHDESCA [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCA), isp1362_read_reg32(isp1362_hcd, HCRHDESCA)); seq_printf(s, "HCRHDESCB [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHDESCB), isp1362_read_reg32(isp1362_hcd, HCRHDESCB)); seq_printf(s, "HCRHSTATUS [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHSTATUS), isp1362_read_reg32(isp1362_hcd, HCRHSTATUS)); seq_printf(s, "HCRHPORT1 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT1), isp1362_read_reg32(isp1362_hcd, HCRHPORT1)); seq_printf(s, "HCRHPORT2 [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCRHPORT2), isp1362_read_reg32(isp1362_hcd, HCRHPORT2)); seq_printf(s, "\n"); seq_printf(s, "HCHWCFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCHWCFG), isp1362_read_reg16(isp1362_hcd, HCHWCFG)); seq_printf(s, "HCDMACFG [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCDMACFG), isp1362_read_reg16(isp1362_hcd, HCDMACFG)); seq_printf(s, "HCXFERCTR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCXFERCTR), isp1362_read_reg16(isp1362_hcd, HCXFERCTR)); seq_printf(s, "HCuPINT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINT), isp1362_read_reg16(isp1362_hcd, HCuPINT)); seq_printf(s, "HCuPINTENB [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCuPINTENB), isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); seq_printf(s, 
"HCCHIPID [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCCHIPID), isp1362_read_reg16(isp1362_hcd, HCCHIPID)); seq_printf(s, "HCSCRATCH [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCSCRATCH), isp1362_read_reg16(isp1362_hcd, HCSCRATCH)); seq_printf(s, "HCBUFSTAT [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCBUFSTAT), isp1362_read_reg16(isp1362_hcd, HCBUFSTAT)); seq_printf(s, "HCDIRADDR [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCDIRADDR), isp1362_read_reg32(isp1362_hcd, HCDIRADDR)); #if 0 seq_printf(s, "HCDIRDATA [%02x] %04x\n", ISP1362_REG_NO(HCDIRDATA), isp1362_read_reg16(isp1362_hcd, HCDIRDATA)); #endif seq_printf(s, "HCISTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLBUFSZ), isp1362_read_reg16(isp1362_hcd, HCISTLBUFSZ)); seq_printf(s, "HCISTLRATE [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCISTLRATE), isp1362_read_reg16(isp1362_hcd, HCISTLRATE)); seq_printf(s, "\n"); seq_printf(s, "HCINTLBUFSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBUFSZ), isp1362_read_reg16(isp1362_hcd, HCINTLBUFSZ)); seq_printf(s, "HCINTLBLKSZ[%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLBLKSZ), isp1362_read_reg16(isp1362_hcd, HCINTLBLKSZ)); seq_printf(s, "HCINTLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLDONE), isp1362_read_reg32(isp1362_hcd, HCINTLDONE)); seq_printf(s, "HCINTLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLSKIP), isp1362_read_reg32(isp1362_hcd, HCINTLSKIP)); seq_printf(s, "HCINTLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLLAST), isp1362_read_reg32(isp1362_hcd, HCINTLLAST)); seq_printf(s, "HCINTLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCINTLCURR), isp1362_read_reg16(isp1362_hcd, HCINTLCURR)); seq_printf(s, "\n"); seq_printf(s, "HCATLBUFSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBUFSZ), isp1362_read_reg16(isp1362_hcd, HCATLBUFSZ)); seq_printf(s, "HCATLBLKSZ [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLBLKSZ), isp1362_read_reg16(isp1362_hcd, HCATLBLKSZ)); #if 0 seq_printf(s, "HCATLDONE [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDONE), isp1362_read_reg32(isp1362_hcd, HCATLDONE)); #endif seq_printf(s, "HCATLSKIP [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLSKIP), isp1362_read_reg32(isp1362_hcd, HCATLSKIP)); seq_printf(s, "HCATLLAST [%02x] %08x\n", ISP1362_REG_NO(ISP1362_REG_HCATLLAST), isp1362_read_reg32(isp1362_hcd, HCATLLAST)); seq_printf(s, "HCATLCURR [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLCURR), isp1362_read_reg16(isp1362_hcd, HCATLCURR)); seq_printf(s, "\n"); seq_printf(s, "HCATLDTC [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTC), isp1362_read_reg16(isp1362_hcd, HCATLDTC)); seq_printf(s, "HCATLDTCTO [%02x] %04x\n", ISP1362_REG_NO(ISP1362_REG_HCATLDTCTO), isp1362_read_reg16(isp1362_hcd, HCATLDTCTO)); } static int isp1362_show(struct seq_file *s, void *unused) { struct isp1362_hcd *isp1362_hcd = s->private; struct isp1362_ep *ep; int i; seq_printf(s, "%s\n%s version %s\n", isp1362_hcd_to_hcd(isp1362_hcd)->product_desc, hcd_name, DRIVER_VERSION); /* collect statistics to help estimate potential win for * DMA engines that care about alignment (PXA) */ seq_printf(s, "alignment: 16b/%ld 8b/%ld 4b/%ld 2b/%ld 1b/%ld\n", isp1362_hcd->stat16, isp1362_hcd->stat8, isp1362_hcd->stat4, isp1362_hcd->stat2, isp1362_hcd->stat1); seq_printf(s, "max # ptds in ATL fifo: %d\n", isp1362_hcd->atl_queue.stat_maxptds); seq_printf(s, "max # ptds in INTL fifo: %d\n", isp1362_hcd->intl_queue.stat_maxptds); seq_printf(s, "max # ptds in ISTL fifo: %d\n", max(isp1362_hcd->istl_queue[0] .stat_maxptds, isp1362_hcd->istl_queue[1] .stat_maxptds)); /* 
FIXME: don't show the following in suspended state */ spin_lock_irq(&isp1362_hcd->lock); dump_irq(s, "hc_irq_enable", isp1362_read_reg16(isp1362_hcd, HCuPINTENB)); dump_irq(s, "hc_irq_status", isp1362_read_reg16(isp1362_hcd, HCuPINT)); dump_int(s, "ohci_int_enable", isp1362_read_reg32(isp1362_hcd, HCINTENB)); dump_int(s, "ohci_int_status", isp1362_read_reg32(isp1362_hcd, HCINTSTAT)); dump_ctrl(s, "ohci_control", isp1362_read_reg32(isp1362_hcd, HCCONTROL)); for (i = 0; i < NUM_ISP1362_IRQS; i++) if (isp1362_hcd->irq_stat[i]) seq_printf(s, "%-15s: %d\n", ISP1362_INT_NAME(i), isp1362_hcd->irq_stat[i]); dump_regs(s, isp1362_hcd); list_for_each_entry(ep, &isp1362_hcd->async, schedule) { struct urb *urb; seq_printf(s, "%p, ep%d%s, maxpacket %d:\n", ep, ep->epnum, ({ char *s; switch (ep->nextpid) { case USB_PID_IN: s = "in"; break; case USB_PID_OUT: s = "out"; break; case USB_PID_SETUP: s = "setup"; break; case USB_PID_ACK: s = "status"; break; default: s = "?"; break; } s;}), ep->maxpacket) ; list_for_each_entry(urb, &ep->hep->urb_list, urb_list) { seq_printf(s, " urb%p, %d/%d\n", urb, urb->actual_length, urb->transfer_buffer_length); } } if (!list_empty(&isp1362_hcd->async)) seq_printf(s, "\n"); dump_ptd_queue(&isp1362_hcd->atl_queue); seq_printf(s, "periodic size= %d\n", PERIODIC_SIZE); list_for_each_entry(ep, &isp1362_hcd->periodic, schedule) { seq_printf(s, "branch:%2d load:%3d PTD[%d] $%04x:\n", ep->branch, isp1362_hcd->load[ep->branch], ep->ptd_index, ep->ptd_offset); seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", ep->interval, ep, (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", ep->udev->devnum, ep->epnum, (ep->epnum == 0) ? "" : ((ep->nextpid == USB_PID_IN) ? "in" : "out"), ep->maxpacket); } dump_ptd_queue(&isp1362_hcd->intl_queue); seq_printf(s, "ISO:\n"); list_for_each_entry(ep, &isp1362_hcd->isoc, schedule) { seq_printf(s, " %d/%p (%sdev%d ep%d%s max %d)\n", ep->interval, ep, (ep->udev->speed == USB_SPEED_FULL) ? "" : "ls ", ep->udev->devnum, ep->epnum, (ep->epnum == 0) ? "" : ((ep->nextpid == USB_PID_IN) ? 
"in" : "out"), ep->maxpacket); } spin_unlock_irq(&isp1362_hcd->lock); seq_printf(s, "\n"); return 0; } DEFINE_SHOW_ATTRIBUTE(isp1362); /* expect just one isp1362_hcd per system */ static void create_debug_file(struct isp1362_hcd *isp1362_hcd) { debugfs_create_file("isp1362", S_IRUGO, usb_debug_root, isp1362_hcd, &isp1362_fops); } static void remove_debug_file(struct isp1362_hcd *isp1362_hcd) { debugfs_lookup_and_remove("isp1362", usb_debug_root); } /*-------------------------------------------------------------------------*/ static void __isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) { int tmp = 20; isp1362_write_reg16(isp1362_hcd, HCSWRES, HCSWRES_MAGIC); isp1362_write_reg32(isp1362_hcd, HCCMDSTAT, OHCI_HCR); while (--tmp) { mdelay(1); if (!(isp1362_read_reg32(isp1362_hcd, HCCMDSTAT) & OHCI_HCR)) break; } if (!tmp) pr_err("Software reset timeout\n"); } static void isp1362_sw_reset(struct isp1362_hcd *isp1362_hcd) { unsigned long flags; spin_lock_irqsave(&isp1362_hcd->lock, flags); __isp1362_sw_reset(isp1362_hcd); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); } static int isp1362_mem_config(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; u32 total; u16 istl_size = ISP1362_ISTL_BUFSIZE; u16 intl_blksize = ISP1362_INTL_BLKSIZE + PTD_HEADER_SIZE; u16 intl_size = ISP1362_INTL_BUFFERS * intl_blksize; u16 atl_blksize = ISP1362_ATL_BLKSIZE + PTD_HEADER_SIZE; u16 atl_buffers = (ISP1362_BUF_SIZE - (istl_size + intl_size)) / atl_blksize; u16 atl_size; int i; WARN_ON(istl_size & 3); WARN_ON(atl_blksize & 3); WARN_ON(intl_blksize & 3); WARN_ON(atl_blksize < PTD_HEADER_SIZE); WARN_ON(intl_blksize < PTD_HEADER_SIZE); BUG_ON((unsigned)ISP1362_INTL_BUFFERS > 32); if (atl_buffers > 32) atl_buffers = 32; atl_size = atl_buffers * atl_blksize; total = atl_size + intl_size + istl_size; dev_info(hcd->self.controller, "ISP1362 Memory usage:\n"); dev_info(hcd->self.controller, " ISTL: 2 * %4d: %4d @ $%04x:$%04x\n", istl_size / 2, istl_size, 0, istl_size / 2); dev_info(hcd->self.controller, " INTL: %4d * (%3zu+8): %4d @ $%04x\n", ISP1362_INTL_BUFFERS, intl_blksize - PTD_HEADER_SIZE, intl_size, istl_size); dev_info(hcd->self.controller, " ATL : %4d * (%3zu+8): %4d @ $%04x\n", atl_buffers, atl_blksize - PTD_HEADER_SIZE, atl_size, istl_size + intl_size); dev_info(hcd->self.controller, " USED/FREE: %4d %4d\n", total, ISP1362_BUF_SIZE - total); if (total > ISP1362_BUF_SIZE) { dev_err(hcd->self.controller, "%s: Memory requested: %d, available %d\n", __func__, total, ISP1362_BUF_SIZE); return -ENOMEM; } spin_lock_irqsave(&isp1362_hcd->lock, flags); for (i = 0; i < 2; i++) { isp1362_hcd->istl_queue[i].buf_start = i * istl_size / 2, isp1362_hcd->istl_queue[i].buf_size = istl_size / 2; isp1362_hcd->istl_queue[i].blk_size = 4; INIT_LIST_HEAD(&isp1362_hcd->istl_queue[i].active); snprintf(isp1362_hcd->istl_queue[i].name, sizeof(isp1362_hcd->istl_queue[i].name), "ISTL%d", i); DBG(3, "%s: %5s buf $%04x %d\n", __func__, isp1362_hcd->istl_queue[i].name, isp1362_hcd->istl_queue[i].buf_start, isp1362_hcd->istl_queue[i].buf_size); } isp1362_write_reg16(isp1362_hcd, HCISTLBUFSZ, istl_size / 2); isp1362_hcd->intl_queue.buf_start = istl_size; isp1362_hcd->intl_queue.buf_size = intl_size; isp1362_hcd->intl_queue.buf_count = ISP1362_INTL_BUFFERS; isp1362_hcd->intl_queue.blk_size = intl_blksize; isp1362_hcd->intl_queue.buf_avail = isp1362_hcd->intl_queue.buf_count; isp1362_hcd->intl_queue.skip_map = ~0; INIT_LIST_HEAD(&isp1362_hcd->intl_queue.active); 
isp1362_write_reg16(isp1362_hcd, HCINTLBUFSZ, isp1362_hcd->intl_queue.buf_size); isp1362_write_reg16(isp1362_hcd, HCINTLBLKSZ, isp1362_hcd->intl_queue.blk_size - PTD_HEADER_SIZE); isp1362_write_reg32(isp1362_hcd, HCINTLSKIP, ~0); isp1362_write_reg32(isp1362_hcd, HCINTLLAST, 1 << (ISP1362_INTL_BUFFERS - 1)); isp1362_hcd->atl_queue.buf_start = istl_size + intl_size; isp1362_hcd->atl_queue.buf_size = atl_size; isp1362_hcd->atl_queue.buf_count = atl_buffers; isp1362_hcd->atl_queue.blk_size = atl_blksize; isp1362_hcd->atl_queue.buf_avail = isp1362_hcd->atl_queue.buf_count; isp1362_hcd->atl_queue.skip_map = ~0; INIT_LIST_HEAD(&isp1362_hcd->atl_queue.active); isp1362_write_reg16(isp1362_hcd, HCATLBUFSZ, isp1362_hcd->atl_queue.buf_size); isp1362_write_reg16(isp1362_hcd, HCATLBLKSZ, isp1362_hcd->atl_queue.blk_size - PTD_HEADER_SIZE); isp1362_write_reg32(isp1362_hcd, HCATLSKIP, ~0); isp1362_write_reg32(isp1362_hcd, HCATLLAST, 1 << (atl_buffers - 1)); snprintf(isp1362_hcd->atl_queue.name, sizeof(isp1362_hcd->atl_queue.name), "ATL"); snprintf(isp1362_hcd->intl_queue.name, sizeof(isp1362_hcd->intl_queue.name), "INTL"); DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__, isp1362_hcd->intl_queue.name, isp1362_hcd->intl_queue.buf_start, ISP1362_INTL_BUFFERS, isp1362_hcd->intl_queue.blk_size, isp1362_hcd->intl_queue.buf_size); DBG(3, "%s: %5s buf $%04x %2d * %4d = %4d\n", __func__, isp1362_hcd->atl_queue.name, isp1362_hcd->atl_queue.buf_start, atl_buffers, isp1362_hcd->atl_queue.blk_size, isp1362_hcd->atl_queue.buf_size); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); return 0; } static int isp1362_hc_reset(struct usb_hcd *hcd) { int ret = 0; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long t; unsigned long timeout = 100; unsigned long flags; int clkrdy = 0; pr_debug("%s:\n", __func__); if (isp1362_hcd->board && isp1362_hcd->board->reset) { isp1362_hcd->board->reset(hcd->self.controller, 1); msleep(20); if (isp1362_hcd->board->clock) isp1362_hcd->board->clock(hcd->self.controller, 1); isp1362_hcd->board->reset(hcd->self.controller, 0); } else isp1362_sw_reset(isp1362_hcd); /* chip has been reset. 
First we need to see a clock */ t = jiffies + msecs_to_jiffies(timeout); while (!clkrdy && time_before_eq(jiffies, t)) { spin_lock_irqsave(&isp1362_hcd->lock, flags); clkrdy = isp1362_read_reg16(isp1362_hcd, HCuPINT) & HCuPINT_CLKRDY; spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (!clkrdy) msleep(4); } spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg16(isp1362_hcd, HCuPINT, HCuPINT_CLKRDY); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (!clkrdy) { pr_err("Clock not ready after %lums\n", timeout); ret = -ENODEV; } return ret; } static void isp1362_hc_stop(struct usb_hcd *hcd) { struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; u32 tmp; pr_debug("%s:\n", __func__); del_timer_sync(&hcd->rh_timer); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); /* Switch off power for all ports */ tmp = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); tmp &= ~(RH_A_NPS | RH_A_PSM); isp1362_write_reg32(isp1362_hcd, HCRHDESCA, tmp); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); /* Reset the chip */ if (isp1362_hcd->board && isp1362_hcd->board->reset) isp1362_hcd->board->reset(hcd->self.controller, 1); else __isp1362_sw_reset(isp1362_hcd); if (isp1362_hcd->board && isp1362_hcd->board->clock) isp1362_hcd->board->clock(hcd->self.controller, 0); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); } #ifdef CHIP_BUFFER_TEST static int isp1362_chip_test(struct isp1362_hcd *isp1362_hcd) { int ret = 0; u16 *ref; unsigned long flags; ref = kmalloc(2 * ISP1362_BUF_SIZE, GFP_KERNEL); if (ref) { int offset; u16 *tst = &ref[ISP1362_BUF_SIZE / 2]; for (offset = 0; offset < ISP1362_BUF_SIZE / 2; offset++) { ref[offset] = ~offset; tst[offset] = offset; } for (offset = 0; offset < 4; offset++) { int j; for (j = 0; j < 8; j++) { spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_buffer(isp1362_hcd, (u8 *)ref + offset, 0, j); isp1362_read_buffer(isp1362_hcd, (u8 *)tst + offset, 0, j); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, j)) { ret = -ENODEV; pr_err("%s: memory check with %d byte offset %d failed\n", __func__, j, offset); dump_data((u8 *)ref + offset, j); dump_data((u8 *)tst + offset, j); } } } spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_buffer(isp1362_hcd, ref, 0, ISP1362_BUF_SIZE); isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, ISP1362_BUF_SIZE)) { ret = -ENODEV; pr_err("%s: memory check failed\n", __func__); dump_data((u8 *)tst, ISP1362_BUF_SIZE / 2); } for (offset = 0; offset < 256; offset++) { int test_size = 0; yield(); memset(tst, 0, ISP1362_BUF_SIZE); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); isp1362_read_buffer(isp1362_hcd, tst, 0, ISP1362_BUF_SIZE); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (memcmp(tst, tst + (ISP1362_BUF_SIZE / (2 * sizeof(*tst))), ISP1362_BUF_SIZE / 2)) { pr_err("%s: Failed to clear buffer\n", __func__); dump_data((u8 *)tst, ISP1362_BUF_SIZE); break; } spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_buffer(isp1362_hcd, ref, offset * 2, PTD_HEADER_SIZE); isp1362_write_buffer(isp1362_hcd, ref + PTD_HEADER_SIZE / sizeof(*ref), offset * 2 + PTD_HEADER_SIZE, test_size); isp1362_read_buffer(isp1362_hcd, tst, offset * 2, PTD_HEADER_SIZE + test_size); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { dump_data(((u8 
*)ref) + offset, PTD_HEADER_SIZE + test_size); dump_data((u8 *)tst, PTD_HEADER_SIZE + test_size); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_read_buffer(isp1362_hcd, tst, offset * 2, PTD_HEADER_SIZE + test_size); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if (memcmp(ref, tst, PTD_HEADER_SIZE + test_size)) { ret = -ENODEV; pr_err("%s: memory check with offset %02x failed\n", __func__, offset); break; } pr_warn("%s: memory check with offset %02x ok after second read\n", __func__, offset); } } kfree(ref); } return ret; } #endif static int isp1362_hc_start(struct usb_hcd *hcd) { int ret; struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); struct isp1362_platform_data *board = isp1362_hcd->board; u16 hwcfg; u16 chipid; unsigned long flags; pr_debug("%s:\n", __func__); spin_lock_irqsave(&isp1362_hcd->lock, flags); chipid = isp1362_read_reg16(isp1362_hcd, HCCHIPID); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); if ((chipid & HCCHIPID_MASK) != HCCHIPID_MAGIC) { pr_err("%s: Invalid chip ID %04x\n", __func__, chipid); return -ENODEV; } #ifdef CHIP_BUFFER_TEST ret = isp1362_chip_test(isp1362_hcd); if (ret) return -ENODEV; #endif spin_lock_irqsave(&isp1362_hcd->lock, flags); /* clear interrupt status and disable all interrupt sources */ isp1362_write_reg16(isp1362_hcd, HCuPINT, 0xff); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, 0); /* HW conf */ hwcfg = HCHWCFG_INT_ENABLE | HCHWCFG_DBWIDTH(1); if (board->sel15Kres) hwcfg |= HCHWCFG_PULLDOWN_DS2 | ((MAX_ROOT_PORTS > 1) ? HCHWCFG_PULLDOWN_DS1 : 0); if (board->clknotstop) hwcfg |= HCHWCFG_CLKNOTSTOP; if (board->oc_enable) hwcfg |= HCHWCFG_ANALOG_OC; if (board->int_act_high) hwcfg |= HCHWCFG_INT_POL; if (board->int_edge_triggered) hwcfg |= HCHWCFG_INT_TRIGGER; if (board->dreq_act_high) hwcfg |= HCHWCFG_DREQ_POL; if (board->dack_act_high) hwcfg |= HCHWCFG_DACK_POL; isp1362_write_reg16(isp1362_hcd, HCHWCFG, hwcfg); isp1362_show_reg(isp1362_hcd, HCHWCFG); isp1362_write_reg16(isp1362_hcd, HCDMACFG, 0); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); ret = isp1362_mem_config(hcd); if (ret) return ret; spin_lock_irqsave(&isp1362_hcd->lock, flags); /* Root hub conf */ isp1362_hcd->rhdesca = 0; if (board->no_power_switching) isp1362_hcd->rhdesca |= RH_A_NPS; if (board->power_switching_mode) isp1362_hcd->rhdesca |= RH_A_PSM; if (board->potpg) isp1362_hcd->rhdesca |= (board->potpg << 24) & RH_A_POTPGT; else isp1362_hcd->rhdesca |= (25 << 24) & RH_A_POTPGT; isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca & ~RH_A_OCPM); isp1362_write_reg32(isp1362_hcd, HCRHDESCA, isp1362_hcd->rhdesca | RH_A_OCPM); isp1362_hcd->rhdesca = isp1362_read_reg32(isp1362_hcd, HCRHDESCA); isp1362_hcd->rhdescb = RH_B_PPCM; isp1362_write_reg32(isp1362_hcd, HCRHDESCB, isp1362_hcd->rhdescb); isp1362_hcd->rhdescb = isp1362_read_reg32(isp1362_hcd, HCRHDESCB); isp1362_read_reg32(isp1362_hcd, HCFMINTVL); isp1362_write_reg32(isp1362_hcd, HCFMINTVL, (FSMP(FI) << 16) | FI); isp1362_write_reg32(isp1362_hcd, HCLSTHRESH, LSTHRESH); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); isp1362_hcd->hc_control = OHCI_USB_OPER; hcd->state = HC_STATE_RUNNING; spin_lock_irqsave(&isp1362_hcd->lock, flags); /* Set up interrupts */ isp1362_hcd->intenb = OHCI_INTR_MIE | OHCI_INTR_RHSC | OHCI_INTR_UE; isp1362_hcd->intenb |= OHCI_INTR_RD; isp1362_hcd->irqenb = HCuPINT_OPR | HCuPINT_SUSP; isp1362_write_reg32(isp1362_hcd, HCINTENB, isp1362_hcd->intenb); isp1362_write_reg16(isp1362_hcd, HCuPINTENB, isp1362_hcd->irqenb); /* Go operational */ 
	isp1362_write_reg32(isp1362_hcd, HCCONTROL, isp1362_hcd->hc_control);

	/* enable global power */
	isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC | RH_HS_DRWE);

	spin_unlock_irqrestore(&isp1362_hcd->lock, flags);

	return 0;
}

/*-------------------------------------------------------------------------*/

static const struct hc_driver isp1362_hc_driver = {
	.description = hcd_name,
	.product_desc = "ISP1362 Host Controller",
	.hcd_priv_size = sizeof(struct isp1362_hcd),

	.irq = isp1362_irq,
	.flags = HCD_USB11 | HCD_MEMORY,

	.reset = isp1362_hc_reset,
	.start = isp1362_hc_start,
	.stop = isp1362_hc_stop,

	.urb_enqueue = isp1362_urb_enqueue,
	.urb_dequeue = isp1362_urb_dequeue,
	.endpoint_disable = isp1362_endpoint_disable,

	.get_frame_number = isp1362_get_frame,

	.hub_status_data = isp1362_hub_status_data,
	.hub_control = isp1362_hub_control,
	.bus_suspend = isp1362_bus_suspend,
	.bus_resume = isp1362_bus_resume,
};

/*-------------------------------------------------------------------------*/

static void isp1362_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd);

	remove_debug_file(isp1362_hcd);
	DBG(0, "%s: Removing HCD\n", __func__);
	usb_remove_hcd(hcd);
	DBG(0, "%s: put_hcd\n", __func__);
	usb_put_hcd(hcd);
	DBG(0, "%s: Done\n", __func__);
}

static int isp1362_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct isp1362_hcd *isp1362_hcd;
	struct resource *data, *irq_res;
	void __iomem *addr_reg;
	void __iomem *data_reg;
	int irq;
	int retval = 0;
	unsigned int irq_flags = 0;

	if (usb_disabled())
		return -ENODEV;

	/* basic sanity checks first. board-specific init logic should
	 * have initialized these three resources and probably board
	 * specific platform_data. we don't probe for IRQs, and do only
	 * minimal sanity checking.
*/ if (pdev->num_resources < 3) return -ENODEV; irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq_res) return -ENODEV; irq = irq_res->start; addr_reg = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(addr_reg)) return PTR_ERR(addr_reg); data_reg = devm_platform_get_and_ioremap_resource(pdev, 0, &data); if (IS_ERR(data_reg)) return PTR_ERR(data_reg); /* allocate and initialize hcd */ hcd = usb_create_hcd(&isp1362_hc_driver, &pdev->dev, dev_name(&pdev->dev)); if (!hcd) return -ENOMEM; hcd->rsrc_start = data->start; isp1362_hcd = hcd_to_isp1362_hcd(hcd); isp1362_hcd->data_reg = data_reg; isp1362_hcd->addr_reg = addr_reg; isp1362_hcd->next_statechange = jiffies; spin_lock_init(&isp1362_hcd->lock); INIT_LIST_HEAD(&isp1362_hcd->async); INIT_LIST_HEAD(&isp1362_hcd->periodic); INIT_LIST_HEAD(&isp1362_hcd->isoc); INIT_LIST_HEAD(&isp1362_hcd->remove_list); isp1362_hcd->board = dev_get_platdata(&pdev->dev); #if USE_PLATFORM_DELAY if (!isp1362_hcd->board->delay) { dev_err(hcd->self.controller, "No platform delay function given\n"); retval = -ENODEV; goto err; } #endif if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE) irq_flags |= IRQF_TRIGGER_RISING; if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE) irq_flags |= IRQF_TRIGGER_FALLING; if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL) irq_flags |= IRQF_TRIGGER_HIGH; if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL) irq_flags |= IRQF_TRIGGER_LOW; retval = usb_add_hcd(hcd, irq, irq_flags | IRQF_SHARED); if (retval != 0) goto err; device_wakeup_enable(hcd->self.controller); dev_info(&pdev->dev, "%s, irq %d\n", hcd->product_desc, irq); create_debug_file(isp1362_hcd); return 0; err: usb_put_hcd(hcd); return retval; } #ifdef CONFIG_PM static int isp1362_suspend(struct platform_device *pdev, pm_message_t state) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; int retval = 0; DBG(0, "%s: Suspending device\n", __func__); if (state.event == PM_EVENT_FREEZE) { DBG(0, "%s: Suspending root hub\n", __func__); retval = isp1362_bus_suspend(hcd); } else { DBG(0, "%s: Suspending RH ports\n", __func__); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPS); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); } if (retval == 0) pdev->dev.power.power_state = state; return retval; } static int isp1362_resume(struct platform_device *pdev) { struct usb_hcd *hcd = platform_get_drvdata(pdev); struct isp1362_hcd *isp1362_hcd = hcd_to_isp1362_hcd(hcd); unsigned long flags; DBG(0, "%s: Resuming\n", __func__); if (pdev->dev.power.power_state.event == PM_EVENT_SUSPEND) { DBG(0, "%s: Resume RH ports\n", __func__); spin_lock_irqsave(&isp1362_hcd->lock, flags); isp1362_write_reg32(isp1362_hcd, HCRHSTATUS, RH_HS_LPSC); spin_unlock_irqrestore(&isp1362_hcd->lock, flags); return 0; } pdev->dev.power.power_state = PMSG_ON; return isp1362_bus_resume(isp1362_hcd_to_hcd(isp1362_hcd)); } #else #define isp1362_suspend NULL #define isp1362_resume NULL #endif static struct platform_driver isp1362_driver = { .probe = isp1362_probe, .remove = isp1362_remove, .suspend = isp1362_suspend, .resume = isp1362_resume, .driver = { .name = hcd_name, }, }; module_platform_driver(isp1362_driver);
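/*
 * Editor's illustrative sketch (not part of the driver above): a board
 * file matching what isp1362_probe() expects -- MEM resource 0 as the
 * data port, MEM resource 1 as the address/command port, and one IRQ
 * resource whose IORESOURCE_IRQ_* trigger flags select the IRQF_TRIGGER
 * mode. All addresses, the IRQ number, the platform data values and the
 * "isp1362-hcd" name are assumptions for illustration only; the name
 * must match the driver's hcd_name.
 */
#if 0	/* example only, not built here */
static struct resource example_isp1362_resources[] = {
	[0] = DEFINE_RES_MEM(0x08000000, 2),	/* data port */
	[1] = DEFINE_RES_MEM(0x08000002, 2),	/* address port */
	[2] = DEFINE_RES_IRQ(64),		/* controller IRQ */
};

static struct isp1362_platform_data example_isp1362_pdata = {
	.sel15Kres		= 1,	/* use internal 15k pull-downs */
	.oc_enable		= 1,	/* analog overcurrent sensing */
	.int_act_high		= 0,
	.int_edge_triggered	= 0,
};

static struct platform_device example_isp1362_device = {
	.name		= "isp1362-hcd",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_isp1362_pdata,
	},
	.num_resources	= ARRAY_SIZE(example_isp1362_resources),
	.resource	= example_isp1362_resources,
};

/* Board init code would then call:
 *	platform_device_register(&example_isp1362_device);
 */
#endif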
/* * Copyright 2015 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #ifndef _SMU8_SMUMGR_H_ #define _SMU8_SMUMGR_H_ #define MAX_NUM_FIRMWARE 8 #define MAX_NUM_SCRATCH 11 #define SMU8_SCRATCH_SIZE_NONGFX_CLOCKGATING 1024 #define SMU8_SCRATCH_SIZE_NONGFX_GOLDENSETTING 2048 #define SMU8_SCRATCH_SIZE_SDMA_METADATA 1024 #define SMU8_SCRATCH_SIZE_IH ((2*256+1)*4) #define SMU_EnabledFeatureScoreboard_SclkDpmOn 0x00200000 enum smu8_scratch_entry { SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA0 = 0, SMU8_SCRATCH_ENTRY_UCODE_ID_SDMA1, SMU8_SCRATCH_ENTRY_UCODE_ID_CP_CE, SMU8_SCRATCH_ENTRY_UCODE_ID_CP_PFP, SMU8_SCRATCH_ENTRY_UCODE_ID_CP_ME, SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT1, SMU8_SCRATCH_ENTRY_UCODE_ID_CP_MEC_JT2, SMU8_SCRATCH_ENTRY_UCODE_ID_GMCON_RENG, SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_G, SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SCRATCH, SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_ARAM, SMU8_SCRATCH_ENTRY_UCODE_ID_RLC_SRM_DRAM, SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_ERAM, SMU8_SCRATCH_ENTRY_UCODE_ID_DMCU_IRAM, SMU8_SCRATCH_ENTRY_UCODE_ID_POWER_PROFILING, SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_HALT, SMU8_SCRATCH_ENTRY_DATA_ID_SYS_CLOCKGATING, SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_RING_REGS, SMU8_SCRATCH_ENTRY_DATA_ID_NONGFX_REINIT, SMU8_SCRATCH_ENTRY_DATA_ID_SDMA_START, SMU8_SCRATCH_ENTRY_DATA_ID_IH_REGISTERS, SMU8_SCRATCH_ENTRY_SMU8_FUSION_CLKTABLE }; struct smu8_buffer_entry { uint32_t data_size; uint64_t mc_addr; void *kaddr; enum smu8_scratch_entry firmware_ID; struct amdgpu_bo *handle; /* as bo handle used when release bo */ }; struct smu8_register_index_data_pair { uint32_t offset; uint32_t value; }; struct smu8_ih_meta_data { uint32_t command; struct smu8_register_index_data_pair register_index_value_pair[1]; }; struct smu8_smumgr { uint8_t driver_buffer_length; uint8_t scratch_buffer_length; uint16_t toc_entry_used_count; uint16_t toc_entry_initialize_index; uint16_t toc_entry_power_profiling_index; uint16_t toc_entry_aram; uint16_t toc_entry_ih_register_restore_task_index; uint16_t toc_entry_clock_table; uint16_t ih_register_restore_task_size; uint16_t smu_buffer_used_bytes; struct smu8_buffer_entry toc_buffer; struct smu8_buffer_entry smu_buffer; struct smu8_buffer_entry firmware_buffer; struct smu8_buffer_entry driver_buffer[MAX_NUM_FIRMWARE]; struct smu8_buffer_entry meta_data_buffer[MAX_NUM_FIRMWARE]; struct smu8_buffer_entry scratch_buffer[MAX_NUM_SCRATCH]; }; #endif
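/*
 * Editor's illustrative note (not part of smu8_smumgr.h above):
 * SMU8_SCRATCH_SIZE_IH is (2 * 256 + 1) * 4 = 2052 bytes, i.e. one
 * 32-bit command word followed by 256 register index/value pairs --
 * exactly a struct smu8_ih_meta_data sized for 256 entries. A
 * compile-time check of that relationship might look like:
 */
#if 0	/* example only, not built here */
static_assert(sizeof(uint32_t) +
	      256 * sizeof(struct smu8_register_index_data_pair) ==
	      SMU8_SCRATCH_SIZE_IH,
	      "IH scratch must hold one command word plus 256 pairs");
#endif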
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (C) 2015, 2016 ARM Ltd. */ #ifndef __KVM_ARM_VGIC_H #define __KVM_ARM_VGIC_H #include <linux/bits.h> #include <linux/kvm.h> #include <linux/irqreturn.h> #include <linux/kref.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <linux/static_key.h> #include <linux/types.h> #include <linux/xarray.h> #include <kvm/iodev.h> #include <linux/list.h> #include <linux/jump_label.h> #include <linux/irqchip/arm-gic-v4.h> #define VGIC_V3_MAX_CPUS 512 #define VGIC_V2_MAX_CPUS 8 #define VGIC_NR_IRQS_LEGACY 256 #define VGIC_NR_SGIS 16 #define VGIC_NR_PPIS 16 #define VGIC_NR_PRIVATE_IRQS (VGIC_NR_SGIS + VGIC_NR_PPIS) #define VGIC_MAX_SPI 1019 #define VGIC_MAX_RESERVED 1023 #define VGIC_MIN_LPI 8192 #define KVM_IRQCHIP_NUM_PINS (1020 - 32) #define irq_is_ppi(irq) ((irq) >= VGIC_NR_SGIS && (irq) < VGIC_NR_PRIVATE_IRQS) #define irq_is_spi(irq) ((irq) >= VGIC_NR_PRIVATE_IRQS && \ (irq) <= VGIC_MAX_SPI) enum vgic_type { VGIC_V2, /* Good ol' GICv2 */ VGIC_V3, /* New fancy GICv3 */ }; /* same for all guests, as depending only on the _host's_ GIC model */ struct vgic_global { /* type of the host GIC */ enum vgic_type type; /* Physical address of vgic virtual cpu interface */ phys_addr_t vcpu_base; /* GICV mapping, kernel VA */ void __iomem *vcpu_base_va; /* GICV mapping, HYP VA */ void __iomem *vcpu_hyp_va; /* virtual control interface mapping, kernel VA */ void __iomem *vctrl_base; /* virtual control interface mapping, HYP VA */ void __iomem *vctrl_hyp; /* Number of implemented list registers */ int nr_lr; /* Maintenance IRQ number */ unsigned int maint_irq; /* maximum number of VCPUs allowed (GICv2 limits us to 8) */ int max_gic_vcpus; /* Only needed for the legacy KVM_CREATE_IRQCHIP */ bool can_emulate_gicv2; /* Hardware has GICv4? */ bool has_gicv4; bool has_gicv4_1; /* Pseudo GICv3 from outer space */ bool no_hw_deactivation; /* GIC system register CPU interface */ struct static_key_false gicv3_cpuif; u32 ich_vtr_el2; }; extern struct vgic_global kvm_vgic_global_state; #define VGIC_V2_MAX_LRS (1 << 6) #define VGIC_V3_MAX_LRS 16 #define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr) enum vgic_irq_config { VGIC_CONFIG_EDGE = 0, VGIC_CONFIG_LEVEL }; /* * Per-irq ops overriding some common behavious. * * Always called in non-preemptible section and the functions can use * kvm_arm_get_running_vcpu() to get the vcpu pointer for private IRQs. */ struct irq_ops { /* Per interrupt flags for special-cased interrupts */ unsigned long flags; #define VGIC_IRQ_SW_RESAMPLE BIT(0) /* Clear the active state for resampling */ /* * Callback function pointer to in-kernel devices that can tell us the * state of the input level of mapped level-triggered IRQ faster than * peaking into the physical GIC. */ bool (*get_input_level)(int vintid); }; struct vgic_irq { raw_spinlock_t irq_lock; /* Protects the content of the struct */ struct rcu_head rcu; struct list_head ap_list; struct kvm_vcpu *vcpu; /* SGIs and PPIs: The VCPU * SPIs and LPIs: The VCPU whose ap_list * this is queued on. */ struct kvm_vcpu *target_vcpu; /* The VCPU that this interrupt should * be sent to, as a result of the * targets reg (v2) or the * affinity reg (v3). */ u32 intid; /* Guest visible INTID */ bool line_level; /* Level only */ bool pending_latch; /* The pending latch state used to calculate * the pending state for both level * and edge triggered IRQs. 
*/ bool active; /* not used for LPIs */ bool enabled; bool hw; /* Tied to HW IRQ */ struct kref refcount; /* Used for LPIs */ u32 hwintid; /* HW INTID number */ unsigned int host_irq; /* linux irq corresponding to hwintid */ union { u8 targets; /* GICv2 target VCPUs mask */ u32 mpidr; /* GICv3 target VCPU */ }; u8 source; /* GICv2 SGIs only */ u8 active_source; /* GICv2 SGIs only */ u8 priority; u8 group; /* 0 == group 0, 1 == group 1 */ enum vgic_irq_config config; /* Level or edge */ struct irq_ops *ops; void *owner; /* Opaque pointer to reserve an interrupt for in-kernel devices. */ }; static inline bool vgic_irq_needs_resampling(struct vgic_irq *irq) { return irq->ops && (irq->ops->flags & VGIC_IRQ_SW_RESAMPLE); } struct vgic_register_region; struct vgic_its; enum iodev_type { IODEV_CPUIF, IODEV_DIST, IODEV_REDIST, IODEV_ITS }; struct vgic_io_device { gpa_t base_addr; union { struct kvm_vcpu *redist_vcpu; struct vgic_its *its; }; const struct vgic_register_region *regions; enum iodev_type iodev_type; int nr_regions; struct kvm_io_device dev; }; struct vgic_its { /* The base address of the ITS control register frame */ gpa_t vgic_its_base; bool enabled; struct vgic_io_device iodev; struct kvm_device *dev; /* These registers correspond to GITS_BASER{0,1} */ u64 baser_device_table; u64 baser_coll_table; /* Protects the command queue */ struct mutex cmd_lock; u64 cbaser; u32 creadr; u32 cwriter; /* migration ABI revision in use */ u32 abi_rev; /* Protects the device and collection lists */ struct mutex its_lock; struct list_head device_list; struct list_head collection_list; /* * Caches the (device_id, event_id) -> vgic_irq translation for * LPIs that are mapped and enabled. */ struct xarray translation_cache; }; struct vgic_state_iter; struct vgic_redist_region { u32 index; gpa_t base; u32 count; /* number of redistributors or 0 if single region */ u32 free_index; /* index of the next free redistributor */ struct list_head list; }; struct vgic_dist { bool in_kernel; bool ready; bool initialized; /* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */ u32 vgic_model; /* Implementation revision as reported in the GICD_IIDR */ u32 implementation_rev; #define KVM_VGIC_IMP_REV_2 2 /* GICv2 restorable groups */ #define KVM_VGIC_IMP_REV_3 3 /* GICv3 GICR_CTLR.{IW,CES,RWP} */ #define KVM_VGIC_IMP_REV_LATEST KVM_VGIC_IMP_REV_3 /* Userspace can write to GICv2 IGROUPR */ bool v2_groups_user_writable; /* Do injected MSIs require an additional device ID? */ bool msis_require_devid; int nr_spis; /* base addresses in guest physical address space: */ gpa_t vgic_dist_base; /* distributor */ union { /* either a GICv2 CPU interface */ gpa_t vgic_cpu_base; /* or a number of GICv3 redistributor regions */ struct list_head rd_regions; }; /* distributor enabled */ bool enabled; /* Wants SGIs without active state */ bool nassgireq; struct vgic_irq *spis; struct vgic_io_device dist_iodev; bool has_its; bool table_write_in_progress; /* * Contains the attributes and gpa of the LPI configuration table. * Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share * one address across all redistributors. * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables" */ u64 propbaser; #define LPI_XA_MARK_DEBUG_ITER XA_MARK_0 struct xarray lpi_xa; /* used by vgic-debug */ struct vgic_state_iter *iter; /* * GICv4 ITS per-VM data, containing the IRQ domain, the VPE * array, the property table pointer as well as allocation * data. 
This essentially ties the Linux IRQ core and ITS * together, and avoids leaking KVM's data structures anywhere * else. */ struct its_vm its_vm; }; struct vgic_v2_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_apr; u32 vgic_lr[VGIC_V2_MAX_LRS]; unsigned int used_lrs; }; struct vgic_v3_cpu_if { u32 vgic_hcr; u32 vgic_vmcr; u32 vgic_sre; /* Restored only, change ignored */ u32 vgic_ap0r[4]; u32 vgic_ap1r[4]; u64 vgic_lr[VGIC_V3_MAX_LRS]; /* * GICv4 ITS per-VPE data, containing the doorbell IRQ, the * pending table pointer, the its_vm pointer and a few other * HW specific things. As for the its_vm structure, this is * linking the Linux IRQ subsystem and the ITS together. */ struct its_vpe its_vpe; unsigned int used_lrs; }; struct vgic_cpu { /* CPU vif control registers for world switch */ union { struct vgic_v2_cpu_if vgic_v2; struct vgic_v3_cpu_if vgic_v3; }; struct vgic_irq *private_irqs; raw_spinlock_t ap_list_lock; /* Protects the ap_list */ /* * List of IRQs that this VCPU should consider because they are either * Active or Pending (hence the name; AP list), or because they recently * were one of the two and need to be migrated off this list to another * VCPU. */ struct list_head ap_list_head; /* * Members below are used with GICv3 emulation only and represent * parts of the redistributor. */ struct vgic_io_device rd_iodev; struct vgic_redist_region *rdreg; u32 rdreg_index; atomic_t syncr_busy; /* Contains the attributes and gpa of the LPI pending tables. */ u64 pendbaser; /* GICR_CTLR.{ENABLE_LPIS,RWP} */ atomic_t ctlr; /* Cache guest priority bits */ u32 num_pri_bits; /* Cache guest interrupt ID bits */ u32 num_id_bits; }; extern struct static_key_false vgic_v2_cpuif_trap; extern struct static_key_false vgic_v3_cpuif_trap; int kvm_set_legacy_vgic_v2_addr(struct kvm *kvm, struct kvm_arm_device_addr *dev_addr); void kvm_vgic_early_init(struct kvm *kvm); int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu); int kvm_vgic_create(struct kvm *kvm, u32 type); void kvm_vgic_destroy(struct kvm *kvm); void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu); int kvm_vgic_map_resources(struct kvm *kvm); int kvm_vgic_hyp_init(void); void kvm_vgic_init_cpu_hardware(void); int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu, unsigned int intid, bool level, void *owner); int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq, u32 vintid, struct irq_ops *ops); int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid); int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid); bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid); int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu); void kvm_vgic_load(struct kvm_vcpu *vcpu); void kvm_vgic_put(struct kvm_vcpu *vcpu); #define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel)) #define vgic_initialized(k) ((k)->arch.vgic.initialized) #define vgic_ready(k) ((k)->arch.vgic.ready) #define vgic_valid_spi(k, i) (((i) >= VGIC_NR_PRIVATE_IRQS) && \ ((i) < (k)->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) bool kvm_vcpu_has_pending_irqs(struct kvm_vcpu *vcpu); void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu); void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu); void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid); void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1); /** * kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW * * The host's GIC naturally limits the maximum amount of VCPUs a guest * can use. 
*/ static inline int kvm_vgic_get_max_vcpus(void) { return kvm_vgic_global_state.max_gic_vcpus; } /** * kvm_vgic_setup_default_irq_routing: * Setup a default flat gsi routing table mapping all SPIs */ int kvm_vgic_setup_default_irq_routing(struct kvm *kvm); int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner); struct kvm_kernel_irq_routing_entry; int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq, struct kvm_kernel_irq_routing_entry *irq_entry); int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq, struct kvm_kernel_irq_routing_entry *irq_entry); int vgic_v4_load(struct kvm_vcpu *vcpu); void vgic_v4_commit(struct kvm_vcpu *vcpu); int vgic_v4_put(struct kvm_vcpu *vcpu); /* CPU HP callbacks */ void kvm_vgic_cpu_up(void); void kvm_vgic_cpu_down(void); #endif /* __KVM_ARM_VGIC_H */
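/*
 * Illustrative sketch (not part of the header above): how an in-kernel device
 * might wire a host IRQ to a guest PPI using the declarations from this file.
 * The PPI number, callback body and example_* names are hypothetical; only
 * struct irq_ops, VGIC_IRQ_SW_RESAMPLE and the kvm_vgic_* prototypes come from
 * the header itself.
 */
#define EXAMPLE_VPPI	27	/* hypothetical guest-visible PPI */

static bool example_get_input_level(int vintid)
{
	/* Report the current line level of the emulated device. */
	return false;
}

static struct irq_ops example_irq_ops = {
	.flags			= VGIC_IRQ_SW_RESAMPLE,
	.get_input_level	= example_get_input_level,
};

static int example_wire_up(struct kvm_vcpu *vcpu, unsigned int host_irq,
			   void *owner)
{
	int ret;

	/* Reserve the INTID so no other in-kernel user can inject it. */
	ret = kvm_vgic_set_owner(vcpu, EXAMPLE_VPPI, owner);
	if (ret)
		return ret;

	/* Tie the host IRQ to the guest PPI, with SW resampling. */
	ret = kvm_vgic_map_phys_irq(vcpu, host_irq, EXAMPLE_VPPI,
				    &example_irq_ops);
	if (ret)
		return ret;

	/* Raise the line; the vgic queues it on the vcpu's ap_list. */
	return kvm_vgic_inject_irq(vcpu->kvm, vcpu, EXAMPLE_VPPI, true, owner);
}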
/* * Copyright 2018 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef _DMUB_DC_SRV_H_ #define _DMUB_DC_SRV_H_ #include "dm_services_types.h" #include "dmub/dmub_srv.h" struct dmub_srv; struct dc; struct pipe_ctx; struct dc_crtc_timing_adjust; struct dc_crtc_timing; struct dc_state; struct dc_surface_update; struct dc_reg_helper_state { bool gather_in_progress; uint32_t same_addr_count; bool should_burst_write; union dmub_rb_cmd cmd_data; unsigned int reg_seq_count; }; struct dc_dmub_srv { struct dmub_srv *dmub; struct dc_reg_helper_state reg_helper_offload; struct dc_context *ctx; void *dm; int32_t idle_exit_counter; union dmub_shared_state_ips_driver_signals driver_signals; bool idle_allowed; bool needs_idle_wake; }; void dc_dmub_srv_wait_idle(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_optimized_init_done(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_cmd_list_queue_execute(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list); bool dc_dmub_srv_wait_for_idle(struct dc_dmub_srv *dc_dmub_srv, enum dm_dmub_wait_type wait_type, union dmub_rb_cmd *cmd_list); bool dc_dmub_srv_cmd_run(struct dc_dmub_srv *dc_dmub_srv, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); bool dc_dmub_srv_cmd_run_list(struct dc_dmub_srv *dc_dmub_srv, unsigned int count, union dmub_rb_cmd *cmd_list, enum dm_dmub_wait_type wait_type); bool dc_dmub_srv_notify_stream_mask(struct dc_dmub_srv *dc_dmub_srv, unsigned int stream_mask); bool dc_dmub_srv_is_restore_required(struct dc_dmub_srv *dc_dmub_srv); bool dc_dmub_srv_get_dmub_outbox0_msg(const struct dc *dc, struct dmcub_trace_buf_entry *entry); void dc_dmub_trace_event_control(struct dc *dc, bool enable); void dc_dmub_srv_drr_update_cmd(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max); void dc_dmub_srv_set_drr_manual_trigger_cmd(struct dc *dc, uint32_t tg_inst); bool dc_dmub_srv_p_state_delegate(struct dc *dc, bool enable_pstate, struct dc_state *context); void dc_dmub_srv_query_caps_cmd(struct dc_dmub_srv *dc_dmub_srv); void dc_dmub_srv_get_visual_confirm_color_cmd(struct dc *dc, struct pipe_ctx *pipe_ctx); void dc_dmub_srv_clear_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_wait_for_inbox0_ack(struct dc_dmub_srv *dmub_srv); void dc_dmub_srv_send_inbox0_cmd(struct dc_dmub_srv *dmub_srv, union dmub_inbox0_data_register data); bool dc_dmub_srv_get_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv, struct dmub_diagnostic_data *dmub_oca); 
void dc_dmub_setup_subvp_dmub_command(struct dc *dc, struct dc_state *context, bool enable); void dc_dmub_srv_log_diagnostic_data(struct dc_dmub_srv *dc_dmub_srv); void dc_send_update_cursor_info_to_dmu(struct pipe_ctx *pCtx, uint8_t pipe_idx); bool dc_dmub_check_min_version(struct dmub_srv *srv); void dc_dmub_srv_enable_dpia_trace(const struct dc *dc); void dc_dmub_srv_subvp_save_surf_addr(const struct dc_dmub_srv *dc_dmub_srv, const struct dc_plane_address *addr, uint8_t subvp_index); bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait); void dc_dmub_srv_apply_idle_power_optimizations(const struct dc *dc, bool allow_idle); /** * dc_dmub_srv_set_power_state() - Sets the power state for DMUB service. * * Controls whether messaging the DMCUB or interfacing with it via HW register * interaction is permissible. * * @dc_dmub_srv - The DC DMUB service pointer * @power_state - the DC power state */ void dc_dmub_srv_set_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state); /** * dc_dmub_srv_notify_fw_dc_power_state() - Notifies firmware of the DC power state. * * Differs from dc_dmub_srv_set_power_state in that it needs to access HW in order * to message DMCUB of the state transition. Should come after the D0 exit and * before D3 set power state. * * @dc_dmub_srv - The DC DMUB service pointer * @power_state - the DC power state */ void dc_dmub_srv_notify_fw_dc_power_state(struct dc_dmub_srv *dc_dmub_srv, enum dc_acpi_cm_power_state power_state); /** * dc_dmub_srv_should_detect() - Checks if link detection is required. * * While in idle power states we may need the driver to manually redetect in * the case of a missing hotplug. Should be called from a polling timer. * * Return: true if redetection is required. */ bool dc_dmub_srv_should_detect(struct dc_dmub_srv *dc_dmub_srv); /** * dc_wake_and_execute_dmub_cmd() - Wrapper for DMUB command execution. * * Refer to dc_wake_and_execute_dmub_cmd_list() for usage and limitations. * This function is a convenience wrapper for a single command execution. * * @ctx: DC context * @cmd: The command to send/receive * @wait_type: The wait behavior for the execution * * Return: true on command submission success, false otherwise */ bool dc_wake_and_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); /** * dc_wake_and_execute_dmub_cmd_list() - Wrapper for DMUB command list execution. * * If the DMCUB hardware was asleep then it wakes the DMUB before * executing the command and attempts to re-enter idle if the command * submission was successful. * * This should be the preferred command submission interface provided * the DC lock is acquired. * * Entry into and exit from idle power optimizations would otherwise need to * be performed manually through dc_allow_idle_optimizations(). * * @ctx: DC context * @count: Number of commands to send/receive * @cmd: Array of commands to send * @wait_type: The wait behavior for the execution * * Return: true on command submission success, false otherwise */ bool dc_wake_and_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type); /** * dc_wake_and_execute_gpint() - Wrapper for a DMUB GPINT command execution. * * @ctx: DC context * @command_code: The command ID to send to DMCUB * @param: The parameter to message DMCUB * @response: Optional response out value - may be NULL.
* @wait_type: The wait behavior for the execution */ bool dc_wake_and_execute_gpint(const struct dc_context *ctx, enum dmub_gpint_command command_code, uint16_t param, uint32_t *response, enum dm_dmub_wait_type wait_type); void dc_dmub_srv_fams2_update_config(struct dc *dc, struct dc_state *context, bool enable); void dc_dmub_srv_fams2_drr_update(struct dc *dc, uint32_t tg_inst, uint32_t vtotal_min, uint32_t vtotal_max, uint32_t vtotal_mid, uint32_t vtotal_mid_frame_num, bool program_manual_trigger); void dc_dmub_srv_fams2_passthrough_flip( struct dc *dc, struct dc_state *state, struct dc_stream_state *stream, struct dc_surface_update *srf_updates, int surface_count); /** * struct ips_residency_info - struct containing info from dmub_ips_residency_stats * * @ips_mode: The mode of IPS that the following stats pertain to * @residency_percent: The percentage of time spent in given IPS mode in millipercent * @entry_counter: The number of entries made into this IPS state * @total_active_time_us: uint32_t array of length 2 representing time in the given IPS mode * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits. * @total_inactive_time_us: uint32_t array of length 2 representing time outside the given IPS mode * in microseconds. Index 0 is lower 32 bits, index 1 is upper 32 bits. * @histogram: Histogram of given IPS state durations - bucket definitions in dmub_ips.c */ struct ips_residency_info { enum dmub_ips_mode ips_mode; unsigned int residency_percent; unsigned int entry_counter; unsigned int total_active_time_us[2]; unsigned int total_inactive_time_us[2]; unsigned int histogram[16]; }; /** * dc_dmub_srv_ips_residency_cntl() - Controls IPS residency measurement status * * @dc_dmub_srv: The DC DMUB service pointer * @start_measurement: Describes whether to start or stop measurement * * Return: true if GPINT was sent successfully, false otherwise */ bool dc_dmub_srv_ips_residency_cntl(struct dc_dmub_srv *dc_dmub_srv, bool start_measurement); /** * dc_dmub_srv_ips_query_residency_info() - Queries DMCUB for residency info * * @dc_dmub_srv: The DC DMUB service pointer * @output: Output struct to copy the residency info to */ void dc_dmub_srv_ips_query_residency_info(struct dc_dmub_srv *dc_dmub_srv, struct ips_residency_info *output); #endif /* _DMUB_DC_SRV_H_ */
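/*
 * Illustrative sketch (not part of the header above): reading back IPS
 * residency statistics with the helpers declared in this file. The caller,
 * the decision to stop the measurement window first, and the pr_info()
 * reporting are assumptions; only the dc_dmub_srv_ips_* prototypes and
 * struct ips_residency_info come from this header.
 */
static void example_dump_ips_residency(struct dc_dmub_srv *dc_dmub_srv)
{
	struct ips_residency_info info = {0};

	/* Stop an ongoing measurement window before reading it back. */
	if (!dc_dmub_srv_ips_residency_cntl(dc_dmub_srv, false))
		return;

	dc_dmub_srv_ips_query_residency_info(dc_dmub_srv, &info);

	pr_info("IPS mode %d: %u millipercent, %u entries\n",
		info.ips_mode, info.residency_percent, info.entry_counter);
}
/* A new window would be started with dc_dmub_srv_ips_residency_cntl(dc_dmub_srv, true). */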
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BTRFS_EXTENT_IO_TREE_H #define BTRFS_EXTENT_IO_TREE_H #include <linux/rbtree.h> #include <linux/spinlock.h> #include <linux/refcount.h> #include <linux/list.h> #include <linux/wait.h> #include "misc.h" struct extent_changeset; struct btrfs_fs_info; struct btrfs_inode; /* Bits for the extent state */ enum { ENUM_BIT(EXTENT_DIRTY), ENUM_BIT(EXTENT_UPTODATE), ENUM_BIT(EXTENT_LOCKED), ENUM_BIT(EXTENT_DIO_LOCKED), ENUM_BIT(EXTENT_NEW), ENUM_BIT(EXTENT_DELALLOC), ENUM_BIT(EXTENT_DEFRAG), ENUM_BIT(EXTENT_BOUNDARY), ENUM_BIT(EXTENT_NODATASUM), ENUM_BIT(EXTENT_CLEAR_META_RESV), ENUM_BIT(EXTENT_NEED_WAIT), ENUM_BIT(EXTENT_NORESERVE), ENUM_BIT(EXTENT_QGROUP_RESERVED), ENUM_BIT(EXTENT_CLEAR_DATA_RESV), /* * Must be cleared only during ordered extent completion or on error * paths if we did not manage to submit bios and create the ordered * extents for the range. Should not be cleared during page release * and page invalidation (if there is an ordered extent in flight), * that is left for the ordered extent completion. */ ENUM_BIT(EXTENT_DELALLOC_NEW), /* * When an ordered extent successfully completes for a region marked as * a new delalloc range, use this flag when clearing a new delalloc * range to indicate that the VFS' inode number of bytes should be * incremented and the inode's new delalloc bytes decremented, in an * atomic way to prevent races with stat(2). */ ENUM_BIT(EXTENT_ADD_INODE_BYTES), /* * Set during truncate when we're clearing an entire range and we just * want the extent states to go away. */ ENUM_BIT(EXTENT_CLEAR_ALL_BITS), /* * This must be last. * * Bit not representing a state but a request for NOWAIT semantics, * e.g. when allocating memory, and must be masked out from the other * bits. */ ENUM_BIT(EXTENT_NOWAIT) }; #define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \ EXTENT_CLEAR_DATA_RESV) #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | \ EXTENT_ADD_INODE_BYTES | \ EXTENT_CLEAR_ALL_BITS) #define EXTENT_LOCK_BITS (EXTENT_LOCKED | EXTENT_DIO_LOCKED) /* * Redefined bits above which are used only in the device allocation tree, * shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV * / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit * manipulation functions */ #define CHUNK_ALLOCATED EXTENT_DIRTY #define CHUNK_TRIMMED EXTENT_DEFRAG #define CHUNK_STATE_MASK (CHUNK_ALLOCATED | \ CHUNK_TRIMMED) enum { IO_TREE_FS_PINNED_EXTENTS, IO_TREE_FS_EXCLUDED_EXTENTS, IO_TREE_BTREE_INODE_IO, IO_TREE_INODE_IO, IO_TREE_RELOC_BLOCKS, IO_TREE_TRANS_DIRTY_PAGES, IO_TREE_ROOT_DIRTY_LOG_PAGES, IO_TREE_INODE_FILE_EXTENT, IO_TREE_LOG_CSUM_RANGE, IO_TREE_SELFTEST, IO_TREE_DEVICE_ALLOC_STATE, }; struct extent_io_tree { struct rb_root state; /* * The fs_info is needed for trace points, a tree attached to an inode * needs the inode. 
* * owner == IO_TREE_INODE_IO - then inode is valid and fs_info can be * accessed as inode->root->fs_info */ union { struct btrfs_fs_info *fs_info; struct btrfs_inode *inode; }; /* Who owns this io tree, should be one of IO_TREE_* */ u8 owner; spinlock_t lock; }; struct extent_state { u64 start; u64 end; /* inclusive */ struct rb_node rb_node; /* ADD NEW ELEMENTS AFTER THIS */ wait_queue_head_t wq; refcount_t refs; u32 state; #ifdef CONFIG_BTRFS_DEBUG struct list_head leak_list; #endif }; struct btrfs_inode *extent_io_tree_to_inode(struct extent_io_tree *tree); const struct btrfs_inode *extent_io_tree_to_inode_const(const struct extent_io_tree *tree); const struct btrfs_fs_info *extent_io_tree_to_fs_info(const struct extent_io_tree *tree); void extent_io_tree_init(struct btrfs_fs_info *fs_info, struct extent_io_tree *tree, unsigned int owner); void extent_io_tree_release(struct extent_io_tree *tree); int __lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached); bool __try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached); static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return __lock_extent(tree, start, end, EXTENT_LOCKED, cached); } static inline bool try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return __try_lock_extent(tree, start, end, EXTENT_LOCKED, cached); } int __init extent_state_init_cachep(void); void __cold extent_state_free_cachep(void); u64 count_range_bits(struct extent_io_tree *tree, u64 *start, u64 search_end, u64 max_bytes, u32 bits, int contig, struct extent_state **cached_state); void free_extent_state(struct extent_state *state); bool test_range_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bit, struct extent_state *cached_state); bool test_range_bit_exists(struct extent_io_tree *tree, u64 start, u64 end, u32 bit); int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset); int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached, struct extent_changeset *changeset); static inline int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached) { return __clear_extent_bit(tree, start, end, bits, cached, NULL); } static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, cached, NULL); } static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits) { return clear_extent_bit(tree, start, end, bits, NULL); } int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_changeset *changeset); int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, struct extent_state **cached_state); static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached_state) { return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, cached_state, NULL); } static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return clear_extent_bit(tree, start, end, EXTENT_DIRTY | EXTENT_DELALLOC | EXTENT_DO_ACCOUNTING, cached); } int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, u32 bits, 
u32 clear_bits, struct extent_state **cached_state); bool find_first_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, u32 bits, struct extent_state **cached_state); void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, u32 bits); int find_contiguous_extent_bit(struct extent_io_tree *tree, u64 start, u64 *start_ret, u64 *end_ret, u32 bits); bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start, u64 *end, u64 max_bytes, struct extent_state **cached_state); static inline int lock_dio_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return __lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached); } static inline bool try_lock_dio_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return __try_lock_extent(tree, start, end, EXTENT_DIO_LOCKED, cached); } static inline int unlock_dio_extent(struct extent_io_tree *tree, u64 start, u64 end, struct extent_state **cached) { return __clear_extent_bit(tree, start, end, EXTENT_DIO_LOCKED, cached, NULL); } #endif /* BTRFS_EXTENT_IO_TREE_H */
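/*
 * Illustrative sketch (not part of the header above): the common
 * lock / mark / unlock pattern built from the helpers declared in this file.
 * The tree, range and choice of EXTENT_DELALLOC are examples only; the cached
 * extent_state simply short-circuits the rbtree lookup on the later calls.
 */
static int example_mark_range_delalloc(struct extent_io_tree *tree,
				       u64 start, u64 end)
{
	struct extent_state *cached = NULL;
	int ret;

	/* Take EXTENT_LOCKED on [start, end] (end is inclusive). */
	ret = lock_extent(tree, start, end, &cached);
	if (ret)
		return ret;

	/* Record the range as delalloc while it is locked. */
	ret = set_extent_bit(tree, start, end, EXTENT_DELALLOC, &cached);

	/* Dropping EXTENT_LOCKED also releases the cached state reference. */
	unlock_extent(tree, start, end, &cached);

	return ret;
}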
// SPDX-License-Identifier: GPL-2.0+ // Copyright (c) 2018 Facebook Inc. // Author: Vijay Khemka <[email protected]> /dts-v1/; #include "aspeed-g5.dtsi" #include <dt-bindings/gpio/aspeed-gpio.h> #include <dt-bindings/i2c/i2c.h> / { model = "Facebook TiogaPass BMC"; compatible = "facebook,tiogapass-bmc", "aspeed,ast2500"; aliases { serial0 = &uart1; serial4 = &uart5; /* * Hardcode the bus number of i2c switches' channels to * avoid breaking the legacy applications. */ i2c16 = &imux16; i2c17 = &imux17; i2c18 = &imux18; i2c19 = &imux19; i2c20 = &imux20; i2c21 = &imux21; i2c22 = &imux22; i2c23 = &imux23; i2c24 = &imux24; i2c25 = &imux25; i2c26 = &imux26; i2c27 = &imux27; i2c28 = &imux28; i2c29 = &imux29; i2c30 = &imux30; i2c31 = &imux31; }; chosen { stdout-path = &uart5; bootargs = "console=ttyS4,115200 earlycon"; }; memory@80000000 { reg = <0x80000000 0x20000000>; }; iio-hwmon { compatible = "iio-hwmon"; io-channels = <&adc 0>, <&adc 1>, <&adc 2>, <&adc 3>, <&adc 4>, <&adc 5>, <&adc 6>, <&adc 7>; }; }; &fmc { status = "okay"; flash@0 { status = "okay"; m25p,fast-read; #include "openbmc-flash-layout.dtsi" }; }; &spi1 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi1_default>; flash@0 { status = "okay"; m25p,fast-read; label = "pnor"; }; }; &lpc_snoop { status = "okay"; snoop-ports = <0x80>; }; &lpc_ctrl { // Enable lpc clock status = "okay"; }; &uart1 { // Host Console status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_txd1_default &pinctrl_rxd1_default>; }; &uart2 { // SoL Host Console status = "okay"; }; &uart3 { // SoL BMC Console status = "okay"; }; &uart5 { // BMC Console status = "okay"; }; &kcs2 { // BMC KCS channel 2 status = "okay"; aspeed,lpc-io-reg = <0xca8>; }; &kcs3 { // BMC KCS channel 3 status = "okay"; aspeed,lpc-io-reg = <0xca2>; }; &gpio { status = "okay"; gpio-line-names = /*A0-A7*/ "BMC_CPLD_FPGA_SEL","","","","","","","", /*B0-B7*/ "","BMC_DEBUG_EN","","","","BMC_PPIN","PS_PWROK", "IRQ_PVDDQ_GHJ_VRHOT_LVT3", /*C0-C7*/ "","","","","","","","", /*D0-D7*/ "BIOS_MRC_DEBUG_MSG_DIS","BOARD_REV_ID0","", "BOARD_REV_ID1","IRQ_DIMM_SAVE_LVT3","BOARD_REV_ID2", "CPU_ERR0_LVT3_BMC","CPU_ERR1_LVT3_BMC", /*E0-E7*/ "RESET_BUTTON","RESET_OUT","POWER_BUTTON", "POWER_OUT","NMI_BUTTON","","CPU0_PROCHOT_LVT3_ BMC", "CPU1_PROCHOT_LVT3_ BMC", /*F0-F7*/ "IRQ_PVDDQ_ABC_VRHOT_LVT3","", "IRQ_PVCCIN_CPU0_VRHOT_LVC3", "IRQ_PVCCIN_CPU1_VRHOT_LVC3", "IRQ_PVDDQ_KLM_VRHOT_LVT3","","P3VBAT_BRIDGE_EN","", /*G0-G7*/ "CPU_ERR2_LVT3","CPU_CATERR_LVT3","PCH_BMC_THERMTRIP", "CPU0_SKTOCC_LVT3","","","","BIOS_SMI_ACTIVE", /*H0-H7*/ "LED_POST_CODE_0","LED_POST_CODE_1","LED_POST_CODE_2", "LED_POST_CODE_3","LED_POST_CODE_4","LED_POST_CODE_5", "LED_POST_CODE_6","LED_POST_CODE_7", /*I0-I7*/ "CPU0_FIVR_FAULT_LVT3","CPU1_FIVR_FAULT_LVT3", "FORCE_ADR","UV_ADR_TRIGGER_EN","","","","", /*J0-J7*/ "","","","","","","","", /*K0-K7*/ "","","","","","","","", /*L0-L7*/ "IRQ_UV_DETECT","IRQ_OC_DETECT","HSC_TIMER_EXP","", "MEM_THERM_EVENT_PCH","PMBUS_ALERT_BUF_EN","","", /*M0-M7*/ "CPU0_RC_ERROR","CPU1_RC_ERROR","","OC_DETECT_EN", "CPU0_THERMTRIP_LATCH_LVT3", "CPU1_THERMTRIP_LATCH_LVT3","","", /*N0-N7*/ "","","","CPU_MSMI_LVT3","","BIOS_SPI_BMC_CTRL","","", /*O0-O7*/ "","","","","","","","", /*P0-P7*/ "BOARD_SKU_ID0","BOARD_SKU_ID1","BOARD_SKU_ID2", "BOARD_SKU_ID3","BOARD_SKU_ID4","BMC_PREQ", "BMC_PWR_DEBUG","RST_RSMRST", /*Q0-Q7*/ "","","","","UARTSW_LSB","UARTSW_MSB", "POST_CARD_PRES_BMC","PE_BMC_WAKE", /*R0-R7*/ "","","BMC_TCK_MUX_SEL","BMC_PRDY", 
"BMC_XDP_PRSNT_IN","RST_BMC_PLTRST_BUF","SLT_CFG0", "SLT_CFG1", /*S0-S7*/ "THROTTLE","BMC_READY","","HSC_SMBUS_SWITCH_EN","", "","","", /*T0-T7*/ "","","","","","","","", /*U0-U7*/ "","","","","","BMC_FAULT","","", /*V0-V7*/ "","","","FAST_PROCHOT_EN","","","","", /*W0-W7*/ "","","","","","","","", /*X0-X7*/ "","","","GLOBAL_RST_WARN", "CPU0_MEMABC_MEMHOT_LVT3_BMC", "CPU0_MEMDEF_MEMHOT_LVT3_BMC", "CPU1_MEMGHJ_MEMHOT_LVT3_BMC", "CPU1_MEMKLM_MEMHOT_LVT3_BMC", /*Y0-Y7*/ "SIO_S3","SIO_S5","BMC_JTAG_SEL","SIO_ONCONTROL","", "","","", /*Z0-Z7*/ "","SIO_POWER_GOOD","IRQ_PVDDQ_DEF_VRHOT_LVT3","", "","","","", /*AA0-AA7*/ "CPU1_SKTOCC_LVT3","IRQ_SML1_PMBUS_ALERT", "SERVER_POWER_LED","","PECI_MUX_SELECT","UV_HIGH_SET", "","POST_COMPLETE", /*AB0-AB7*/ "IRQ_HSC_FAULT","OCP_MEZZA_PRES","","","","","","", /*AC0-AC7*/ "","","","","","","",""; }; &mac0 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rmii1_default>; clocks = <&syscon ASPEED_CLK_GATE_MAC1CLK>, <&syscon ASPEED_CLK_MAC1RCLK>; clock-names = "MACCLK", "RCLK"; use-ncsi; }; &mac1 { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_rmii2_default>; use-ncsi; }; &adc { status = "okay"; }; &i2c0 { status = "okay"; //Airmax Conn B, CPU0 PIROM, CPU1 PIROM }; &i2c1 { status = "okay"; //X24 Riser i2c-mux@71 { compatible = "nxp,pca9544"; #address-cells = <1>; #size-cells = <0>; reg = <0x71>; imux16: i2c@0 { #address-cells = <1>; #size-cells = <0>; reg = <0>; ina230@45 { compatible = "ti,ina230"; reg = <0x45>; }; tmp75@48 { compatible = "ti,tmp75"; reg = <0x48>; }; tmp421@49 { compatible = "ti,tmp75"; reg = <0x49>; }; eeprom@50 { compatible = "atmel,24c64"; reg = <0x50>; pagesize = <32>; }; i2c-mux@73 { compatible = "nxp,pca9546"; #address-cells = <1>; #size-cells = <0>; reg = <0x73>; imux20: i2c@0 { #address-cells = <1>; #size-cells = <0>; reg = <0>; }; imux21: i2c@1 { #address-cells = <1>; #size-cells = <0>; reg = <1>; }; imux22: i2c@2 { #address-cells = <1>; #size-cells = <0>; reg = <2>; }; imux23: i2c@3 { #address-cells = <1>; #size-cells = <0>; reg = <3>; }; }; }; imux17: i2c@1 { #address-cells = <1>; #size-cells = <0>; reg = <1>; ina230@45 { compatible = "ti,ina230"; reg = <0x45>; }; tmp421@48 { compatible = "ti,tmp75"; reg = <0x48>; }; tmp421@49 { compatible = "ti,tmp75"; reg = <0x49>; }; eeprom@50 { compatible = "atmel,24c64"; reg = <0x50>; pagesize = <32>; }; i2c-mux@73 { compatible = "nxp,pca9546"; #address-cells = <1>; #size-cells = <0>; reg = <0x73>; imux24: i2c@0 { #address-cells = <1>; #size-cells = <0>; reg = <0>; }; imux25: i2c@1 { #address-cells = <1>; #size-cells = <0>; reg = <1>; }; imux26: i2c@2 { #address-cells = <1>; #size-cells = <0>; reg = <2>; }; imux27: i2c@3 { #address-cells = <1>; #size-cells = <0>; reg = <3>; }; }; }; imux18: i2c@2 { #address-cells = <1>; #size-cells = <0>; reg = <2>; ina230@45 { compatible = "ti,ina230"; reg = <0x45>; }; tmp421@48 { compatible = "ti,tmp75"; reg = <0x48>; }; tmp421@49 { compatible = "ti,tmp75"; reg = <0x49>; }; eeprom@50 { compatible = "atmel,24c64"; reg = <0x50>; pagesize = <32>; }; i2c-mux@73 { compatible = "nxp,pca9546"; #address-cells = <1>; #size-cells = <0>; reg = <0x73>; imux28: i2c@0 { #address-cells = <1>; #size-cells = <0>; reg = <0>; }; imux29: i2c@1 { #address-cells = <1>; #size-cells = <0>; reg = <1>; }; imux30: i2c@2 { #address-cells = <1>; #size-cells = <0>; reg = <2>; }; imux31: i2c@3 { #address-cells = <1>; #size-cells = <0>; reg = <3>; }; }; }; imux19: i2c@3 { #address-cells = <1>; #size-cells = <0>; reg = <3>; i2c-switch@40 { compatible = 
"ti,ina230"; reg = <0x40>; }; i2c-switch@41 { compatible = "ti,ina230"; reg = <0x41>; }; i2c-switch@45 { compatible = "ti,ina230"; reg = <0x45>; }; }; }; }; &i2c2 { status = "okay"; // Mezz Management SMBus }; &i2c3 { status = "okay"; // SMBus to Board ID EEPROM }; &i2c4 { status = "okay"; // BMC Debug Header ipmb0@10 { compatible = "ipmb-dev"; reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; i2c-protocol; }; }; &i2c5 { status = "okay"; // CPU Voltage regulators regulator@48 { compatible = "infineon,pxe1610"; reg = <0x48>; }; regulator@4a { compatible = "infineon,pxe1610"; reg = <0x4a>; }; regulator@50 { compatible = "infineon,pxe1610"; reg = <0x50>; }; regulator@52 { compatible = "infineon,pxe1610"; reg = <0x52>; }; regulator@58 { compatible = "infineon,pxe1610"; reg = <0x58>; }; regulator@5a { compatible = "infineon,pxe1610"; reg = <0x5a>; }; regulator@68 { compatible = "infineon,pxe1610"; reg = <0x68>; }; regulator@70 { compatible = "infineon,pxe1610"; reg = <0x70>; }; regulator@72 { compatible = "infineon,pxe1610"; reg = <0x72>; }; }; &i2c6 { status = "okay"; tpm@20 { compatible = "infineon,slb9645tt"; reg = <0x20>; }; tmp421@4e { compatible = "ti,tmp421"; reg = <0x4e>; }; tmp421@4f { compatible = "ti,tmp421"; reg = <0x4f>; }; eeprom@54 { compatible = "atmel,24c64"; reg = <0x54>; pagesize = <32>; }; }; &i2c7 { status = "okay"; //HSC, AirMax Conn A adm1278@45 { compatible = "adm1275"; reg = <0x45>; shunt-resistor-micro-ohms = <250>; }; }; &i2c8 { status = "okay"; tmp421@1f { compatible = "ti,tmp421"; reg = <0x1f>; }; //Mezz Sensor SMBus }; &i2c9 { status = "okay"; //USB Debug Connector ipmb0@10 { compatible = "ipmb-dev"; reg = <(0x10 | I2C_OWN_SLAVE_ADDRESS)>; i2c-protocol; }; }; &pwm_tacho { status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pwm0_default &pinctrl_pwm1_default>; fan@0 { reg = <0x00>; aspeed,fan-tach-ch = /bits/ 8 <0x00>; }; fan@1 { reg = <0x01>; aspeed,fan-tach-ch = /bits/ 8 <0x02>; }; };
/* SPDX-License-Identifier: MIT */ /* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef _DCN314_RESOURCE_H_ #define _DCN314_RESOURCE_H_ #include "core_types.h" extern struct _vcs_dpi_ip_params_st dcn3_14_ip; extern struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc; #define TO_DCN314_RES_POOL(pool)\ container_of(pool, struct dcn314_resource_pool, base) struct dcn314_resource_pool { struct resource_pool base; }; bool dcn314_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate); struct resource_pool *dcn314_create_resource_pool( const struct dc_init_data *init_data, struct dc *dc); #endif /* _DCN314_RESOURCE_H_ */
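/*
 * Illustrative sketch (not part of the header above): how a caller might
 * construct the DCN3.14 resource pool and run a bandwidth check with the
 * functions declared in this file. The init_data/context arguments are
 * assumed to be prepared elsewhere by DC, and the failure handling here is
 * an example only.
 */
static struct resource_pool *example_dcn314_pool_create(const struct dc_init_data *init_data,
							struct dc *dc,
							struct dc_state *context)
{
	struct resource_pool *pool = dcn314_create_resource_pool(init_data, dc);

	if (!pool)
		return NULL;

	/* TO_DCN314_RES_POOL(pool) recovers the dcn314_resource_pool wrapper. */

	/* true = fast validation; pass false for the full bandwidth check. */
	if (!dcn314_validate_bandwidth(dc, context, true))
		return NULL;	/* caller would destroy the pool on failure */

	return pool;
}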
/* SPDX-License-Identifier: GPL-2.0 */ #undef TRACE_SYSTEM #define TRACE_SYSTEM csd #if !defined(_TRACE_CSD_H) || defined(TRACE_HEADER_MULTI_READ) #define _TRACE_CSD_H #include <linux/tracepoint.h> TRACE_EVENT(csd_queue_cpu, TP_PROTO(const unsigned int cpu, unsigned long callsite, smp_call_func_t func, call_single_data_t *csd), TP_ARGS(cpu, callsite, func, csd), TP_STRUCT__entry( __field(unsigned int, cpu) __field(void *, callsite) __field(void *, func) __field(void *, csd) ), TP_fast_assign( __entry->cpu = cpu; __entry->callsite = (void *)callsite; __entry->func = func; __entry->csd = csd; ), TP_printk("cpu=%u callsite=%pS func=%ps csd=%p", __entry->cpu, __entry->callsite, __entry->func, __entry->csd) ); /* * Tracepoints for a function which is called as an effect of smp_call_function.* */ DECLARE_EVENT_CLASS(csd_function, TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd), TP_STRUCT__entry( __field(void *, func) __field(void *, csd) ), TP_fast_assign( __entry->func = func; __entry->csd = csd; ), TP_printk("func=%ps, csd=%p", __entry->func, __entry->csd) ); DEFINE_EVENT(csd_function, csd_function_entry, TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd) ); DEFINE_EVENT(csd_function, csd_function_exit, TP_PROTO(smp_call_func_t func, call_single_data_t *csd), TP_ARGS(func, csd) ); #endif /* _TRACE_CSD_H */ /* This part must be outside protection */ #include <trace/define_trace.h>
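/*
 * Illustrative sketch (not from the header above): how the SMP core might
 * emit these events around a cross-CPU function call. The helper below is a
 * stand-in; only the trace_csd_* invocations and their argument order follow
 * from the TRACE_EVENT/DEFINE_EVENT definitions in this file.
 */
static void example_run_csd(unsigned int cpu, call_single_data_t *csd)
{
	smp_call_func_t func = csd->func;

	/* Record who queued work for which CPU (callsite helps backtraces). */
	trace_csd_queue_cpu(cpu, _RET_IP_, func, csd);

	/* Bracket the actual callback execution on the target CPU. */
	trace_csd_function_entry(func, csd);
	func(csd->info);
	trace_csd_function_exit(func, csd);
}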
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 /****************************************************************************** * * Module Name: evmisc - Miscellaneous event manager support functions * * Copyright (C) 2000 - 2023, Intel Corp. * *****************************************************************************/ #include <acpi/acpi.h> #include "accommon.h" #include "acevents.h" #include "acnamesp.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evmisc") /* Local prototypes */ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context); /******************************************************************************* * * FUNCTION: acpi_ev_is_notify_object * * PARAMETERS: node - Node to check * * RETURN: TRUE if notifies allowed on this object * * DESCRIPTION: Check type of node for a object that supports notifies. * * TBD: This could be replaced by a flag bit in the node. * ******************************************************************************/ u8 acpi_ev_is_notify_object(struct acpi_namespace_node *node) { switch (node->type) { case ACPI_TYPE_DEVICE: case ACPI_TYPE_PROCESSOR: case ACPI_TYPE_THERMAL: /* * These are the ONLY objects that can receive ACPI notifications */ return (TRUE); default: return (FALSE); } } /******************************************************************************* * * FUNCTION: acpi_ev_queue_notify_request * * PARAMETERS: node - NS node for the notified object * notify_value - Value from the Notify() request * * RETURN: Status * * DESCRIPTION: Dispatch a device notification event to a previously * installed handler. * ******************************************************************************/ acpi_status acpi_ev_queue_notify_request(struct acpi_namespace_node *node, u32 notify_value) { union acpi_operand_object *obj_desc; union acpi_operand_object *handler_list_head = NULL; union acpi_generic_state *info; u8 handler_list_id = 0; acpi_status status = AE_OK; ACPI_FUNCTION_NAME(ev_queue_notify_request); /* Are Notifies allowed on this object? 
*/ if (!acpi_ev_is_notify_object(node)) { return (AE_TYPE); } /* Get the correct notify list type (System or Device) */ if (notify_value <= ACPI_MAX_SYS_NOTIFY) { handler_list_id = ACPI_SYSTEM_HANDLER_LIST; } else { handler_list_id = ACPI_DEVICE_HANDLER_LIST; } /* Get the notify object attached to the namespace Node */ obj_desc = acpi_ns_get_attached_object(node); if (obj_desc) { /* We have an attached object, Get the correct handler list */ handler_list_head = obj_desc->common_notify.notify_list[handler_list_id]; } /* * If there is no notify handler (Global or Local) * for this object, just ignore the notify */ if (!acpi_gbl_global_notify[handler_list_id].handler && !handler_list_head) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No notify handler for Notify, ignoring (%4.4s, %X) node %p\n", acpi_ut_get_node_name(node), notify_value, node)); return (AE_OK); } /* Setup notify info and schedule the notify dispatcher */ info = acpi_ut_create_generic_state(); if (!info) { return (AE_NO_MEMORY); } info->common.descriptor_type = ACPI_DESC_TYPE_STATE_NOTIFY; info->notify.node = node; info->notify.value = (u16)notify_value; info->notify.handler_list_id = handler_list_id; info->notify.handler_list_head = handler_list_head; info->notify.global = &acpi_gbl_global_notify[handler_list_id]; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Dispatching Notify on [%4.4s] (%s) Value 0x%2.2X (%s) Node %p\n", acpi_ut_get_node_name(node), acpi_ut_get_type_name(node->type), notify_value, acpi_ut_get_notify_name(notify_value, ACPI_TYPE_ANY), node)); status = acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_notify_dispatch, info); if (ACPI_FAILURE(status)) { acpi_ut_delete_generic_state(info); } return (status); } /******************************************************************************* * * FUNCTION: acpi_ev_notify_dispatch * * PARAMETERS: context - To be passed to the notify handler * * RETURN: None. * * DESCRIPTION: Dispatch a device notification event to a previously * installed handler. * ******************************************************************************/ static void ACPI_SYSTEM_XFACE acpi_ev_notify_dispatch(void *context) { union acpi_generic_state *info = (union acpi_generic_state *)context; union acpi_operand_object *handler_obj; ACPI_FUNCTION_ENTRY(); /* Invoke a global notify handler if installed */ if (info->notify.global->handler) { info->notify.global->handler(info->notify.node, info->notify.value, info->notify.global->context); } /* Now invoke the local notify handler(s) if any are installed */ handler_obj = info->notify.handler_list_head; while (handler_obj) { handler_obj->notify.handler(info->notify.node, info->notify.value, handler_obj->notify.context); handler_obj = handler_obj->notify.next[info->notify.handler_list_id]; } /* All done with the info object */ acpi_ut_delete_generic_state(info); } #if (!ACPI_REDUCED_HARDWARE) /****************************************************************************** * * FUNCTION: acpi_ev_terminate * * PARAMETERS: none * * RETURN: none * * DESCRIPTION: Disable events and free memory allocated for table storage. * ******************************************************************************/ void acpi_ev_terminate(void) { u32 i; acpi_status status; ACPI_FUNCTION_TRACE(ev_terminate); if (acpi_gbl_events_initialized) { /* * Disable all event-related functionality. In all cases, on error, * print a message but obviously we don't abort. 
*/ /* Disable all fixed events */ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) { status = acpi_disable_event(i, 0); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not disable fixed event %u", (u32) i)); } } /* Disable all GPEs in all GPE blocks */ status = acpi_ev_walk_gpe_list(acpi_hw_disable_gpe_block, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not disable GPEs in GPE block")); } status = acpi_ev_remove_global_lock_handler(); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not remove Global Lock handler")); } acpi_gbl_events_initialized = FALSE; } /* Remove SCI handlers */ status = acpi_ev_remove_all_sci_handlers(); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not remove SCI handler")); } /* Deallocate all handler objects installed within GPE info structs */ status = acpi_ev_walk_gpe_list(acpi_ev_delete_gpe_handlers, NULL); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not delete GPE handlers")); } /* Return to original mode if necessary */ if (acpi_gbl_original_mode == ACPI_SYS_MODE_LEGACY) { status = acpi_disable(); if (ACPI_FAILURE(status)) { ACPI_WARNING((AE_INFO, "AcpiDisable failed")); } } return_VOID; } #endif /* !ACPI_REDUCED_HARDWARE */
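/*
 * Illustrative sketch (not part of the file above): queueing a device
 * notification from elsewhere in ACPICA through the dispatcher defined
 * in this module. The node is assumed to be resolved by the caller and
 * the 0x80 value is just a conventional device-specific notify example.
 */
static acpi_status example_notify_device(struct acpi_namespace_node *node)
{
	/* 0x80 is a typical device-specific (status change) notify value. */
	acpi_status status = acpi_ev_queue_notify_request(node, 0x80);

	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status, "Could not queue Notify"));
	}

	return status;
}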
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (c) 2019 BayLibre, SAS * Author: Neil Armstrong <[email protected]> */ #include "meson-g12b-odroid.dtsi" / { aliases { rtc0 = &rtc; }; dio2133: audio-amplifier-0 { compatible = "simple-audio-amplifier"; enable-gpios = <&gpio_ao GPIOAO_2 GPIO_ACTIVE_HIGH>; VCC-supply = <&vcc_5v>; sound-name-prefix = "U19"; status = "okay"; }; hub_5v: regulator-hub-5v { compatible = "regulator-fixed"; regulator-name = "HUB_5V"; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; vin-supply = <&vcc_5v>; /* Connected to the Hub CHIPENABLE, LOW sets low power state */ gpio = <&gpio GPIOH_5 GPIO_ACTIVE_HIGH>; enable-active-high; }; /* USB hub supports both USB 2.0 and USB 3.0 root hub */ usb-hub { dr_mode = "host"; #address-cells = <1>; #size-cells = <0>; /* 2.0 hub on port 1 */ hub_2_0: hub@1 { compatible = "usb5e3,610"; reg = <1>; peer-hub = <&hub_3_0>; vdd-supply = <&usb_pwr_en>; }; /* 3.0 hub on port 4 */ hub_3_0: hub@2 { compatible = "usb5e3,620"; reg = <2>; peer-hub = <&hub_2_0>; reset-gpios = <&gpio GPIOH_4 GPIO_ACTIVE_LOW>; vdd-supply = <&vcc_5v>; }; }; sound { compatible = "amlogic,axg-sound-card"; model = "ODROID-N2"; audio-widgets = "Line", "Lineout"; audio-aux-devs = <&tdmout_b>, <&tdmout_c>, <&tdmin_a>, <&tdmin_b>, <&tdmin_c>, <&tdmin_lb>, <&dio2133>; audio-routing = "TDMOUT_B IN 0", "FRDDR_A OUT 1", "TDMOUT_B IN 1", "FRDDR_B OUT 1", "TDMOUT_B IN 2", "FRDDR_C OUT 1", "TDM_B Playback", "TDMOUT_B OUT", "TDMOUT_C IN 0", "FRDDR_A OUT 2", "TDMOUT_C IN 1", "FRDDR_B OUT 2", "TDMOUT_C IN 2", "FRDDR_C OUT 2", "TDM_C Playback", "TDMOUT_C OUT", "TDMIN_A IN 4", "TDM_B Loopback", "TDMIN_B IN 4", "TDM_B Loopback", "TDMIN_C IN 4", "TDM_B Loopback", "TDMIN_LB IN 1", "TDM_B Loopback", "TDMIN_A IN 5", "TDM_C Loopback", "TDMIN_B IN 5", "TDM_C Loopback", "TDMIN_C IN 5", "TDM_C Loopback", "TDMIN_LB IN 2", "TDM_C Loopback", "TODDR_A IN 0", "TDMIN_A OUT", "TODDR_B IN 0", "TDMIN_A OUT", "TODDR_C IN 0", "TDMIN_A OUT", "TODDR_A IN 1", "TDMIN_B OUT", "TODDR_B IN 1", "TDMIN_B OUT", "TODDR_C IN 1", "TDMIN_B OUT", "TODDR_A IN 2", "TDMIN_C OUT", "TODDR_B IN 2", "TDMIN_C OUT", "TODDR_C IN 2", "TDMIN_C OUT", "TODDR_A IN 6", "TDMIN_LB OUT", "TODDR_B IN 6", "TDMIN_LB OUT", "TODDR_C IN 6", "TDMIN_LB OUT", "U19 INL", "ACODEC LOLP", "U19 INR", "ACODEC LORP", "Lineout", "U19 OUTL", "Lineout", "U19 OUTR"; clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; assigned-clocks = <&clkc CLKID_MPLL2>, <&clkc CLKID_MPLL0>, <&clkc CLKID_MPLL1>; assigned-clock-parents = <0>, <0>, <0>; assigned-clock-rates = <294912000>, <270950400>, <393216000>; dai-link-0 { sound-dai = <&frddr_a>; }; dai-link-1 { sound-dai = <&frddr_b>; }; dai-link-2 { sound-dai = <&frddr_c>; }; dai-link-3 { sound-dai = <&toddr_a>; }; dai-link-4 { sound-dai = <&toddr_b>; }; dai-link-5 { sound-dai = <&toddr_c>; }; /* 8ch hdmi interface */ dai-link-6 { sound-dai = <&tdmif_b>; dai-format = "i2s"; dai-tdm-slot-tx-mask-0 = <1 1>; dai-tdm-slot-tx-mask-1 = <1 1>; dai-tdm-slot-tx-mask-2 = <1 1>; dai-tdm-slot-tx-mask-3 = <1 1>; mclk-fs = <256>; codec-0 { sound-dai = <&tohdmitx TOHDMITX_I2S_IN_B>; }; codec-1 { sound-dai = <&toacodec TOACODEC_IN_B>; }; }; /* i2s jack output interface */ dai-link-7 { sound-dai = <&tdmif_c>; dai-format = "i2s"; dai-tdm-slot-tx-mask-0 = <1 1>; mclk-fs = <256>; codec-0 { sound-dai = <&tohdmitx TOHDMITX_I2S_IN_C>; }; codec-1 { sound-dai = <&toacodec TOACODEC_IN_C>; }; }; /* hdmi glue */ dai-link-8 { sound-dai = <&tohdmitx TOHDMITX_I2S_OUT>; codec { sound-dai = 
<&hdmi_tx>; }; }; /* acodec glue */ dai-link-9 { sound-dai = <&toacodec TOACODEC_OUT>; codec { sound-dai = <&acodec>; }; }; }; }; &acodec { AVDD-supply = <&vddao_1v8>; status = "okay"; }; &ethmac { pinctrl-0 = <&eth_pins>, <&eth_rgmii_pins>; pinctrl-names = "default"; status = "okay"; phy-mode = "rgmii"; phy-handle = <&external_phy>; amlogic,tx-delay-ns = <2>; }; &ext_mdio { external_phy: ethernet-phy@0 { /* Realtek RTL8211F (0x001cc916) */ reg = <0>; max-speed = <1000>; reset-assert-us = <10000>; reset-deassert-us = <80000>; reset-gpios = <&gpio GPIOZ_15 (GPIO_ACTIVE_LOW | GPIO_OPEN_DRAIN)>; interrupt-parent = <&gpio_intc>; /* MAC_INTR on GPIOZ_14 */ interrupts = <IRQID_GPIOZ_14 IRQ_TYPE_LEVEL_LOW>; }; }; &gpio { gpio-line-names = /* GPIOZ */ "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", /* GPIOH */ "", "", "", "", "", "", "", "", "", /* BOOT */ "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", "", /* GPIOC */ "", "", "", "", "", "", "", "", /* GPIOA */ "PIN_44", /* GPIOA_0 */ "PIN_46", /* GPIOA_1 */ "PIN_45", /* GPIOA_2 */ "PIN_47", /* GPIOA_3 */ "PIN_26", /* GPIOA_4 */ "", "", "", "", "", "", "PIN_42", /* GPIOA_11 */ "PIN_32", /* GPIOA_12 */ "PIN_7", /* GPIOA_13 */ "PIN_27", /* GPIOA_14 */ "PIN_28", /* GPIOA_15 */ /* GPIOX */ "PIN_16", /* GPIOX_0 */ "PIN_18", /* GPIOX_1 */ "PIN_22", /* GPIOX_2 */ "PIN_11", /* GPIOX_3 */ "PIN_13", /* GPIOX_4 */ "PIN_33", /* GPIOX_5 */ "PIN_35", /* GPIOX_6 */ "PIN_15", /* GPIOX_7 */ "PIN_19", /* GPIOX_8 */ "PIN_21", /* GPIOX_9 */ "PIN_24", /* GPIOX_10 */ "PIN_23", /* GPIOX_11 */ "PIN_8", /* GPIOX_12 */ "PIN_10", /* GPIOX_13 */ "PIN_29", /* GPIOX_14 */ "PIN_31", /* GPIOX_15 */ "PIN_12", /* GPIOX_16 */ "PIN_3", /* GPIOX_17 */ "PIN_5", /* GPIOX_18 */ "PIN_36"; /* GPIOX_19 */ }; &i2c3 { status = "okay"; pinctrl-0 = <&i2c3_sda_a_pins>, <&i2c3_sck_a_pins>; pinctrl-names = "default"; rtc: rtc@51 { compatible = "nxp,pcf8563"; reg = <0x51>; wakeup-source; }; }; &ir { status = "okay"; pinctrl-0 = <&remote_input_ao_pins>; pinctrl-names = "default"; linux,rc-map-name = "rc-odroid"; }; /* * EMMC_D4, EMMC_D5, EMMC_D6 and EMMC_D7 pins are shared between SPI NOR pins * and eMMC Data 4 to 7 pins. * Replace emmc_data_8b_pins to emmc_data_4b_pins from sd_emmc_c pinctrl-0, * and change bus-width to 4 then spifc can be enabled. * The SW1 slide should also be set to the correct position. */ &spifc { status = "disabled"; pinctrl-0 = <&nor_pins>; pinctrl-names = "default"; mx25u64: flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "mxicy,mx25u6435f", "jedec,spi-nor"; reg = <0>; spi-max-frequency = <104000000>; }; }; &toacodec { status = "okay"; }; &usb { vbus-supply = <&usb_pwr_en>; }; &usb2_phy1 { /* Enable the hub which is connected to this port */ phy-supply = <&hub_5v>; };
/* * Copyright (c) 2016, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/kernel.h> #include <linux/refcount.h> #include <linux/mlx5/driver.h> #include <net/vxlan.h> #include "mlx5_core.h" #include "vxlan.h" struct mlx5_vxlan { struct mlx5_core_dev *mdev; /* max_num_ports is usually 4, 16 buckets is more than enough */ DECLARE_HASHTABLE(htable, 4); struct mutex sync_lock; /* sync add/del port HW operations */ }; struct mlx5_vxlan_port { struct hlist_node hlist; u16 udp_port; }; static int mlx5_vxlan_core_add_port_cmd(struct mlx5_core_dev *mdev, u16 port) { u32 in[MLX5_ST_SZ_DW(add_vxlan_udp_dport_in)] = {}; MLX5_SET(add_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT); MLX5_SET(add_vxlan_udp_dport_in, in, vxlan_udp_port, port); return mlx5_cmd_exec_in(mdev, add_vxlan_udp_dport, in); } static int mlx5_vxlan_core_del_port_cmd(struct mlx5_core_dev *mdev, u16 port) { u32 in[MLX5_ST_SZ_DW(delete_vxlan_udp_dport_in)] = {}; MLX5_SET(delete_vxlan_udp_dport_in, in, opcode, MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT); MLX5_SET(delete_vxlan_udp_dport_in, in, vxlan_udp_port, port); return mlx5_cmd_exec_in(mdev, delete_vxlan_udp_dport, in); } bool mlx5_vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; bool found = false; if (!mlx5_vxlan_allowed(vxlan)) return false; rcu_read_lock(); hash_for_each_possible_rcu(vxlan->htable, vxlanp, hlist, port) if (vxlanp->udp_port == port) { found = true; break; } rcu_read_unlock(); return found; } static struct mlx5_vxlan_port *vxlan_lookup_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; hash_for_each_possible(vxlan->htable, vxlanp, hlist, port) if (vxlanp->udp_port == port) return vxlanp; return NULL; } int mlx5_vxlan_add_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; int ret; vxlanp = kzalloc(sizeof(*vxlanp), GFP_KERNEL); if (!vxlanp) return -ENOMEM; vxlanp->udp_port = port; ret = mlx5_vxlan_core_add_port_cmd(vxlan->mdev, port); if (ret) { kfree(vxlanp); return ret; } mutex_lock(&vxlan->sync_lock); hash_add_rcu(vxlan->htable, &vxlanp->hlist, port); mutex_unlock(&vxlan->sync_lock); return 0; } int mlx5_vxlan_del_port(struct mlx5_vxlan *vxlan, u16 port) { struct mlx5_vxlan_port *vxlanp; int ret = 0;
mutex_lock(&vxlan->sync_lock); vxlanp = vxlan_lookup_port(vxlan, port); if (WARN_ON(!vxlanp)) { ret = -ENOENT; goto out_unlock; } hash_del_rcu(&vxlanp->hlist); synchronize_rcu(); mlx5_vxlan_core_del_port_cmd(vxlan->mdev, port); kfree(vxlanp); out_unlock: mutex_unlock(&vxlan->sync_lock); return ret; } struct mlx5_vxlan *mlx5_vxlan_create(struct mlx5_core_dev *mdev) { struct mlx5_vxlan *vxlan; if (!MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan) || !mlx5_core_is_pf(mdev)) return ERR_PTR(-ENOTSUPP); vxlan = kzalloc(sizeof(*vxlan), GFP_KERNEL); if (!vxlan) return ERR_PTR(-ENOMEM); vxlan->mdev = mdev; mutex_init(&vxlan->sync_lock); hash_init(vxlan->htable); /* Hardware adds 4789 (IANA_VXLAN_UDP_PORT) by default */ mlx5_vxlan_add_port(vxlan, IANA_VXLAN_UDP_PORT); return vxlan; } void mlx5_vxlan_destroy(struct mlx5_vxlan *vxlan) { if (!mlx5_vxlan_allowed(vxlan)) return; mlx5_vxlan_del_port(vxlan, IANA_VXLAN_UDP_PORT); WARN_ON(!hash_empty(vxlan->htable)); kfree(vxlan); } void mlx5_vxlan_reset_to_default(struct mlx5_vxlan *vxlan) { struct mlx5_vxlan_port *vxlanp; struct hlist_node *tmp; int bkt; if (!mlx5_vxlan_allowed(vxlan)) return; hash_for_each_safe(vxlan->htable, bkt, tmp, vxlanp, hlist) { /* Don't delete default UDP port added by the HW. * Remove only user configured ports */ if (vxlanp->udp_port == IANA_VXLAN_UDP_PORT) continue; mlx5_vxlan_del_port(vxlan, vxlanp->udp_port); } }
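/*
 * Illustrative sketch (not part of the file above): typical driver usage of
 * the port-tracking API implemented here, e.g. from a UDP tunnel offload
 * path. The port number and the create/destroy placement are examples; the
 * error handling follows the ERR_PTR convention of mlx5_vxlan_create().
 */
static int example_offload_vxlan_port(struct mlx5_core_dev *mdev, u16 port)
{
	struct mlx5_vxlan *vxlan = mlx5_vxlan_create(mdev);
	int err;

	if (IS_ERR(vxlan))
		return PTR_ERR(vxlan);

	/* Tell the HW to treat this UDP destination port as VXLAN. */
	err = mlx5_vxlan_add_port(vxlan, port);
	if (err)
		goto out;

	WARN_ON(!mlx5_vxlan_lookup_port(vxlan, port));

	err = mlx5_vxlan_del_port(vxlan, port);
out:
	mlx5_vxlan_destroy(vxlan);
	return err;
}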
/* * arch/arm/plat-orion/include/plat/orion-gpio.h * * Marvell Orion SoC GPIO handling. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #ifndef __PLAT_GPIO_H #define __PLAT_GPIO_H #include <linux/init.h> #include <linux/types.h> #include <linux/irqdomain.h> struct gpio_desc; /* * Orion-specific GPIO API extensions. */ void orion_gpio_set_unused(unsigned pin); void orion_gpio_set_blink(unsigned pin, int blink); int orion_gpio_led_blink_set(struct gpio_desc *desc, int state, unsigned long *delay_on, unsigned long *delay_off); #define GPIO_INPUT_OK (1 << 0) #define GPIO_OUTPUT_OK (1 << 1) void orion_gpio_set_valid(unsigned pin, int mode); /* Initialize gpiolib. */ void __init orion_gpio_init(int gpio_base, int ngpio, void __iomem *base, int mask_offset, int secondary_irq_base, int irq[4]); #endif
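/*
 * Illustrative sketch (not part of the header above): how board code might
 * register one bank of 32 GPIOs and constrain a pin using the API declared
 * in this file. The base address, secondary IRQ base, per-bank IRQ numbers
 * and pin number are placeholders.
 */
static void __init example_orion_gpio_setup(void __iomem *base, int irq0,
					    int irq1, int irq2, int irq3)
{
	int irqs[4] = { irq0, irq1, irq2, irq3 };

	/* Bank of 32 pins starting at global GPIO number 0, no mask offset. */
	orion_gpio_init(0, 32, base, 0, 160, irqs);

	/* Pin 7 drives an LED: allow output only, and let it blink in HW. */
	orion_gpio_set_valid(7, GPIO_OUTPUT_OK);
	orion_gpio_set_blink(7, 1);
}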