/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _IPV6_STUBS_H
#define _IPV6_STUBS_H

#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/neighbour.h>
#include <net/sock.h>
#include <net/ipv6.h>

/* structs from net/ip6_fib.h */
struct fib6_info;
struct fib6_nh;
struct fib6_config;
struct fib6_result;

/* This is ugly, ideally these symbols should be built
 * into the core kernel.
 */
struct ipv6_stub {
	int (*ipv6_sock_mc_join)(struct sock *sk, int ifindex,
				 const struct in6_addr *addr);
	int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
				 const struct in6_addr *addr);
	struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
						  const struct sock *sk,
						  struct flowi6 *fl6,
						  const struct in6_addr *final_dst);
	int (*ipv6_route_input)(struct sk_buff *skb);

	struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
	int (*fib6_lookup)(struct net *net, int oif, struct flowi6 *fl6,
			   struct fib6_result *res, int flags);
	int (*fib6_table_lookup)(struct net *net, struct fib6_table *table,
				 int oif, struct flowi6 *fl6,
				 struct fib6_result *res, int flags);
	void (*fib6_select_path)(const struct net *net, struct fib6_result *res,
				 struct flowi6 *fl6, int oif, bool oif_match,
				 const struct sk_buff *skb, int strict);
	u32 (*ip6_mtu_from_fib6)(const struct fib6_result *res,
				 const struct in6_addr *daddr,
				 const struct in6_addr *saddr);

	int (*fib6_nh_init)(struct net *net, struct fib6_nh *fib6_nh,
			    struct fib6_config *cfg, gfp_t gfp_flags,
			    struct netlink_ext_ack *extack);
	void (*fib6_nh_release)(struct fib6_nh *fib6_nh);
	void (*fib6_nh_release_dsts)(struct fib6_nh *fib6_nh);
	void (*fib6_update_sernum)(struct net *net, struct fib6_info *rt);
	int (*ip6_del_rt)(struct net *net, struct fib6_info *rt,
			  bool skip_notify);
	void (*fib6_rt_update)(struct net *net, struct fib6_info *rt,
			       struct nl_info *info);

	void (*udpv6_encap_enable)(void);
	void (*ndisc_send_na)(struct net_device *dev,
			      const struct in6_addr *daddr,
			      const struct in6_addr *solicited_addr,
			      bool router, bool solicited, bool override,
			      bool inc_opt);
#if IS_ENABLED(CONFIG_XFRM)
	void (*xfrm6_local_rxpmtu)(struct sk_buff *skb, u32 mtu);
	int (*xfrm6_udp_encap_rcv)(struct sock *sk, struct sk_buff *skb);
	struct sk_buff *(*xfrm6_gro_udp_encap_rcv)(struct sock *sk,
						   struct list_head *head,
						   struct sk_buff *skb);
	int (*xfrm6_rcv_encap)(struct sk_buff *skb, int nexthdr, __be32 spi,
			       int encap_type);
#endif
	struct neigh_table *nd_tbl;

	int (*ipv6_fragment)(struct net *net, struct sock *sk,
			     struct sk_buff *skb,
			     int (*output)(struct net *, struct sock *,
					   struct sk_buff *));
	struct net_device *(*ipv6_dev_find)(struct net *net,
					    const struct in6_addr *addr,
					    struct net_device *dev);
	int (*ip6_xmit)(const struct sock *sk, struct sk_buff *skb,
			struct flowi6 *fl6, __u32 mark,
			struct ipv6_txoptions *opt, int tclass, u32 priority);
};
extern const struct ipv6_stub *ipv6_stub __read_mostly;

/* A stub used by bpf helpers. Similarly ugly as ipv6_stub */
struct ipv6_bpf_stub {
	int (*inet6_bind)(struct sock *sk, struct sockaddr *uaddr, int addr_len,
			  u32 flags);
	struct sock *(*udp6_lib_lookup)(const struct net *net,
				     const struct in6_addr *saddr, __be16 sport,
				     const struct in6_addr *daddr, __be16 dport,
				     int dif, int sdif, struct udp_table *tbl,
				     struct sk_buff *skb);
	int (*ipv6_setsockopt)(struct sock *sk, int level, int optname,
			       sockptr_t optval, unsigned int optlen);
	int (*ipv6_getsockopt)(struct sock *sk, int level, int optname,
			       sockptr_t optval, sockptr_t optlen);
	int (*ipv6_dev_get_saddr)(struct net *net,
				  const struct net_device *dst_dev,
				  const struct in6_addr *daddr,
				  unsigned int prefs,
				  struct in6_addr *saddr);
};
extern const struct ipv6_bpf_stub *ipv6_bpf_stub __read_mostly;

#endif
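/*
 * Illustrative sketch (not part of the header above): how built-in code is
 * expected to use this indirection.  Callers invoke IPv6 functionality
 * through the ipv6_stub pointer so they can link even when ipv6 is modular;
 * my understanding (treat as an assumption here) is that the core installs a
 * default stub whose ops fail with -EAFNOSUPPORT until the real ipv6 module
 * replaces it.  example_mc_join() is hypothetical.
 */
#if 0
static int example_mc_join(struct sock *sk, int ifindex,
			   const struct in6_addr *grp)
{
	/* Call through the stub rather than calling ipv6 code directly. */
	return ipv6_stub->ipv6_sock_mc_join(sk, ifindex, grp);
}
#endif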
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2022-2023 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <[email protected]>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_btree.h"
#include "xfs_btree_staging.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_inode.h"
#include "xfs_alloc.h"
#include "xfs_rmap.h"
#include "xfs_ag.h"
#include "xfs_defer.h"
#include "scrub/scrub.h"
#include "scrub/common.h"
#include "scrub/trace.h"
#include "scrub/repair.h"
#include "scrub/newbt.h"

/*
 * Estimate proper slack values for a btree that's being reloaded.
 *
 * Under most circumstances, we'll take whatever default loading value the
 * btree bulk loading code calculates for us.  However, there are some
 * exceptions to this rule:
 *
 * (1) If someone turned one of the debug knobs.
 * (2) If this is a per-AG btree and the AG has less than 10% space free.
 * (3) If this is an inode btree and the FS has less than 10% space free.
 *
 * In cases (2) and (3), format the new btree blocks almost completely full
 * to minimize space usage.
 */
static void
xrep_newbt_estimate_slack(
	struct xrep_newbt	*xnr)
{
	struct xfs_scrub	*sc = xnr->sc;
	struct xfs_btree_bload	*bload = &xnr->bload;
	uint64_t		free;
	uint64_t		sz;

	/*
	 * The xfs_globals values are set to -1 (i.e. take the bload defaults)
	 * unless someone has set them otherwise, so we just pull the values
	 * here.
	 */
	bload->leaf_slack = xfs_globals.bload_leaf_slack;
	bload->node_slack = xfs_globals.bload_node_slack;

	if (sc->ops->type == ST_PERAG) {
		free = sc->sa.pag->pagf_freeblks;
		sz = xfs_ag_block_count(sc->mp, pag_agno(sc->sa.pag));
	} else {
		free = percpu_counter_sum(&sc->mp->m_fdblocks);
		sz = sc->mp->m_sb.sb_dblocks;
	}

	/* No further changes if there's more than 10% free space left. */
	if (free >= div_u64(sz, 10))
		return;

	/*
	 * We're low on space; load the btrees as tightly as possible.  Leave
	 * a couple of open slots in each btree block so that we don't end up
	 * splitting the btrees like crazy after a mount.
	 */
	if (bload->leaf_slack < 0)
		bload->leaf_slack = 2;
	if (bload->node_slack < 0)
		bload->node_slack = 2;
}

/* Initialize accounting resources for staging a new AG btree. */
void
xrep_newbt_init_ag(
	struct xrep_newbt		*xnr,
	struct xfs_scrub		*sc,
	const struct xfs_owner_info	*oinfo,
	xfs_fsblock_t			alloc_hint,
	enum xfs_ag_resv_type		resv)
{
	memset(xnr, 0, sizeof(struct xrep_newbt));
	xnr->sc = sc;
	xnr->oinfo = *oinfo; /* structure copy */
	xnr->alloc_hint = alloc_hint;
	xnr->resv = resv;
	INIT_LIST_HEAD(&xnr->resv_list);
	xnr->bload.max_dirty = XFS_B_TO_FSBT(sc->mp, 256U << 10); /* 256K */
	xrep_newbt_estimate_slack(xnr);
}

/* Initialize accounting resources for staging a new inode fork btree. */
int
xrep_newbt_init_inode(
	struct xrep_newbt		*xnr,
	struct xfs_scrub		*sc,
	int				whichfork,
	const struct xfs_owner_info	*oinfo)
{
	struct xfs_ifork		*ifp;

	ifp = kmem_cache_zalloc(xfs_ifork_cache, XCHK_GFP_FLAGS);
	if (!ifp)
		return -ENOMEM;

	xrep_newbt_init_ag(xnr, sc, oinfo,
			XFS_INO_TO_FSB(sc->mp, sc->ip->i_ino),
			XFS_AG_RESV_NONE);
	xnr->ifake.if_fork = ifp;
	xnr->ifake.if_fork_size = xfs_inode_fork_size(sc->ip, whichfork);
	return 0;
}

/*
 * Initialize accounting resources for staging a new btree.  Callers are
 * expected to add their own reservations (and clean them up) manually.
 */
void
xrep_newbt_init_bare(
	struct xrep_newbt		*xnr,
	struct xfs_scrub		*sc)
{
	xrep_newbt_init_ag(xnr, sc, &XFS_RMAP_OINFO_ANY_OWNER, NULLFSBLOCK,
			XFS_AG_RESV_NONE);
}

/*
 * Designate specific blocks to be used to build our new btree.  @pag must be
 * a passive reference.
 */
STATIC int
xrep_newbt_add_blocks(
	struct xrep_newbt		*xnr,
	struct xfs_perag		*pag,
	const struct xfs_alloc_arg	*args)
{
	struct xfs_mount		*mp = xnr->sc->mp;
	struct xrep_newbt_resv		*resv;
	int				error;

	resv = kmalloc(sizeof(struct xrep_newbt_resv), XCHK_GFP_FLAGS);
	if (!resv)
		return -ENOMEM;

	INIT_LIST_HEAD(&resv->list);
	resv->agbno = XFS_FSB_TO_AGBNO(mp, args->fsbno);
	resv->len = args->len;
	resv->used = 0;
	resv->pag = xfs_perag_hold(pag);

	if (args->tp) {
		ASSERT(xnr->oinfo.oi_offset == 0);

		error = xfs_alloc_schedule_autoreap(args,
				XFS_FREE_EXTENT_SKIP_DISCARD, &resv->autoreap);
		if (error)
			goto out_pag;
	}

	list_add_tail(&resv->list, &xnr->resv_list);
	return 0;
out_pag:
	xfs_perag_put(resv->pag);
	kfree(resv);
	return error;
}

/*
 * Add an extent to the new btree reservation pool.  Callers are required to
 * reap this reservation manually if the repair is cancelled.  @pag must be a
 * passive reference.
 */
int
xrep_newbt_add_extent(
	struct xrep_newbt	*xnr,
	struct xfs_perag	*pag,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	struct xfs_alloc_arg	args = {
		.tp		= NULL, /* no autoreap */
		.oinfo		= xnr->oinfo,
		.fsbno		= xfs_agbno_to_fsb(pag, agbno),
		.len		= len,
		.resv		= xnr->resv,
	};

	return xrep_newbt_add_blocks(xnr, pag, &args);
}

/* Don't let our allocation hint take us beyond this AG */
static inline void
xrep_newbt_validate_ag_alloc_hint(
	struct xrep_newbt	*xnr)
{
	struct xfs_scrub	*sc = xnr->sc;
	xfs_agnumber_t		agno = XFS_FSB_TO_AGNO(sc->mp, xnr->alloc_hint);

	if (agno == pag_agno(sc->sa.pag) &&
	    xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
		return;

	xnr->alloc_hint = xfs_agbno_to_fsb(sc->sa.pag,
					   XFS_AGFL_BLOCK(sc->mp) + 1);
}

/* Allocate disk space for a new per-AG btree. */
STATIC int
xrep_newbt_alloc_ag_blocks(
	struct xrep_newbt	*xnr,
	uint64_t		nr_blocks)
{
	struct xfs_scrub	*sc = xnr->sc;
	struct xfs_mount	*mp = sc->mp;
	int			error = 0;

	ASSERT(sc->sa.pag != NULL);

	while (nr_blocks > 0) {
		struct xfs_alloc_arg	args = {
			.tp		= sc->tp,
			.mp		= mp,
			.oinfo		= xnr->oinfo,
			.minlen		= 1,
			.maxlen		= nr_blocks,
			.prod		= 1,
			.resv		= xnr->resv,
		};
		xfs_agnumber_t		agno;

		xrep_newbt_validate_ag_alloc_hint(xnr);

		if (xnr->alloc_vextent)
			error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
		else
			error = xfs_alloc_vextent_near_bno(&args,
					xnr->alloc_hint);
		if (error)
			return error;
		if (args.fsbno == NULLFSBLOCK)
			return -ENOSPC;

		agno = XFS_FSB_TO_AGNO(mp, args.fsbno);
		if (agno != pag_agno(sc->sa.pag)) {
			ASSERT(agno == pag_agno(sc->sa.pag));
			return -EFSCORRUPTED;
		}

		trace_xrep_newbt_alloc_ag_blocks(sc->sa.pag,
				XFS_FSB_TO_AGBNO(mp, args.fsbno), args.len,
				xnr->oinfo.oi_owner);

		error = xrep_newbt_add_blocks(xnr, sc->sa.pag, &args);
		if (error)
			return error;

		nr_blocks -= args.len;
		xnr->alloc_hint = args.fsbno + args.len;

		error = xrep_defer_finish(sc);
		if (error)
			return error;
	}

	return 0;
}

/* Don't let our allocation hint take us beyond EOFS */
static inline void
xrep_newbt_validate_file_alloc_hint(
	struct xrep_newbt	*xnr)
{
	struct xfs_scrub	*sc = xnr->sc;

	if (xfs_verify_fsbno(sc->mp, xnr->alloc_hint))
		return;

	xnr->alloc_hint = XFS_AGB_TO_FSB(sc->mp, 0, XFS_AGFL_BLOCK(sc->mp) + 1);
}

/* Allocate disk space for our new file-based btree. */
STATIC int
xrep_newbt_alloc_file_blocks(
	struct xrep_newbt	*xnr,
	uint64_t		nr_blocks)
{
	struct xfs_scrub	*sc = xnr->sc;
	struct xfs_mount	*mp = sc->mp;
	int			error = 0;

	while (nr_blocks > 0) {
		struct xfs_alloc_arg	args = {
			.tp		= sc->tp,
			.mp		= mp,
			.oinfo		= xnr->oinfo,
			.minlen		= 1,
			.maxlen		= nr_blocks,
			.prod		= 1,
			.resv		= xnr->resv,
		};
		struct xfs_perag	*pag;
		xfs_agnumber_t		agno;

		xrep_newbt_validate_file_alloc_hint(xnr);

		if (xnr->alloc_vextent)
			error = xnr->alloc_vextent(sc, &args, xnr->alloc_hint);
		else
			error = xfs_alloc_vextent_start_ag(&args,
					xnr->alloc_hint);
		if (error)
			return error;
		if (args.fsbno == NULLFSBLOCK)
			return -ENOSPC;

		agno = XFS_FSB_TO_AGNO(mp, args.fsbno);

		pag = xfs_perag_get(mp, agno);
		if (!pag) {
			ASSERT(0);
			return -EFSCORRUPTED;
		}

		trace_xrep_newbt_alloc_file_blocks(pag,
				XFS_FSB_TO_AGBNO(mp, args.fsbno), args.len,
				xnr->oinfo.oi_owner);

		error = xrep_newbt_add_blocks(xnr, pag, &args);
		xfs_perag_put(pag);
		if (error)
			return error;

		nr_blocks -= args.len;
		xnr->alloc_hint = args.fsbno + args.len;

		error = xrep_defer_finish(sc);
		if (error)
			return error;
	}

	return 0;
}

/* Allocate disk space for our new btree. */
int
xrep_newbt_alloc_blocks(
	struct xrep_newbt	*xnr,
	uint64_t		nr_blocks)
{
	if (xnr->sc->ip)
		return xrep_newbt_alloc_file_blocks(xnr, nr_blocks);
	return xrep_newbt_alloc_ag_blocks(xnr, nr_blocks);
}

/*
 * Free the unused part of a space extent that was reserved for a new ondisk
 * structure.  Returns the number of EFIs logged or a negative errno.
 */
STATIC int
xrep_newbt_free_extent(
	struct xrep_newbt	*xnr,
	struct xrep_newbt_resv	*resv,
	bool			btree_committed)
{
	struct xfs_scrub	*sc = xnr->sc;
	xfs_agblock_t		free_agbno = resv->agbno;
	xfs_extlen_t		free_aglen = resv->len;
	int			error;

	if (!btree_committed || resv->used == 0) {
		/*
		 * If we're not committing a new btree or we didn't use the
		 * space reservation, let the existing EFI free the entire
		 * space extent.
		 */
		trace_xrep_newbt_free_blocks(resv->pag, free_agbno, free_aglen,
				xnr->oinfo.oi_owner);
		xfs_alloc_commit_autoreap(sc->tp, &resv->autoreap);
		return 1;
	}

	/*
	 * We used space and committed the btree.  Cancel the autoreap, remove
	 * the written blocks from the reservation, and possibly log a new EFI
	 * to free any unused reservation space.
	 */
	xfs_alloc_cancel_autoreap(sc->tp, &resv->autoreap);
	free_agbno += resv->used;
	free_aglen -= resv->used;

	if (free_aglen == 0)
		return 0;

	trace_xrep_newbt_free_blocks(resv->pag, free_agbno, free_aglen,
			xnr->oinfo.oi_owner);

	ASSERT(xnr->resv != XFS_AG_RESV_AGFL);
	ASSERT(xnr->resv != XFS_AG_RESV_IGNORE);

	/*
	 * Use EFIs to free the reservations.  This reduces the chance
	 * that we leak blocks if the system goes down.
	 */
	error = xfs_free_extent_later(sc->tp,
			xfs_agbno_to_fsb(resv->pag, free_agbno), free_aglen,
			&xnr->oinfo, xnr->resv, XFS_FREE_EXTENT_SKIP_DISCARD);
	if (error)
		return error;

	return 1;
}

/* Free all the accounting info and disk space we reserved for a new btree. */
STATIC int
xrep_newbt_free(
	struct xrep_newbt	*xnr,
	bool			btree_committed)
{
	struct xfs_scrub	*sc = xnr->sc;
	struct xrep_newbt_resv	*resv, *n;
	unsigned int		freed = 0;
	int			error = 0;

	/*
	 * If the filesystem already went down, we can't free the blocks.
	 * Skip ahead to freeing the incore metadata because we can't fix
	 * anything.
	 */
	if (xfs_is_shutdown(sc->mp))
		goto junkit;

	list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
		int ret;

		ret = xrep_newbt_free_extent(xnr, resv, btree_committed);
		list_del(&resv->list);
		xfs_perag_put(resv->pag);
		kfree(resv);
		if (ret < 0) {
			error = ret;
			goto junkit;
		}

		freed += ret;
		if (freed >= XREP_MAX_ITRUNCATE_EFIS) {
			error = xrep_defer_finish(sc);
			if (error)
				goto junkit;
			freed = 0;
		}
	}

	if (freed)
		error = xrep_defer_finish(sc);

junkit:
	/*
	 * If we still have reservations attached to @newbt, cleanup must have
	 * failed and the filesystem is about to go down.  Clean up the incore
	 * reservations and try to commit to freeing the space we used.
	 */
	list_for_each_entry_safe(resv, n, &xnr->resv_list, list) {
		xfs_alloc_commit_autoreap(sc->tp, &resv->autoreap);
		list_del(&resv->list);
		xfs_perag_put(resv->pag);
		kfree(resv);
	}

	if (sc->ip) {
		kmem_cache_free(xfs_ifork_cache, xnr->ifake.if_fork);
		xnr->ifake.if_fork = NULL;
	}

	return error;
}

/*
 * Free all the accounting info and unused disk space allocations after
 * committing a new btree.
 */
int
xrep_newbt_commit(
	struct xrep_newbt	*xnr)
{
	return xrep_newbt_free(xnr, true);
}

/*
 * Free all the accounting info and all of the disk space we reserved for a
 * new btree that we're not going to commit.  We want to try to roll things
 * back cleanly for things like ENOSPC midway through allocation.
 */
void
xrep_newbt_cancel(
	struct xrep_newbt	*xnr)
{
	xrep_newbt_free(xnr, false);
}

/* Feed one of the reserved btree blocks to the bulk loader. */
int
xrep_newbt_claim_block(
	struct xfs_btree_cur	*cur,
	struct xrep_newbt	*xnr,
	union xfs_btree_ptr	*ptr)
{
	struct xrep_newbt_resv	*resv;
	xfs_agblock_t		agbno;

	/*
	 * The first item in the list should always have a free block unless
	 * we're completely out.
	 */
	resv = list_first_entry(&xnr->resv_list, struct xrep_newbt_resv, list);
	if (resv->used == resv->len)
		return -ENOSPC;

	/*
	 * Peel off a block from the start of the reservation.  We allocate
	 * blocks in order to place blocks on disk in increasing record or key
	 * order.  The block reservations tend to end up on the list in
	 * decreasing order, which hopefully results in leaf blocks ending up
	 * together.
	 */
	agbno = resv->agbno + resv->used;
	resv->used++;

	/* If we used all the blocks in this reservation, move it to the end. */
	if (resv->used == resv->len)
		list_move_tail(&resv->list, &xnr->resv_list);

	trace_xrep_newbt_claim_block(resv->pag, agbno, 1, xnr->oinfo.oi_owner);

	if (cur->bc_ops->ptr_len == XFS_BTREE_LONG_PTR_LEN)
		ptr->l = cpu_to_be64(xfs_agbno_to_fsb(resv->pag, agbno));
	else
		ptr->s = cpu_to_be32(agbno);

	/* Relog all the EFIs. */
	return xrep_defer_finish(xnr->sc);
}

/* How many reserved blocks are unused? */
unsigned int
xrep_newbt_unused_blocks(
	struct xrep_newbt	*xnr)
{
	struct xrep_newbt_resv	*resv;
	unsigned int		unused = 0;

	list_for_each_entry(resv, &xnr->resv_list, list)
		unused += resv->len - resv->used;
	return unused;
}
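/*
 * Illustrative sketch (not part of this file): the intended call order for
 * the xrep_newbt API above, assuming an online-repair caller with a valid
 * scrub context.  example_rebuild() is hypothetical, and error handling is
 * reduced to the cancel/commit split described in the comments above.
 */
#if 0
static int example_rebuild(struct xfs_scrub *sc)
{
	struct xrep_newbt	xnr;
	int			error;

	xrep_newbt_init_bare(&xnr, sc);			/* set up accounting */
	error = xrep_newbt_alloc_blocks(&xnr, 16);	/* reserve 16 blocks */
	if (error) {
		xrep_newbt_cancel(&xnr);		/* roll everything back */
		return error;
	}
	/*
	 * ...bulk-load the new btree here; the loader's get-block hook
	 * calls xrep_newbt_claim_block() for each node it writes...
	 */
	return xrep_newbt_commit(&xnr);			/* free unused space */
}
#endif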
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * bitext.h: Bit string operations on the sparc, specific to architecture.
 *
 * Copyright 2002 Pete Zaitcev <[email protected]>
 */

#ifndef _SPARC_BITEXT_H
#define _SPARC_BITEXT_H

#include <linux/spinlock.h>

struct bit_map {
	spinlock_t lock;
	unsigned long *map;
	int size;
	int used;
	int last_off;
	int last_size;
	int first_free;
	int num_colors;
};

int bit_map_string_get(struct bit_map *t, int len, int align);
void bit_map_clear(struct bit_map *t, int offset, int len);
void bit_map_init(struct bit_map *t, unsigned long *map, int size);

#endif /* defined(_SPARC_BITEXT_H) */
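/*
 * Illustrative sketch (not part of the header above): driving the allocator.
 * bit_map_string_get() hands out a run of 'len' bits at the given alignment
 * and, by my reading of its sparc users, returns the run's offset or -1 on
 * failure (an assumption worth checking); bit_map_clear() releases the run.
 * The names and the 1024-bit size below are examples only.
 */
#if 0
static unsigned long example_bits[BITS_TO_LONGS(1024)];
static struct bit_map example_map;

static int example_alloc(void)
{
	int off;

	bit_map_init(&example_map, example_bits, 1024);
	off = bit_map_string_get(&example_map, 16, 16); /* 16 bits, 16-aligned */
	if (off < 0)
		return -ENOMEM;
	bit_map_clear(&example_map, off, 16);		/* release the run */
	return 0;
}
#endif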
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * m52790.h - definition for m52790 inputs and outputs
 *
 * Copyright (C) 2007 Hans Verkuil ([email protected])
 */

#ifndef _M52790_H_
#define _M52790_H_

/* Input routing switch 1 */

#define M52790_SW1_IN_MASK	0x0003
#define M52790_SW1_IN_TUNER	0x0000
#define M52790_SW1_IN_V2	0x0001
#define M52790_SW1_IN_V3	0x0002
#define M52790_SW1_IN_V4	0x0003

/* Selects component input instead of composite */
#define M52790_SW1_YCMIX	0x0004

/* Input routing switch 2 */

#define M52790_SW2_IN_MASK	0x0300
#define M52790_SW2_IN_TUNER	0x0000
#define M52790_SW2_IN_V2	0x0100
#define M52790_SW2_IN_V3	0x0200
#define M52790_SW2_IN_V4	0x0300

/* Selects component input instead of composite */
#define M52790_SW2_YCMIX	0x0400

/* Output routing switch 1 */

/* Enable 6dB amplifier for composite out */
#define M52790_SW1_V_AMP	0x0008

/* Enable 6dB amplifier for component out */
#define M52790_SW1_YC_AMP	0x0010

/* Audio output mode */
#define M52790_SW1_AUDIO_MASK	0x00c0
#define M52790_SW1_AUDIO_MUTE	0x0000
#define M52790_SW1_AUDIO_R	0x0040
#define M52790_SW1_AUDIO_L	0x0080
#define M52790_SW1_AUDIO_STEREO	0x00c0

/* Output routing switch 2 */

/* Enable 6dB amplifier for composite out */
#define M52790_SW2_V_AMP	0x0800

/* Enable 6dB amplifier for component out */
#define M52790_SW2_YC_AMP	0x1000

/* Audio output mode */
#define M52790_SW2_AUDIO_MASK	0xc000
#define M52790_SW2_AUDIO_MUTE	0x0000
#define M52790_SW2_AUDIO_R	0x4000
#define M52790_SW2_AUDIO_L	0x8000
#define M52790_SW2_AUDIO_STEREO	0xc000

/* Common values */
#define M52790_IN_TUNER		(M52790_SW1_IN_TUNER | M52790_SW2_IN_TUNER)
#define M52790_IN_V2		(M52790_SW1_IN_V2 | M52790_SW2_IN_V2)
#define M52790_IN_V3		(M52790_SW1_IN_V3 | M52790_SW2_IN_V3)
#define M52790_IN_V4		(M52790_SW1_IN_V4 | M52790_SW2_IN_V4)

#define M52790_OUT_STEREO	(M52790_SW1_AUDIO_STEREO | \
				 M52790_SW2_AUDIO_STEREO)
#define M52790_OUT_AMP_STEREO	(M52790_SW1_AUDIO_STEREO | \
				 M52790_SW1_V_AMP | \
				 M52790_SW2_AUDIO_STEREO | \
				 M52790_SW2_V_AMP)

#endif
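/*
 * Illustrative sketch (not part of the header above): as the mask values
 * show, switch 1 settings occupy the low byte and switch 2 settings the
 * high byte of one 16-bit word, so a complete routing value is built by
 * OR-ing an input selector with an output mode, e.g.:
 */
#if 0
	u16 route = M52790_IN_TUNER | M52790_OUT_AMP_STEREO;
#endif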
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/common/icst307.c
 *
 *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved.
 *
 *  Support functions for calculating clocks/divisors for the ICST307
 *  clock generators.  See https://www.idt.com/ for more information
 *  on these devices.
 *
 *  This is an almost identical implementation to the ICST525 clock generator.
 *  The s2div and idx2s tables are different.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <asm/div64.h>

#include "icst.h"

/*
 * Divisors for each OD setting.
 */
const unsigned char icst307_s2div[8] = { 10, 2, 8, 4, 5, 7, 3, 6 };
const unsigned char icst525_s2div[8] = { 10, 2, 8, 4, 5, 7, 9, 6 };
EXPORT_SYMBOL(icst307_s2div);
EXPORT_SYMBOL(icst525_s2div);

unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
{
	u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
	u32 divisor = (vco.r + 2) * p->s2div[vco.s];

	do_div(dividend, divisor);
	return (unsigned long)dividend;
}

EXPORT_SYMBOL(icst_hz);

/*
 * Ascending divisor S values.
 */
const unsigned char icst307_idx2s[8] = { 1, 6, 3, 4, 7, 5, 2, 0 };
const unsigned char icst525_idx2s[8] = { 1, 3, 4, 7, 5, 2, 6, 0 };
EXPORT_SYMBOL(icst307_idx2s);
EXPORT_SYMBOL(icst525_idx2s);

struct icst_vco
icst_hz_to_vco(const struct icst_params *p, unsigned long freq)
{
	struct icst_vco vco = { .s = 1, .v = p->vd_max, .r = p->rd_max };
	unsigned long f;
	unsigned int i = 0, rd, best = (unsigned int)-1;

	/*
	 * First, find the PLL output divisor such
	 * that the PLL output is within spec.
	 */
	do {
		f = freq * p->s2div[p->idx2s[i]];

		if (f > p->vco_min && f <= p->vco_max)
			break;
		i++;
	} while (i < 8);

	if (i >= 8)
		return vco;

	vco.s = p->idx2s[i];

	/*
	 * Now find the closest divisor combination
	 * which gives a PLL output of 'f'.
	 */
	for (rd = p->rd_min; rd <= p->rd_max; rd++) {
		unsigned long fref_div, f_pll;
		unsigned int vd;
		int f_diff;

		fref_div = (2 * p->ref) / rd;

		vd = (f + fref_div / 2) / fref_div;
		if (vd < p->vd_min || vd > p->vd_max)
			continue;

		f_pll = fref_div * vd;
		f_diff = f_pll - f;
		if (f_diff < 0)
			f_diff = -f_diff;

		if ((unsigned)f_diff < best) {
			vco.v = vd - 8;
			vco.r = rd - 2;
			if (f_diff == 0)
				break;
			best = f_diff;
		}
	}

	return vco;
}

EXPORT_SYMBOL(icst_hz_to_vco);
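/*
 * Worked example of the icst_hz() formula above (the numbers are
 * illustrative, not taken from any board file): with ref = 12 MHz,
 * v = 92, r = 22 and s = 1 (so icst307_s2div[1] = 2),
 *
 *	f = ref * 2 * (v + 8) / ((r + 2) * s2div[s])
 *	  = 12000000 * 2 * 100 / (24 * 2)
 *	  = 50000000 Hz
 */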
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2023 Loongson Technology Corporation Limited
 */

#ifndef __LSDC_OUTPUT_H__
#define __LSDC_OUTPUT_H__

#include "lsdc_drv.h"

int ls7a1000_output_init(struct drm_device *ddev,
			 struct lsdc_display_pipe *dispipe,
			 struct i2c_adapter *ddc,
			 unsigned int index);

int ls7a2000_output_init(struct drm_device *ddev,
			 struct lsdc_display_pipe *dispipe,
			 struct i2c_adapter *ddc,
			 unsigned int index);

#endif
// SPDX-License-Identifier: GPL-2.0+ OR MIT
/*
 * Apple S8001 "A9X" SoC
 *
 * Other names: H8G, "Elba"
 *
 * Copyright (c) 2022, Konrad Dybcio <[email protected]>
 */

#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>

/ {
	interrupt-parent = <&aic>;
	#address-cells = <2>;
	#size-cells = <2>;

	clkref: clock-ref {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <24000000>;
		clock-output-names = "clkref";
	};

	cpus {
		#address-cells = <2>;
		#size-cells = <0>;

		cpu0: cpu@0 {
			compatible = "apple,twister";
			reg = <0x0 0x0>;
			cpu-release-addr = <0 0>; /* To be filled in by loader */
			enable-method = "spin-table";
			device_type = "cpu";
		};

		cpu1: cpu@1 {
			compatible = "apple,twister";
			reg = <0x0 0x1>;
			cpu-release-addr = <0 0>; /* To be filled in by loader */
			enable-method = "spin-table";
			device_type = "cpu";
		};
	};

	soc {
		compatible = "simple-bus";
		#address-cells = <2>;
		#size-cells = <2>;
		nonposted-mmio;
		ranges;

		serial0: serial@20a0c0000 {
			compatible = "apple,s5l-uart";
			reg = <0x2 0x0a0c0000 0x0 0x4000>;
			reg-io-width = <4>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 218 IRQ_TYPE_LEVEL_HIGH>;
			/* Use the bootloader-enabled clocks for now. */
			clocks = <&clkref>, <&clkref>;
			clock-names = "uart", "clk_uart_baud0";
			status = "disabled";
		};

		aic: interrupt-controller@20e100000 {
			compatible = "apple,s8000-aic", "apple,aic";
			reg = <0x2 0x0e100000 0x0 0x100000>;
			#interrupt-cells = <3>;
			interrupt-controller;
		};

		pinctrl_ap: pinctrl@20f100000 {
			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
			reg = <0x2 0x0f100000 0x0 0x100000>;

			gpio-controller;
			#gpio-cells = <2>;
			gpio-ranges = <&pinctrl_ap 0 0 219>;
			apple,npins = <219>;

			interrupt-controller;
			#interrupt-cells = <2>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 43 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 44 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>;
		};

		pinctrl_aop: pinctrl@2100f0000 {
			compatible = "apple,s8000-pinctrl", "apple,pinctrl";
			reg = <0x2 0x100f0000 0x0 0x100000>;

			gpio-controller;
			#gpio-cells = <2>;
			gpio-ranges = <&pinctrl_aop 0 0 28>;
			apple,npins = <28>;

			interrupt-controller;
			#interrupt-cells = <2>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 128 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 129 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 130 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 131 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 132 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 133 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>;
		};

		wdt: watchdog@2102b0000 {
			compatible = "apple,s8000-wdt", "apple,wdt";
			reg = <0x2 0x102b0000 0x0 0x4000>;
			clocks = <&clkref>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 4 IRQ_TYPE_LEVEL_HIGH>;
		};
	};

	timer {
		compatible = "arm,armv8-timer";
		interrupt-parent = <&aic>;
		interrupt-names = "phys", "virt";
		/*
		 * Note that A9X doesn't actually have a hypervisor (EL2 is
		 * not implemented).
		 */
		interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
	};
};
// SPDX-License-Identifier: GPL-2.0
/*
 * builtin-annotate.c
 *
 * Builtin annotate command: Analyze the perf.data input file,
 * look up and read DSOs and symbol information and display
 * a histogram of results, along various sorting keys.
 */
#include "builtin.h"

#include "util/color.h"
#include <linux/list.h>
#include "util/cache.h"
#include <linux/rbtree.h>
#include <linux/zalloc.h>
#include "util/symbol.h"

#include "util/debug.h"

#include "util/evlist.h"
#include "util/evsel.h"
#include "util/annotate.h"
#include "util/annotate-data.h"
#include "util/event.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/sort.h"
#include "util/hist.h"
#include "util/dso.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/data.h"
#include "arch/common.h"
#include "util/block-range.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/util.h"
#include "ui/progress.h"

#include <dlfcn.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/err.h>
#include <inttypes.h>

struct perf_annotate {
	struct perf_tool tool;
	struct perf_session *session;
#ifdef HAVE_SLANG_SUPPORT
	bool	   use_tui;
#endif
	bool	   use_stdio, use_stdio2;
#ifdef HAVE_GTK2_SUPPORT
	bool	   use_gtk;
#endif
	bool	   skip_missing;
	bool	   has_br_stack;
	bool	   group_set;
	bool	   data_type;
	bool	   type_stat;
	bool	   insn_stat;
	float	   min_percent;
	const char *sym_hist_filter;
	const char *cpu_list;
	const char *target_data_type;
	DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
};

/*
 * Given one basic block:
 *
 *	from	to		branch_i
 *	* ----> *
 *		|
 *		| block
 *		v
 *		* ----> *
 *		from	to	branch_i+1
 *
 * where the horizontal are the branches and the vertical is the executed
 * block of instructions.
 *
 * We count, for each 'instruction', the number of blocks that covered it as
 * well as count the ratio each branch is taken.
 *
 * We can do this without knowing the actual instruction stream by keeping
 * track of the address ranges. We break down ranges such that there is no
 * overlap and iterate from the start until the end.
 *
 * @acme: once we parse the objdump output _before_ processing the samples,
 * we can easily fold the branch.cycles IPC bits in.
 */
static void process_basic_block(struct addr_map_symbol *start,
				struct addr_map_symbol *end,
				struct branch_flags *flags)
{
	struct symbol *sym = start->ms.sym;
	struct annotation *notes = sym ? symbol__annotation(sym) : NULL;
	struct block_range_iter iter;
	struct block_range *entry;
	struct annotated_branch *branch;

	/*
	 * Sanity; NULL isn't executable and the CPU cannot execute backwards
	 */
	if (!start->addr || start->addr > end->addr)
		return;

	iter = block_range__create(start->addr, end->addr);
	if (!block_range_iter__valid(&iter))
		return;

	branch = annotation__get_branch(notes);

	/*
	 * First block in range is a branch target.
	 */
	entry = block_range_iter(&iter);
	assert(entry->is_target);
	entry->entry++;

	do {
		entry = block_range_iter(&iter);

		entry->coverage++;
		entry->sym = sym;

		if (branch)
			branch->max_coverage = max(branch->max_coverage,
						   entry->coverage);

	} while (block_range_iter__next(&iter));

	/*
	 * Last block in range is a branch.
	 */
	entry = block_range_iter(&iter);
	assert(entry->is_branch);
	entry->taken++;
	if (flags->predicted)
		entry->pred++;
}

static void process_branch_stack(struct branch_stack *bs, struct addr_location *al,
				 struct perf_sample *sample)
{
	struct addr_map_symbol *prev = NULL;
	struct branch_info *bi;
	int i;

	if (!bs || !bs->nr)
		return;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return;

	for (i = bs->nr - 1; i >= 0; i--) {
		/*
		 * XXX filter against symbol
		 */
		if (prev)
			process_basic_block(prev, &bi[i].from, &bi[i].flags);
		prev = &bi[i].to;
	}

	free(bi);
}

static int hist_iter__branch_callback(struct hist_entry_iter *iter,
				      struct addr_location *al __maybe_unused,
				      bool single __maybe_unused,
				      void *arg __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;
	struct evsel *evsel = iter->evsel;
	int err;

	bi = he->branch_info;
	err = addr_map_symbol__inc_samples(&bi->from, sample, evsel);

	if (err)
		goto out;

	err = addr_map_symbol__inc_samples(&bi->to, sample, evsel);

out:
	return err;
}

static int process_branch_callback(struct evsel *evsel,
				   struct perf_sample *sample,
				   struct addr_location *al,
				   struct perf_annotate *ann,
				   struct machine *machine)
{
	struct hist_entry_iter iter = {
		.evsel		= evsel,
		.sample		= sample,
		.add_entry_cb	= hist_iter__branch_callback,
		.hide_unresolved	= symbol_conf.hide_unresolved,
		.ops		= &hist_iter_branch,
	};
	struct addr_location a;
	int ret;

	addr_location__init(&a);
	if (machine__resolve(machine, &a, sample) < 0) {
		ret = -1;
		goto out;
	}

	if (a.sym == NULL) {
		ret = 0;
		goto out;
	}

	if (a.map != NULL)
		dso__set_hit(map__dso(a.map));

	hist__account_cycles(sample->branch_stack, al, sample, false, NULL, evsel);

	ret = hist_entry_iter__add(&iter, &a, PERF_MAX_STACK_DEPTH, ann);
out:
	addr_location__exit(&a);
	return ret;
}

static bool has_annotation(struct perf_annotate *ann)
{
	return ui__has_annotation() || ann->use_stdio2;
}

static int evsel__add_sample(struct evsel *evsel, struct perf_sample *sample,
			     struct addr_location *al, struct perf_annotate *ann,
			     struct machine *machine)
{
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he;
	int ret;

	if ((!ann->has_br_stack || !has_annotation(ann)) &&
	    ann->sym_hist_filter != NULL &&
	    (al->sym == NULL ||
	     strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
		/* We're only interested in a symbol named sym_hist_filter */
		/*
		 * FIXME: why isn't this done in the symbol_filter when loading
		 * the DSO?
		 */
		if (al->sym != NULL) {
			struct dso *dso = map__dso(al->map);

			rb_erase_cached(&al->sym->rb_node, dso__symbols(dso));
			symbol__delete(al->sym);
			dso__reset_find_symbol_cache(dso);
		}
		return 0;
	}

	/*
	 * XXX filtered samples can still have branch entries pointing into our
	 * symbol and are missed.
	 */
	process_branch_stack(sample->branch_stack, al, sample);

	if (ann->has_br_stack && has_annotation(ann))
		return process_branch_callback(evsel, sample, al, ann, machine);

	he = hists__add_entry(hists, al, NULL, NULL, NULL, NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	ret = hist_entry__inc_addr_samples(he, sample, evsel, al->addr);
	hists__inc_nr_samples(hists, true);
	return ret;
}

static int process_sample_event(const struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct evsel *evsel,
				struct machine *machine)
{
	struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
	struct addr_location al;
	int ret = 0;

	addr_location__init(&al);
	if (machine__resolve(machine, &al, sample) < 0) {
		pr_warning("problem processing %d event, skipping it.\n",
			   event->header.type);
		ret = -1;
		goto out_put;
	}

	if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
		goto out_put;

	if (!al.filtered &&
	    evsel__add_sample(evsel, sample, &al, ann, machine)) {
		pr_warning("problem incrementing symbol count, "
			   "skipping event\n");
		ret = -1;
	}
out_put:
	addr_location__exit(&al);
	return ret;
}

static int process_feature_event(struct perf_session *session,
				 union perf_event *event)
{
	if (event->feat.feat_id < HEADER_LAST_FEATURE)
		return perf_event__process_feature(session, event);
	return 0;
}

static int hist_entry__tty_annotate(struct hist_entry *he,
				    struct evsel *evsel,
				    struct perf_annotate *ann)
{
	if (!ann->use_stdio2)
		return symbol__tty_annotate(&he->ms, evsel);

	return symbol__tty_annotate2(&he->ms, evsel);
}

static void print_annotate_data_stat(struct annotated_data_stat *s)
{
#define PRINT_STAT(fld) if (s->fld) printf("%10d : %s\n", s->fld, #fld)

	int bad = s->no_sym +
			s->no_insn +
			s->no_insn_ops +
			s->no_mem_ops +
			s->no_reg +
			s->no_dbginfo +
			s->no_cuinfo +
			s->no_var +
			s->no_typeinfo +
			s->invalid_size +
			s->bad_offset;
	int ok = s->total - bad;

	printf("Annotate data type stats:\n");
	printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n",
		s->total, ok, 100.0 * ok / (s->total ?: 1),
		bad, 100.0 * bad / (s->total ?: 1));
	printf("-----------------------------------------------------------\n");
	PRINT_STAT(no_sym);
	PRINT_STAT(no_insn);
	PRINT_STAT(no_insn_ops);
	PRINT_STAT(no_mem_ops);
	PRINT_STAT(no_reg);
	PRINT_STAT(no_dbginfo);
	PRINT_STAT(no_cuinfo);
	PRINT_STAT(no_var);
	PRINT_STAT(no_typeinfo);
	PRINT_STAT(invalid_size);
	PRINT_STAT(bad_offset);
	PRINT_STAT(insn_track);
	printf("\n");

#undef PRINT_STAT
}

static void print_annotate_item_stat(struct list_head *head, const char *title)
{
	struct annotated_item_stat *istat, *pos, *iter;
	int total_good, total_bad, total;
	int sum1, sum2;
	LIST_HEAD(tmp);

	/* sort the list by count */
	list_splice_init(head, &tmp);
	total_good = total_bad = 0;

	list_for_each_entry_safe(istat, pos, &tmp, list) {
		total_good += istat->good;
		total_bad += istat->bad;
		sum1 = istat->good + istat->bad;

		list_for_each_entry(iter, head, list) {
			sum2 = iter->good + iter->bad;
			if (sum1 > sum2)
				break;
		}
		list_move_tail(&istat->list, &iter->list);
	}
	total = total_good + total_bad;

	printf("Annotate %s stats\n", title);
	printf("total %d, ok %d (%.1f%%), bad %d (%.1f%%)\n\n", total,
	       total_good, 100.0 * total_good / (total ?: 1),
	       total_bad, 100.0 * total_bad / (total ?: 1));
	printf("  %-20s: %5s %5s\n", "Name/opcode", "Good", "Bad");
	printf("-----------------------------------------------------------\n");
	list_for_each_entry(istat, head, list)
		printf("  %-20s: %5d %5d\n", istat->name, istat->good, istat->bad);
	printf("\n");
}

static void hists__find_annotations(struct hists *hists,
				    struct evsel *evsel,
				    struct perf_annotate *ann)
{
	struct rb_node *nd = rb_first_cached(&hists->entries), *next;
	int key = K_RIGHT;

	if (ann->type_stat)
		print_annotate_data_stat(&ann_data_stat);
	if (ann->insn_stat)
		print_annotate_item_stat(&ann_insn_stat, "Instruction");

	while (nd) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
		struct annotation *notes;

		if (he->ms.sym == NULL ||
		    dso__annotate_warned(map__dso(he->ms.map)))
			goto find_next;

		if (ann->sym_hist_filter &&
		    (strcmp(he->ms.sym->name, ann->sym_hist_filter) != 0))
			goto find_next;

		if (ann->min_percent) {
			float percent = 0;
			u64 total = hists__total_period(hists);

			if (total)
				percent = 100.0 * he->stat.period / total;

			if (percent < ann->min_percent)
				goto find_next;
		}

		notes = symbol__annotation(he->ms.sym);
		if (notes->src == NULL) {
find_next:
			if (key == K_LEFT || key == '<')
				nd = rb_prev(nd);
			else
				nd = rb_next(nd);
			continue;
		}

		if (ann->data_type) {
			/* skip unknown type */
			if (he->mem_type->histograms == NULL)
				goto find_next;

			if (ann->target_data_type) {
				const char *type_name = he->mem_type->self.type_name;

				/* skip 'struct ' prefix in the type name */
				if (strncmp(ann->target_data_type, "struct ", 7) &&
				    !strncmp(type_name, "struct ", 7))
					type_name += 7;

				/* skip 'union ' prefix in the type name */
				if (strncmp(ann->target_data_type, "union ", 6) &&
				    !strncmp(type_name, "union ", 6))
					type_name += 6;

				if (strcmp(ann->target_data_type, type_name))
					goto find_next;
			}

			if (use_browser == 1)
				key = hist_entry__annotate_data_tui(he, evsel, NULL);
			else
				key = hist_entry__annotate_data_tty(he, evsel);

			switch (key) {
			case -1:
				if (!ann->skip_missing)
					return;
				/* fall through */
			case K_RIGHT:
			case '>':
				next = rb_next(nd);
				break;
			case K_LEFT:
			case '<':
				next = rb_prev(nd);
				break;
			default:
				return;
			}

			if (use_browser == 0 || next != NULL)
				nd = next;

			continue;
		}

		if (use_browser == 2) {
			int ret;
			int (*annotate)(struct hist_entry *he,
					struct evsel *evsel,
					struct hist_browser_timer *hbt);

			annotate = dlsym(perf_gtk_handle,
					 "hist_entry__gtk_annotate");
			if (annotate == NULL) {
				ui__error("GTK browser not found!\n");
				return;
			}

			ret = annotate(he, evsel, NULL);
			if (!ret || !ann->skip_missing)
				return;

			/* skip missing symbols */
			nd = rb_next(nd);
		} else if (use_browser == 1) {
			key = hist_entry__tui_annotate(he, evsel, NULL);

			switch (key) {
			case -1:
				if (!ann->skip_missing)
					return;
				/* fall through */
			case K_RIGHT:
			case '>':
				next = rb_next(nd);
				break;
			case K_LEFT:
			case '<':
				next = rb_prev(nd);
				break;
			default:
				return;
			}

			if (next != NULL)
				nd = next;
		} else {
			hist_entry__tty_annotate(he, evsel, ann);
			nd = rb_next(nd);
		}
	}
}

static int __cmd_annotate(struct perf_annotate *ann)
{
	int ret;
	struct perf_session *session = ann->session;
	struct evsel *pos;
	u64 total_nr_samples;

	if (ann->cpu_list) {
		ret = perf_session__cpu_bitmap(session, ann->cpu_list,
					       ann->cpu_bitmap);
		if (ret)
			goto out;
	}

	if (!annotate_opts.objdump_path) {
		ret = perf_env__lookup_objdump(&session->header.env,
					       &annotate_opts.objdump_path);
		if (ret)
			goto out;
	}

	ret = perf_session__process_events(session);
	if (ret)
		goto out;

	if (dump_trace) {
		perf_session__fprintf_nr_events(session, stdout);
		evlist__fprintf_nr_events(session->evlist, stdout);
		goto out;
	}

	if (verbose > 3)
		perf_session__fprintf(session, stdout);

	if (verbose > 2)
		perf_session__fprintf_dsos(session, stdout);

	total_nr_samples = 0;
	evlist__for_each_entry(session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		u32 nr_samples = hists->stats.nr_samples;
		struct ui_progress prog;

		if (nr_samples > 0) {
			total_nr_samples += nr_samples;

			ui_progress__init(&prog, nr_samples,
					  "Merging related events...");
			hists__collapse_resort(hists, &prog);
			ui_progress__finish();

			/* Don't sort callchain */
			evsel__reset_sample_bit(pos, CALLCHAIN);

			ui_progress__init(&prog, nr_samples,
					  "Sorting events for output...");
			evsel__output_resort(pos, &prog);
			ui_progress__finish();

			/*
			 * An event group needs to display other events too.
			 * Let's delay printing until other events are
			 * processed.
			 */
			if (symbol_conf.event_group) {
				if (!evsel__is_group_leader(pos)) {
					struct hists *leader_hists;

					leader_hists = evsel__hists(evsel__leader(pos));
					hists__match(leader_hists, hists);
					hists__link(leader_hists, hists);
				}
				continue;
			}

			hists__find_annotations(hists, pos, ann);
		}
	}

	if (total_nr_samples == 0) {
		ui__error("The %s data has no samples!\n", session->data->path);
		goto out;
	}

	/* Display group events together */
	evlist__for_each_entry(session->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		u32 nr_samples = hists->stats.nr_samples;
		struct ui_progress prog;
		struct evsel *evsel;

		if (!symbol_conf.event_group || !evsel__is_group_leader(pos))
			continue;

		for_each_group_member(evsel, pos)
			nr_samples += evsel__hists(evsel)->stats.nr_samples;

		if (nr_samples == 0)
			continue;

		ui_progress__init(&prog, nr_samples,
				  "Sorting group events for output...");
		evsel__output_resort(pos, &prog);
		ui_progress__finish();

		hists__find_annotations(hists, pos, ann);
	}

	if (use_browser == 2) {
		void (*show_annotations)(void);

		show_annotations = dlsym(perf_gtk_handle,
					 "perf_gtk__show_annotations");
		if (show_annotations == NULL) {
			ui__error("GTK browser not found!\n");
			goto out;
		}
		show_annotations();
	}

out:
	return ret;
}

static int parse_percent_limit(const struct option *opt, const char *str,
			       int unset __maybe_unused)
{
	struct perf_annotate *ann = opt->value;
	double pcnt = strtof(str, NULL);

	ann->min_percent = pcnt;
	return 0;
}

static int parse_data_type(const struct option *opt, const char *str, int unset)
{
	struct perf_annotate *ann = opt->value;

	ann->data_type = !unset;
	if (str)
		ann->target_data_type = strdup(str);

	return 0;
}

static const char * const annotate_usage[] = {
	"perf annotate [<options>]",
	NULL
};

int cmd_annotate(int argc, const char **argv)
{
	struct perf_annotate annotate = {};
	struct perf_data data = {
		.mode  = PERF_DATA_MODE_READ,
	};
	struct itrace_synth_opts itrace_synth_opts = {
		.set = 0,
	};
	const char *disassembler_style = NULL, *objdump_path = NULL,
		   *addr2line_path = NULL;
	struct option options[] = {
	OPT_STRING('i', "input", &input_name, "file",
		    "input file name"),
	OPT_STRING('d', "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		   "only consider symbols in these dsos"),
	OPT_STRING('s', "symbol", &annotate.sym_hist_filter, "symbol",
		    "symbol to annotate"),
	OPT_BOOLEAN('f', "force", &data.force, "don't complain, do it"),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show symbol address, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "do not show any warnings or messages"),
	OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
		    "dump raw trace in ASCII"),
#ifdef HAVE_GTK2_SUPPORT
	OPT_BOOLEAN(0, "gtk", &annotate.use_gtk, "Use the GTK interface"),
#endif
#ifdef HAVE_SLANG_SUPPORT
	OPT_BOOLEAN(0, "tui", &annotate.use_tui, "Use the TUI interface"),
#endif
	OPT_BOOLEAN(0, "stdio", &annotate.use_stdio, "Use the stdio interface"),
	OPT_BOOLEAN(0, "stdio2", &annotate.use_stdio2, "Use the stdio interface"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		    "don't load vmlinux even if found"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN('m', "modules", &symbol_conf.use_modules,
		    "load module symbols - WARNING: use only with -k and LIVE kernel"),
	OPT_BOOLEAN('l', "print-line", &annotate_opts.print_lines,
		    "print matching source lines (may be slow)"),
	OPT_BOOLEAN('P', "full-paths", &annotate_opts.full_path,
		    "Don't shorten the displayed pathnames"),
	OPT_BOOLEAN(0, "skip-missing", &annotate.skip_missing,
		    "Skip symbols that cannot be annotated"),
	OPT_BOOLEAN_SET(0, "group", &symbol_conf.event_group,
			&annotate.group_set,
			"Show event group information together"),
	OPT_STRING('C', "cpu", &annotate.cpu_list, "cpu", "list of cpus to profile"),
	OPT_CALLBACK(0, "symfs", NULL, "directory",
		     "Look for files with symbols relative to this directory",
		     symbol__config_symfs),
	OPT_BOOLEAN(0, "source", &annotate_opts.annotate_src,
		    "Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &annotate_opts.show_asm_raw,
		    "Display raw encoding of assembly instructions (default)"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		   "Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &annotate_opts.prefix, "prefix",
		    "Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &annotate_opts.prefix_strip, "N",
		    "Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		   "objdump binary to use for disassembly and annotations"),
	OPT_STRING(0, "addr2line", &addr2line_path, "path",
		   "addr2line binary to use for line numbers"),
	OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
		    "Enable symbol demangling"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		    "Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		    "Show a column with the sum of periods"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		    "Show a column with the number of samples"),
	OPT_CALLBACK_DEFAULT(0, "stdio-color", NULL, "mode",
			     "'always' (default), 'never' or 'auto' only applicable to --stdio mode",
			     stdio__config_color, "always"),
	OPT_CALLBACK(0, "percent-type", &annotate_opts, "local-period",
		     "Set percent type local/global-period/hits",
		     annotate_parse_percent_type),
	OPT_CALLBACK(0, "percent-limit", &annotate, "percent",
		     "Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
			    "Instruction Tracing options\n" ITRACE_HELP,
			    itrace_parse_synth_opts),
	OPT_CALLBACK_OPTARG(0, "data-type", &annotate, NULL, "name",
			    "Show data type annotate for the memory accesses",
			    parse_data_type),
	OPT_BOOLEAN(0, "type-stat", &annotate.type_stat,
		    "Show stats for the data type annotation"),
	OPT_BOOLEAN(0, "insn-stat", &annotate.insn_stat,
		    "Show instruction stats for the data type annotation"),
	OPT_BOOLEAN(0, "skip-empty", &symbol_conf.skip_empty,
		    "Do not display empty (or dummy) events in the output"),
	OPT_END()
	};
	int ret;

	set_option_flag(options, 0, "show-total-period", PARSE_OPT_EXCLUSIVE);
	set_option_flag(options, 0, "show-nr-samples", PARSE_OPT_EXCLUSIVE);

	annotation_options__init();

	ret = hists__init();
	if (ret < 0)
		return ret;

	annotation_config__init();

	argc = parse_options(argc, argv, options, annotate_usage, 0);
	if (argc) {
		/*
		 * Special case: if there's an argument left then assume that
		 * it's a symbol filter:
		 */
		if (argc > 1)
			usage_with_options(annotate_usage, options);

		annotate.sym_hist_filter = argv[0];
	}

	if (disassembler_style) {
		annotate_opts.disassembler_style = strdup(disassembler_style);
		if (!annotate_opts.disassembler_style)
			return -ENOMEM;
	}
	if (objdump_path) {
		annotate_opts.objdump_path = strdup(objdump_path);
		if (!annotate_opts.objdump_path)
			return -ENOMEM;
	}
	if (addr2line_path) {
		symbol_conf.addr2line_path = strdup(addr2line_path);
		if (!symbol_conf.addr2line_path)
			return -ENOMEM;
	}

	if (annotate_check_args() < 0)
		return -EINVAL;

#ifdef HAVE_GTK2_SUPPORT
	if (symbol_conf.show_nr_samples && annotate.use_gtk) {
		pr_err("--show-nr-samples is not available in --gtk mode at this time\n");
		return ret;
	}
#endif

#ifndef HAVE_LIBDW_SUPPORT
	if (annotate.data_type) {
		pr_err("Error: Data type profiling is disabled due to missing DWARF support\n");
		return -ENOTSUP;
	}
#endif

	ret = symbol__validate_sym_arguments();
	if (ret)
		return ret;

	if (quiet)
		perf_quiet_option();

	data.path = input_name;

	perf_tool__init(&annotate.tool, /*ordered_events=*/true);
	annotate.tool.sample = process_sample_event;
	annotate.tool.mmap = perf_event__process_mmap;
	annotate.tool.mmap2 = perf_event__process_mmap2;
	annotate.tool.comm = perf_event__process_comm;
	annotate.tool.exit = perf_event__process_exit;
	annotate.tool.fork = perf_event__process_fork;
	annotate.tool.namespaces = perf_event__process_namespaces;
	annotate.tool.attr = perf_event__process_attr;
	annotate.tool.build_id = perf_event__process_build_id;
#ifdef HAVE_LIBTRACEEVENT
	annotate.tool.tracing_data = perf_event__process_tracing_data;
#endif
	annotate.tool.id_index = perf_event__process_id_index;
	annotate.tool.auxtrace_info = perf_event__process_auxtrace_info;
	annotate.tool.auxtrace = perf_event__process_auxtrace;
	annotate.tool.feature = process_feature_event;
	annotate.tool.ordering_requires_timestamps = true;

	annotate.session = perf_session__new(&data, &annotate.tool);
	if (IS_ERR(annotate.session))
		return PTR_ERR(annotate.session);

	annotate.session->itrace_synth_opts = &itrace_synth_opts;

	annotate.has_br_stack = perf_header__has_feat(&annotate.session->header,
						      HEADER_BRANCH_STACK);

	if (annotate.group_set)
		evlist__force_leader(annotate.session->evlist);

	ret = symbol__annotation_init();
	if (ret < 0)
		goto out_delete;

	symbol_conf.try_vmlinux_path = true;

	ret = symbol__init(&annotate.session->header.env);
	if (ret < 0)
		goto out_delete;

	if (annotate.use_stdio || annotate.use_stdio2)
		use_browser = 0;
#ifdef HAVE_SLANG_SUPPORT
	else if (annotate.use_tui)
		use_browser = 1;
#endif
#ifdef HAVE_GTK2_SUPPORT
	else if (annotate.use_gtk)
		use_browser = 2;
#endif

	if (annotate.data_type) {
		annotate_opts.annotate_src = false;
		symbol_conf.annotate_data_member = true;
		symbol_conf.annotate_data_sample = true;
	}

	setup_browser(true);

	/*
	 * Events of different processes may correspond to the same
	 * symbol, we do not care about the processes in annotate,
	 * set sort order to avoid repeated output.
	 */
	if (annotate.data_type)
		sort_order = "dso,type";
	else
		sort_order = "dso,symbol";

	/*
	 * Set SORT_MODE__BRANCH so that annotate displays IPC/Cycle and
	 * branch counters, if the corresponding branch info is available
	 * in the perf data in the TUI mode.
	 */
	if ((use_browser == 1 || annotate.use_stdio2) && annotate.has_br_stack) {
		sort__mode = SORT_MODE__BRANCH;
		if (annotate.session->evlist->nr_br_cntr > 0)
			annotate_opts.show_br_cntr = true;
	}

	if (setup_sorting(NULL) < 0)
		usage_with_options(annotate_usage, options);

	ret = __cmd_annotate(&annotate);

out_delete:
	/*
	 * Speed up the exit process by only deleting for debug builds. For
	 * large files this can save time.
	 */
#ifndef NDEBUG
	perf_session__delete(annotate.session);
#endif
	annotation_options__exit();

	return ret;
}
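/*
 * Illustrative invocations (not part of this file), derived from the option
 * table above; the symbol name and the thresholds are examples only:
 *
 *	perf annotate --stdio2 -s hot_function   # annotate one symbol on stdio
 *	perf annotate --percent-limit 5          # hide entries below 5%
 *	perf annotate --data-type --type-stat    # data-type mode (needs DWARF)
 */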
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * AMCC SoC PPC4xx Crypto Driver
 *
 * Copyright (c) 2008 Applied Micro Circuits Corporation.
 * All rights reserved. James Hsiao <[email protected]>
 *
 * This file defines the security context
 * associate format.
 */

#ifndef __CRYPTO4XX_TRNG_H__
#define __CRYPTO4XX_TRNG_H__

#ifdef CONFIG_HW_RANDOM_PPC4XX
void ppc4xx_trng_probe(struct crypto4xx_core_device *core_dev);
void ppc4xx_trng_remove(struct crypto4xx_core_device *core_dev);
#else
static inline void ppc4xx_trng_probe(
	struct crypto4xx_core_device *dev __maybe_unused) { }
static inline void ppc4xx_trng_remove(
	struct crypto4xx_core_device *dev __maybe_unused) { }
#endif

#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * Cadence USBSS DRD Driver.
 *
 * Copyright (C) 2018-2020 Cadence.
 * Copyright (C) 2017-2018 NXP
 * Copyright (C) 2019 Texas Instruments
 *
 * Author: Peter Chen <[email protected]>
 *         Pawel Laszczak <[email protected]>
 *         Roger Quadros <[email protected]>
 */

#include <linux/module.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "core.h"
#include "gadget-export.h"
#include "drd.h"

static int set_phy_power_on(struct cdns *cdns)
{
	int ret;

	ret = phy_power_on(cdns->usb2_phy);
	if (ret)
		return ret;

	ret = phy_power_on(cdns->usb3_phy);
	if (ret)
		phy_power_off(cdns->usb2_phy);

	return ret;
}

static void set_phy_power_off(struct cdns *cdns)
{
	phy_power_off(cdns->usb3_phy);
	phy_power_off(cdns->usb2_phy);
}

/**
 * cdns3_plat_probe - probe for cdns3 core device
 * @pdev: Pointer to cdns3 core platform device
 *
 * Returns 0 on success otherwise negative errno
 */
static int cdns3_plat_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource	*res;
	struct cdns *cdns;
	void __iomem *regs;
	int ret;

	cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
	if (!cdns)
		return -ENOMEM;

	cdns->dev = dev;
	cdns->pdata = dev_get_platdata(dev);

	platform_set_drvdata(pdev, cdns);

	ret = platform_get_irq_byname(pdev, "host");
	if (ret < 0)
		return ret;

	cdns->xhci_res[0].start = ret;
	cdns->xhci_res[0].end = ret;
	cdns->xhci_res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(ret);
	cdns->xhci_res[0].name = "host";

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
	if (!res) {
		dev_err(dev, "couldn't get xhci resource\n");
		return -ENXIO;
	}

	cdns->xhci_res[1] = *res;

	cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
	if (cdns->dev_irq < 0)
		return dev_err_probe(dev, cdns->dev_irq,
				     "Failed to get peripheral IRQ\n");

	regs = devm_platform_ioremap_resource_byname(pdev, "dev");
	if (IS_ERR(regs))
		return dev_err_probe(dev, PTR_ERR(regs),
				     "Failed to get dev base\n");

	cdns->dev_regs = regs;

	cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
	if (cdns->otg_irq < 0)
		return dev_err_probe(dev, cdns->otg_irq,
				     "Failed to get otg IRQ\n");

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
	if (!res) {
		dev_err(dev, "couldn't get otg resource\n");
		return -ENXIO;
	}

	cdns->phyrst_a_enable = device_property_read_bool(dev,
							  "cdns,phyrst-a-enable");

	cdns->otg_res = *res;

	cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
	if (cdns->wakeup_irq == -EPROBE_DEFER)
		return cdns->wakeup_irq;

	if (cdns->wakeup_irq < 0) {
		dev_dbg(dev, "couldn't get wakeup irq\n");
		cdns->wakeup_irq = 0x0;
	}

	cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
	if (IS_ERR(cdns->usb2_phy))
		return dev_err_probe(dev, PTR_ERR(cdns->usb2_phy),
				     "Failed to get cdns3,usb2-phy\n");

	ret = phy_init(cdns->usb2_phy);
	if (ret)
		return ret;

	cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
	if (IS_ERR(cdns->usb3_phy))
		return dev_err_probe(dev, PTR_ERR(cdns->usb3_phy),
				     "Failed to get cdns3,usb3-phy\n");

	ret = phy_init(cdns->usb3_phy);
	if (ret)
		goto err_phy3_init;

	ret = set_phy_power_on(cdns);
	if (ret)
		goto err_phy_power_on;

	cdns->gadget_init = cdns3_gadget_init;

	ret = cdns_init(cdns);
	if (ret)
		goto err_cdns_init;

	device_set_wakeup_capable(dev, true);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
		pm_runtime_forbid(dev);

	/*
	 * The controller needs less time between bus and controller suspend,
	 * and we also need a small delay to avoid frequently entering low
	 * power mode.
	 */
	pm_runtime_set_autosuspend_delay(dev, 20);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_use_autosuspend(dev);

	return 0;

err_cdns_init:
	set_phy_power_off(cdns);
err_phy_power_on:
	phy_exit(cdns->usb3_phy);
err_phy3_init:
	phy_exit(cdns->usb2_phy);

	return ret;
}

/**
 * cdns3_plat_remove() - unbind drd driver and clean up
 * @pdev: Pointer to Linux platform device
 */
static void cdns3_plat_remove(struct platform_device *pdev)
{
	struct cdns *cdns = platform_get_drvdata(pdev);
	struct device *dev = cdns->dev;

	pm_runtime_get_sync(dev);
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);
	cdns_remove(cdns);
	set_phy_power_off(cdns);
	phy_exit(cdns->usb2_phy);
	phy_exit(cdns->usb3_phy);
}

#ifdef CONFIG_PM

static int cdns3_set_platform_suspend(struct device *dev,
				      bool suspend, bool wakeup)
{
	struct cdns *cdns = dev_get_drvdata(dev);
	int ret = 0;

	if (cdns->pdata && cdns->pdata->platform_suspend)
		ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);

	return ret;
}

static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
{
	struct cdns *cdns = dev_get_drvdata(dev);
	bool wakeup;
	unsigned long flags;

	if (cdns->in_lpm)
		return 0;

	if (PMSG_IS_AUTO(msg))
		wakeup = true;
	else
		wakeup = device_may_wakeup(dev);

	cdns3_set_platform_suspend(cdns->dev, true, wakeup);
	set_phy_power_off(cdns);
	spin_lock_irqsave(&cdns->lock, flags);
	cdns->in_lpm = true;
	spin_unlock_irqrestore(&cdns->lock, flags);
	dev_dbg(cdns->dev, "%s ends\n", __func__);

	return 0;
}

static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
{
	struct cdns *cdns = dev_get_drvdata(dev);
	int ret;
	unsigned long flags;

	if (!cdns->in_lpm)
		return 0;

	if (cdns_power_is_lost(cdns)) {
		phy_exit(cdns->usb2_phy);
		ret = phy_init(cdns->usb2_phy);
		if (ret)
			return ret;

		phy_exit(cdns->usb3_phy);
		ret = phy_init(cdns->usb3_phy);
		if (ret)
			return ret;
	}

	ret = set_phy_power_on(cdns);
	if (ret)
		return ret;

	cdns3_set_platform_suspend(cdns->dev, false, false);

	spin_lock_irqsave(&cdns->lock, flags);
	cdns_resume(cdns);
	cdns->in_lpm = false;
	spin_unlock_irqrestore(&cdns->lock, flags);
	cdns_set_active(cdns, !PMSG_IS_AUTO(msg));
	if (cdns->wakeup_pending) {
		cdns->wakeup_pending = false;
		enable_irq(cdns->wakeup_irq);
	}
	dev_dbg(cdns->dev, "%s ends\n", __func__);

	return ret;
}

static int cdns3_plat_runtime_suspend(struct device *dev)
{
	return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
}

static int cdns3_plat_runtime_resume(struct device *dev)
{
	return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
}

#ifdef CONFIG_PM_SLEEP

static int cdns3_plat_suspend(struct device *dev)
{
	struct cdns *cdns = dev_get_drvdata(dev);
	int ret;

	cdns_suspend(cdns);

	ret = cdns3_controller_suspend(dev, PMSG_SUSPEND);
	if (ret)
		return ret;

	if (device_may_wakeup(dev) && cdns->wakeup_irq)
		enable_irq_wake(cdns->wakeup_irq);

	return ret;
}

static int cdns3_plat_resume(struct device *dev)
{
	return cdns3_controller_resume(dev, PMSG_RESUME);
}
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */

static const struct dev_pm_ops cdns3_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume)
	SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend,
			   cdns3_plat_runtime_resume, NULL)
};

#ifdef CONFIG_OF
static const struct of_device_id of_cdns3_match[] = {
	{ .compatible = "cdns,usb3" },
	{ },
};
MODULE_DEVICE_TABLE(of, of_cdns3_match);
#endif

static struct platform_driver cdns3_driver = {
	.probe		= cdns3_plat_probe,
	.remove		= cdns3_plat_remove,
	.driver		= {
		.name	= "cdns-usb3",
		.of_match_table	= of_match_ptr(of_cdns3_match),
		.pm	= &cdns3_pm_ops,
	},
};

module_platform_driver(cdns3_driver);

MODULE_ALIAS("platform:cdns3");
MODULE_AUTHOR("Pawel Laszczak <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Google Kukui (and derivatives) audio fragment for ts3a227e.
 *
 * Copyright 2019 Google LLC.
 */

&i2c5 {
	ts3a227e: ts3a227e@3b {
		pinctrl-names = "default";
		pinctrl-0 = <&ts3a227e_pins>;
		compatible = "ti,ts3a227e";
		reg = <0x3b>;
		interrupts-extended = <&pio 157 IRQ_TYPE_LEVEL_LOW>;
		status = "okay";
	};
};

&pio {
	ts3a227e_pins: ts3a227e_pins {
		pins1 {
			pinmux = <PINMUX_GPIO157__FUNC_GPIO157>;
			input-enable;
			bias-pull-up;
		};
	};
};

&sound {
	mediatek,headset-codec = <&ts3a227e>;
};
// SPDX-License-Identifier: GPL-2.0-only /* * Generic NCR5380 driver * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * [email protected] * +1 (303) 440-4894 * * NCR53C400 extensions (c) 1994,1995,1996, Kevin Lentin * [email protected] * * NCR53C400A extensions (c) 1996, Ingmar Baumgart * [email protected] * * DTC3181E extensions (c) 1997, Ronald van Cuijlenborg * [email protected] or [email protected] * * Added ISAPNP support for DTC436 adapters, * Thomas Sailer, [email protected] * * See Documentation/scsi/g_NCR5380.rst for more info. */ #include <asm/io.h> #include <linux/blkdev.h> #include <linux/module.h> #include <scsi/scsi_host.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/interrupt.h> /* Definitions for the core NCR5380 driver. */ #define NCR5380_read(reg) \ ioread8(hostdata->io + hostdata->offset + (reg)) #define NCR5380_write(reg, value) \ iowrite8(value, hostdata->io + hostdata->offset + (reg)) #define NCR5380_implementation_fields \ int offset; \ int c400_ctl_status; \ int c400_blk_cnt; \ int c400_host_buf; \ int io_width; \ int pdma_residual; \ int board #define NCR5380_dma_xfer_len generic_NCR5380_dma_xfer_len #define NCR5380_dma_recv_setup generic_NCR5380_precv #define NCR5380_dma_send_setup generic_NCR5380_psend #define NCR5380_dma_residual generic_NCR5380_dma_residual #define NCR5380_intr generic_NCR5380_intr #define NCR5380_queue_command generic_NCR5380_queue_command #define NCR5380_abort generic_NCR5380_abort #define NCR5380_host_reset generic_NCR5380_host_reset #define NCR5380_info generic_NCR5380_info #define NCR5380_io_delay(x) udelay(x) #include "NCR5380.h" #define DRV_MODULE_NAME "g_NCR5380" #define NCR53C400_mem_base 0x3880 #define NCR53C400_host_buffer 0x3900 #define NCR53C400_region_size 0x3a00 #define BOARD_NCR5380 0 #define BOARD_NCR53C400 1 #define BOARD_NCR53C400A 2 #define BOARD_DTC3181E 3 #define BOARD_HP_C2502 4 #define IRQ_AUTO 254 #define MAX_CARDS 8 #define DMA_MAX_SIZE 32768 /* old-style parameters for compatibility */ static int ncr_irq = -1; static int ncr_addr; static int ncr_5380; static int ncr_53c400; static int ncr_53c400a; static int dtc_3181e; static int hp_c2502; module_param_hw(ncr_irq, int, irq, 0); module_param_hw(ncr_addr, int, ioport, 0); module_param(ncr_5380, int, 0); module_param(ncr_53c400, int, 0); module_param(ncr_53c400a, int, 0); module_param(dtc_3181e, int, 0); module_param(hp_c2502, int, 0); static int irq[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_hw_array(irq, int, irq, NULL, 0); MODULE_PARM_DESC(irq, "IRQ number(s) (0=none, 254=auto [default])"); static int base[] = { 0, 0, 0, 0, 0, 0, 0, 0 }; module_param_hw_array(base, int, ioport, NULL, 0); MODULE_PARM_DESC(base, "base address(es)"); static int card[] = { -1, -1, -1, -1, -1, -1, -1, -1 }; module_param_array(card, int, NULL, 0); MODULE_PARM_DESC(card, "card type (0=NCR5380, 1=NCR53C400, 2=NCR53C400A, 3=DTC3181E, 4=HP C2502)"); MODULE_ALIAS("g_NCR5380_mmio"); MODULE_DESCRIPTION("Generic NCR5380/NCR53C400 SCSI driver"); MODULE_LICENSE("GPL"); static void g_NCR5380_trigger_irq(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); /* * An interrupt is triggered whenever BSY = false, SEL = true * and a bit set in the SELECT_ENABLE_REG is asserted on the * SCSI bus. * * Note that the bus is only driven when the phase control signals * (I/O, C/D, and MSG) match those in the TCR.
*/ NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG) & PHASE_MASK)); NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA | ICR_ASSERT_SEL); msleep(1); NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); NCR5380_write(SELECT_ENABLE_REG, 0); NCR5380_write(TARGET_COMMAND_REG, 0); } /** * g_NCR5380_probe_irq - find the IRQ of a NCR5380 or equivalent * @instance: SCSI host instance * * Autoprobe for the IRQ line used by the card by triggering an IRQ * and then looking to see what interrupt actually turned up. */ static int g_NCR5380_probe_irq(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); int irq_mask, irq; NCR5380_read(RESET_PARITY_INTERRUPT_REG); irq_mask = probe_irq_on(); g_NCR5380_trigger_irq(instance); irq = probe_irq_off(irq_mask); NCR5380_read(RESET_PARITY_INTERRUPT_REG); if (irq <= 0) return NO_IRQ; return irq; } /* * Configure I/O address of 53C400A or DTC436 by writing magic numbers * to ports 0x779 and 0x379. */ static void magic_configure(int idx, u8 irq, u8 magic[]) { u8 cfg = 0; outb(magic[0], 0x779); outb(magic[1], 0x379); outb(magic[2], 0x379); outb(magic[3], 0x379); outb(magic[4], 0x379); if (irq == 9) irq = 2; if (idx >= 0 && idx <= 7) cfg = 0x80 | idx | (irq << 4); outb(cfg, 0x379); } static irqreturn_t legacy_empty_irq_handler(int irq, void *dev_id) { return IRQ_HANDLED; } static int legacy_find_free_irq(int *irq_table) { while (*irq_table != -1) { if (!request_irq(*irq_table, legacy_empty_irq_handler, IRQF_PROBE_SHARED, "Test IRQ", (void *)irq_table)) { free_irq(*irq_table, (void *) irq_table); return *irq_table; } irq_table++; } return -1; } static unsigned int ncr_53c400a_ports[] = { 0x280, 0x290, 0x300, 0x310, 0x330, 0x340, 0x348, 0x350, 0 }; static unsigned int dtc_3181e_ports[] = { 0x220, 0x240, 0x280, 0x2a0, 0x2c0, 0x300, 0x320, 0x340, 0 }; static u8 ncr_53c400a_magic[] = { /* 53C400A & DTC436 */ 0x59, 0xb9, 0xc5, 0xae, 0xa6 }; static u8 hp_c2502_magic[] = { /* HP C2502 */ 0x0f, 0x22, 0xf0, 0x20, 0x80 }; static int hp_c2502_irqs[] = { 9, 5, 7, 3, 4, -1 }; static int generic_NCR5380_init_one(const struct scsi_host_template *tpnt, struct device *pdev, int base, int irq, int board) { bool is_pmio = base <= 0xffff; int ret; int flags = 0; unsigned int *ports = NULL; u8 *magic = NULL; int i; int port_idx = -1; unsigned long region_size; struct Scsi_Host *instance; struct NCR5380_hostdata *hostdata; u8 __iomem *iomem; switch (board) { case BOARD_NCR5380: flags = FLAG_NO_PSEUDO_DMA | FLAG_DMA_FIXUP; break; case BOARD_NCR53C400A: ports = ncr_53c400a_ports; magic = ncr_53c400a_magic; break; case BOARD_HP_C2502: ports = ncr_53c400a_ports; magic = hp_c2502_magic; break; case BOARD_DTC3181E: ports = dtc_3181e_ports; magic = ncr_53c400a_magic; break; } if (is_pmio && ports && magic) { /* wakeup sequence for the NCR53C400A and DTC3181E */ /* Disable the adapter and look for a free io port */ magic_configure(-1, 0, magic); region_size = 16; if (base) for (i = 0; ports[i]; i++) { if (base == ports[i]) { /* index found */ if (!request_region(ports[i], region_size, "ncr53c80")) return -EBUSY; break; } } else for (i = 0; ports[i]; i++) { if (!request_region(ports[i], region_size, "ncr53c80")) continue; if (inb(ports[i]) == 0xff) break; release_region(ports[i], region_size); } if (ports[i]) { /* At this point we have our region reserved */ magic_configure(i, 0, magic); /* no IRQ yet */ base = ports[i]; outb(0xc0, 
base + 9); if (inb(base + 9) != 0x80) { ret = -ENODEV; goto out_release; } port_idx = i; } else return -EINVAL; } else if (is_pmio) { /* NCR5380 - no configuration, just grab */ region_size = 8; if (!base || !request_region(base, region_size, "ncr5380")) return -EBUSY; } else { /* MMIO */ region_size = NCR53C400_region_size; if (!request_mem_region(base, region_size, "ncr5380")) return -EBUSY; } if (is_pmio) iomem = ioport_map(base, region_size); else iomem = ioremap(base, region_size); if (!iomem) { ret = -ENOMEM; goto out_release; } instance = scsi_host_alloc(tpnt, sizeof(struct NCR5380_hostdata)); if (instance == NULL) { ret = -ENOMEM; goto out_unmap; } hostdata = shost_priv(instance); hostdata->board = board; hostdata->io = iomem; hostdata->region_size = region_size; if (is_pmio) { hostdata->io_port = base; hostdata->io_width = 1; /* 8-bit PDMA by default */ hostdata->offset = 0; /* * On NCR53C400 boards, NCR5380 registers are mapped 8 past * the base address. */ switch (board) { case BOARD_NCR53C400: hostdata->io_port += 8; hostdata->c400_ctl_status = 0; hostdata->c400_blk_cnt = 1; hostdata->c400_host_buf = 4; break; case BOARD_DTC3181E: hostdata->io_width = 2; /* 16-bit PDMA */ fallthrough; case BOARD_NCR53C400A: case BOARD_HP_C2502: hostdata->c400_ctl_status = 9; hostdata->c400_blk_cnt = 10; hostdata->c400_host_buf = 8; break; } } else { hostdata->base = base; hostdata->offset = NCR53C400_mem_base; switch (board) { case BOARD_NCR53C400: hostdata->c400_ctl_status = 0x100; hostdata->c400_blk_cnt = 0x101; hostdata->c400_host_buf = 0x104; break; case BOARD_DTC3181E: case BOARD_NCR53C400A: case BOARD_HP_C2502: pr_err(DRV_MODULE_NAME ": unknown register offsets\n"); ret = -EINVAL; goto out_unregister; } } /* Check for vacant slot */ NCR5380_write(MODE_REG, 0); if (NCR5380_read(MODE_REG) != 0) { ret = -ENODEV; goto out_unregister; } ret = NCR5380_init(instance, flags | FLAG_LATE_DMA_SETUP); if (ret) goto out_unregister; switch (board) { case BOARD_NCR53C400: case BOARD_DTC3181E: case BOARD_NCR53C400A: case BOARD_HP_C2502: NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } NCR5380_maybe_reset_bus(instance); /* Compatibility with documented NCR5380 kernel parameters */ if (irq == 255 || irq == 0) irq = NO_IRQ; else if (irq == -1) irq = IRQ_AUTO; if (board == BOARD_HP_C2502) { int *irq_table = hp_c2502_irqs; int board_irq = -1; switch (irq) { case NO_IRQ: board_irq = 0; break; case IRQ_AUTO: board_irq = legacy_find_free_irq(irq_table); break; default: while (*irq_table != -1) if (*irq_table++ == irq) board_irq = irq; } if (board_irq <= 0) { board_irq = 0; irq = NO_IRQ; } magic_configure(port_idx, board_irq, magic); } if (irq == IRQ_AUTO) { instance->irq = g_NCR5380_probe_irq(instance); if (instance->irq == NO_IRQ) shost_printk(KERN_INFO, instance, "no irq detected\n"); } else { instance->irq = irq; if (instance->irq == NO_IRQ) shost_printk(KERN_INFO, instance, "no irq provided\n"); } if (instance->irq != NO_IRQ) { if (request_irq(instance->irq, generic_NCR5380_intr, 0, "NCR5380", instance)) { shost_printk(KERN_INFO, instance, "irq %d denied\n", instance->irq); instance->irq = NO_IRQ; } else { shost_printk(KERN_INFO, instance, "irq %d acquired\n", instance->irq); } } ret = scsi_add_host(instance, pdev); if (ret) goto out_free_irq; scsi_scan_host(instance); dev_set_drvdata(pdev, instance); return 0; out_free_irq: if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); out_unregister: scsi_host_put(instance); out_unmap: iounmap(iomem); out_release: if (is_pmio)
release_region(base, region_size); else release_mem_region(base, region_size); return ret; } static void generic_NCR5380_release_resources(struct Scsi_Host *instance) { struct NCR5380_hostdata *hostdata = shost_priv(instance); void __iomem *iomem = hostdata->io; unsigned long io_port = hostdata->io_port; unsigned long base = hostdata->base; unsigned long region_size = hostdata->region_size; scsi_remove_host(instance); if (instance->irq != NO_IRQ) free_irq(instance->irq, instance); NCR5380_exit(instance); scsi_host_put(instance); iounmap(iomem); if (io_port) release_region(io_port, region_size); else release_mem_region(base, region_size); } /* wait_for_53c80_access - wait for 53C80 registers to become accessible * @hostdata: scsi host private data * * The registers within the 53C80 logic block are inaccessible until * bit 7 in the 53C400 control status register gets asserted. */ static void wait_for_53c80_access(struct NCR5380_hostdata *hostdata) { int count = 10000; do { if (hostdata->board == BOARD_DTC3181E) udelay(4); /* DTC436 chip hangs without this */ if (NCR5380_read(hostdata->c400_ctl_status) & CSR_53C80_REG) return; } while (--count > 0); scmd_printk(KERN_ERR, hostdata->connected, "53c80 registers not accessible, device will be reset\n"); NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } /** * generic_NCR5380_precv - pseudo DMA receive * @hostdata: scsi host private data * @dst: buffer to write into * @len: transfer size * * Perform a pseudo DMA mode receive from a 53C400 or equivalent device. */ static inline int generic_NCR5380_precv(struct NCR5380_hostdata *hostdata, unsigned char *dst, int len) { int residual; int start = 0; NCR5380_write(hostdata->c400_ctl_status, CSR_BASE | CSR_TRANS_DIR); NCR5380_write(hostdata->c400_blk_cnt, len / 128); do { if (start == len - 128) { /* Ignore End of DMA interrupt for the final buffer */ if (NCR5380_poll_politely(hostdata, hostdata->c400_ctl_status, CSR_HOST_BUF_NOT_RDY, 0, 0) < 0) break; } else { if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, CSR_HOST_BUF_NOT_RDY, 0, hostdata->c400_ctl_status, CSR_GATED_53C80_IRQ, CSR_GATED_53C80_IRQ, 0) < 0 || NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) break; } if (hostdata->io_port && hostdata->io_width == 2) insw(hostdata->io_port + hostdata->c400_host_buf, dst + start, 64); else if (hostdata->io_port) insb(hostdata->io_port + hostdata->c400_host_buf, dst + start, 128); else memcpy_fromio(dst + start, hostdata->io + NCR53C400_host_buffer, 128); start += 128; } while (start < len); residual = len - start; if (residual != 0) { /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */ NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } wait_for_53c80_access(hostdata); if (residual == 0 && NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER, 0) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", __func__); hostdata->pdma_residual = residual; return 0; } /** * generic_NCR5380_psend - pseudo DMA send * @hostdata: scsi host private data * @src: buffer to read from * @len: transfer size * * Perform a pseudo DMA mode send to a 53C400 or equivalent device. 
*/ static inline int generic_NCR5380_psend(struct NCR5380_hostdata *hostdata, unsigned char *src, int len) { int residual; int start = 0; NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); NCR5380_write(hostdata->c400_blk_cnt, len / 128); do { if (NCR5380_poll_politely2(hostdata, hostdata->c400_ctl_status, CSR_HOST_BUF_NOT_RDY, 0, hostdata->c400_ctl_status, CSR_GATED_53C80_IRQ, CSR_GATED_53C80_IRQ, 0) < 0 || NCR5380_read(hostdata->c400_ctl_status) & CSR_HOST_BUF_NOT_RDY) { /* Both 128 B buffers are in use */ if (start >= 128) start -= 128; if (start >= 128) start -= 128; break; } if (start >= len && NCR5380_read(hostdata->c400_blk_cnt) == 0) break; if (NCR5380_read(hostdata->c400_ctl_status) & CSR_GATED_53C80_IRQ) { /* Host buffer is empty, other one is in use */ if (start >= 128) start -= 128; break; } if (start >= len) continue; if (hostdata->io_port && hostdata->io_width == 2) outsw(hostdata->io_port + hostdata->c400_host_buf, src + start, 64); else if (hostdata->io_port) outsb(hostdata->io_port + hostdata->c400_host_buf, src + start, 128); else memcpy_toio(hostdata->io + NCR53C400_host_buffer, src + start, 128); start += 128; } while (1); residual = len - start; if (residual != 0) { /* 53c80 interrupt or transfer timeout. Reset 53c400 logic. */ NCR5380_write(hostdata->c400_ctl_status, CSR_RESET); NCR5380_write(hostdata->c400_ctl_status, CSR_BASE); } wait_for_53c80_access(hostdata); if (residual == 0) { if (NCR5380_poll_politely(hostdata, TARGET_COMMAND_REG, TCR_LAST_BYTE_SENT, TCR_LAST_BYTE_SENT, 0) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: Last Byte Sent timeout\n", __func__); if (NCR5380_poll_politely(hostdata, BUS_AND_STATUS_REG, BASR_END_DMA_TRANSFER, BASR_END_DMA_TRANSFER, 0) < 0) scmd_printk(KERN_ERR, hostdata->connected, "%s: End of DMA timeout\n", __func__); } hostdata->pdma_residual = residual; return 0; } static int generic_NCR5380_dma_xfer_len(struct NCR5380_hostdata *hostdata, struct scsi_cmnd *cmd) { int transfersize = NCR5380_to_ncmd(cmd)->this_residual; if (hostdata->flags & FLAG_NO_PSEUDO_DMA) return 0; /* 53C400 datasheet: non-modulo-128-byte transfers should use PIO */ if (transfersize % 128) return 0; /* Limit PDMA send to 512 B to avoid random corruption on DTC3181E */ if (hostdata->board == BOARD_DTC3181E && cmd->sc_data_direction == DMA_TO_DEVICE) transfersize = min(transfersize, 512); return min(transfersize, DMA_MAX_SIZE); } static int generic_NCR5380_dma_residual(struct NCR5380_hostdata *hostdata) { return hostdata->pdma_residual; } /* Include the core driver code. 
*/ #include "NCR5380.c" static const struct scsi_host_template driver_template = { .module = THIS_MODULE, .proc_name = DRV_MODULE_NAME, .name = "Generic NCR5380/NCR53C400 SCSI", .info = generic_NCR5380_info, .queuecommand = generic_NCR5380_queue_command, .eh_abort_handler = generic_NCR5380_abort, .eh_host_reset_handler = generic_NCR5380_host_reset, .can_queue = 16, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = 2, .dma_boundary = PAGE_SIZE - 1, .cmd_size = sizeof(struct NCR5380_cmd), .max_sectors = 128, }; static int generic_NCR5380_isa_match(struct device *pdev, unsigned int ndev) { int ret = generic_NCR5380_init_one(&driver_template, pdev, base[ndev], irq[ndev], card[ndev]); if (ret) { if (base[ndev]) printk(KERN_WARNING "Card not found at address 0x%03x\n", base[ndev]); return 0; } return 1; } static void generic_NCR5380_isa_remove(struct device *pdev, unsigned int ndev) { generic_NCR5380_release_resources(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); } static struct isa_driver generic_NCR5380_isa_driver = { .match = generic_NCR5380_isa_match, .remove = generic_NCR5380_isa_remove, .driver = { .name = DRV_MODULE_NAME }, }; #ifdef CONFIG_PNP static const struct pnp_device_id generic_NCR5380_pnp_ids[] = { { .id = "DTC436e", .driver_data = BOARD_DTC3181E }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, generic_NCR5380_pnp_ids); static int generic_NCR5380_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) { int base, irq; if (pnp_activate_dev(pdev) < 0) return -EBUSY; base = pnp_port_start(pdev, 0); irq = pnp_irq(pdev, 0); return generic_NCR5380_init_one(&driver_template, &pdev->dev, base, irq, id->driver_data); } static void generic_NCR5380_pnp_remove(struct pnp_dev *pdev) { generic_NCR5380_release_resources(pnp_get_drvdata(pdev)); pnp_set_drvdata(pdev, NULL); } static struct pnp_driver generic_NCR5380_pnp_driver = { .name = DRV_MODULE_NAME, .id_table = generic_NCR5380_pnp_ids, .probe = generic_NCR5380_pnp_probe, .remove = generic_NCR5380_pnp_remove, }; #endif /* defined(CONFIG_PNP) */ static int pnp_registered, isa_registered; static int __init generic_NCR5380_init(void) { int ret = 0; /* compatibility with old-style parameters */ if (irq[0] == -1 && base[0] == 0 && card[0] == -1) { irq[0] = ncr_irq; base[0] = ncr_addr; if (ncr_5380) card[0] = BOARD_NCR5380; if (ncr_53c400) card[0] = BOARD_NCR53C400; if (ncr_53c400a) card[0] = BOARD_NCR53C400A; if (dtc_3181e) card[0] = BOARD_DTC3181E; if (hp_c2502) card[0] = BOARD_HP_C2502; } #ifdef CONFIG_PNP if (!pnp_register_driver(&generic_NCR5380_pnp_driver)) pnp_registered = 1; #endif ret = isa_register_driver(&generic_NCR5380_isa_driver, MAX_CARDS); if (!ret) isa_registered = 1; return (pnp_registered || isa_registered) ? 0 : ret; } static void __exit generic_NCR5380_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_driver(&generic_NCR5380_pnp_driver); #endif if (isa_registered) isa_unregister_driver(&generic_NCR5380_isa_driver); } module_init(generic_NCR5380_init); module_exit(generic_NCR5380_exit);
/* * Broadcom BCM470X / BCM5301X ARM platform code. * Generic DTS part for all BCM53010, BCM53011, BCM53012, BCM53014, BCM53015, * BCM53016, BCM53017, BCM53018, BCM4707, BCM4708 and BCM4709 SoCs * * Licensed under the GNU/GPL. See COPYING for details. */ #include "bcm-ns.dtsi" / { mpcore-bus@19000000 { a9pll: arm_clk@0 { #clock-cells = <0>; compatible = "brcm,nsp-armpll"; clocks = <&osc>; reg = <0x00000 0x1000>; }; watchdog@20620 { compatible = "arm,cortex-a9-twd-wdt"; reg = <0x20620 0x20>; interrupts = <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(2) | IRQ_TYPE_EDGE_RISING)>; clocks = <&periph_clk>; }; }; clocks { #address-cells = <1>; #size-cells = <1>; ranges; osc: oscillator { #clock-cells = <0>; compatible = "fixed-clock"; clock-frequency = <25000000>; }; iprocmed: iprocmed { #clock-cells = <0>; compatible = "fixed-factor-clock"; clocks = <&genpll BCM_NSP_GENPLL_IPROCFAST_CLK>; clock-div = <2>; clock-mult = <1>; }; iprocslow: iprocslow { #clock-cells = <0>; compatible = "fixed-factor-clock"; clocks = <&genpll BCM_NSP_GENPLL_IPROCFAST_CLK>; clock-div = <4>; clock-mult = <1>; }; periph_clk: periph_clk { #clock-cells = <0>; compatible = "fixed-factor-clock"; clocks = <&a9pll>; clock-div = <2>; clock-mult = <1>; }; }; i2c0: i2c@18009000 { compatible = "brcm,iproc-i2c"; reg = <0x18009000 0x50>; interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; #address-cells = <1>; #size-cells = <0>; clock-frequency = <100000>; status = "disabled"; }; dmu-bus@1800c000 { cru-bus@100 { lcpll0: clock-controller@100 { #clock-cells = <1>; compatible = "brcm,nsp-lcpll0"; reg = <0x100 0x14>; clocks = <&osc>; clock-output-names = "lcpll0", "pcie_phy", "sdio", "ddr_phy"; }; genpll: clock-controller@140 { #clock-cells = <1>; compatible = "brcm,nsp-genpll"; reg = <0x140 0x24>; clocks = <&osc>; clock-output-names = "genpll", "phy", "ethernetclk", "usbclk", "iprocfast", "sata1", "sata2"; }; }; }; spi@18029200 { compatible = "brcm,spi-nsp-qspi", "brcm,spi-bcm-qspi"; reg = <0x18029200 0x184>, <0x18029000 0x124>, <0x1811b408 0x004>, <0x180293a0 0x01c>; reg-names = "mspi", "bspi", "intr_regs", "intr_status_reg"; interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 74 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "mspi_done", "mspi_halted", "spi_lr_fullness_reached", "spi_lr_session_aborted", "spi_lr_impatient", "spi_lr_session_done", "spi_lr_overread"; clocks = <&iprocmed>; num-cs = <2>; #address-cells = <1>; #size-cells = <0>; spi_nor: flash@0 { compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <20000000>; status = "disabled"; partitions { compatible = "brcm,bcm947xx-cfe-partitions"; }; }; }; };
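/*
 * Illustrative sketch (not part of the DTS above): what a fixed-factor
 * clock like "iprocmed" (genpll iprocfast divided by 2) amounts to on the
 * provider side if it were registered from C instead of described in DT.
 * The clock names mirror the DTS; everything else is an assumption for
 * this example.
 */
#include <linux/clk-provider.h>

static struct clk_hw *demo_register_iprocmed(struct device *dev)
{
	/* mult = 1, div = 2: the output runs at half the parent rate */
	return clk_hw_register_fixed_factor(dev, "iprocmed",
					    "iprocfast", 0, 1, 2);
}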
// SPDX-License-Identifier: (GPL-2.0+ OR MIT) /* * Copyright (C) 2020 Icenowy Zheng <[email protected]> * */ /dts-v1/; #include "sun50i-a64-pinetab.dts" / { model = "Pine64 PineTab Early Adopter"; compatible = "pine64,pinetab-early-adopter", "allwinner,sun50i-a64"; }; &dsi { /delete-node/ panel@0; panel@0 { compatible = "feixin,k101-im2byl02", "ilitek,ili9881c"; reg = <0>; power-supply = <&reg_dc1sw>; reset-gpios = <&pio 3 24 GPIO_ACTIVE_LOW>; /* PD24 */ backlight = <&backlight>; }; };
/* SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 */ /* Copyright (c) 2016-2018 Mellanox Technologies. All rights reserved */ #ifndef _MLXSW_RESOURCES_H #define _MLXSW_RESOURCES_H #include <linux/kernel.h> #include <linux/types.h> enum mlxsw_res_id { MLXSW_RES_ID_KVD_SIZE, MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE, MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE, MLXSW_RES_ID_PGT_SIZE, MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE, MLXSW_RES_ID_MAX_KVD_ACTION_SETS, MLXSW_RES_ID_MAX_TRAP_GROUPS, MLXSW_RES_ID_CQE_V0, MLXSW_RES_ID_CQE_V1, MLXSW_RES_ID_CQE_V2, MLXSW_RES_ID_COUNTER_POOL_SIZE, MLXSW_RES_ID_COUNTER_BANK_SIZE, MLXSW_RES_ID_MAX_SPAN, MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES, MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC, MLXSW_RES_ID_MAX_SYSTEM_PORT, MLXSW_RES_ID_FID, MLXSW_RES_ID_MAX_LAG, MLXSW_RES_ID_MAX_LAG_MEMBERS, MLXSW_RES_ID_MAX_NVE_FLOOD_PRF, MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER, MLXSW_RES_ID_CELL_SIZE, MLXSW_RES_ID_MAX_HEADROOM_SIZE, MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS, MLXSW_RES_ID_ACL_MAX_TCAM_RULES, MLXSW_RES_ID_ACL_MAX_REGIONS, MLXSW_RES_ID_ACL_MAX_GROUPS, MLXSW_RES_ID_ACL_MAX_GROUP_SIZE, MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS, MLXSW_RES_ID_ACL_FLEX_KEYS, MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE, MLXSW_RES_ID_ACL_ACTIONS_PER_SET, MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE, MLXSW_RES_ID_ACL_MAX_ERPT_BANKS, MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE, MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID, MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB, MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB, MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB, MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB, MLXSW_RES_ID_ACL_MAX_BF_LOG, MLXSW_RES_ID_MAX_GLOBAL_POLICERS, MLXSW_RES_ID_MAX_CPU_POLICERS, MLXSW_RES_ID_MAX_VRS, MLXSW_RES_ID_MAX_RIFS, MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES, MLXSW_RES_ID_MAX_RIF_MAC_PROFILES, MLXSW_RES_ID_MAX_LPM_TREES, MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4, MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6, /* Internal resources. * Determined by the SW, not queried from the HW. 
*/ MLXSW_RES_ID_KVD_SINGLE_SIZE, MLXSW_RES_ID_KVD_DOUBLE_SIZE, MLXSW_RES_ID_KVD_LINEAR_SIZE, __MLXSW_RES_ID_MAX, }; static u16 mlxsw_res_ids[] = { [MLXSW_RES_ID_KVD_SIZE] = 0x1001, [MLXSW_RES_ID_KVD_SINGLE_MIN_SIZE] = 0x1002, [MLXSW_RES_ID_KVD_DOUBLE_MIN_SIZE] = 0x1003, [MLXSW_RES_ID_PGT_SIZE] = 0x1004, [MLXSW_RES_ID_MAX_KVD_LINEAR_RANGE] = 0x1005, [MLXSW_RES_ID_MAX_KVD_ACTION_SETS] = 0x1007, [MLXSW_RES_ID_MAX_TRAP_GROUPS] = 0x2201, [MLXSW_RES_ID_CQE_V0] = 0x2210, [MLXSW_RES_ID_CQE_V1] = 0x2211, [MLXSW_RES_ID_CQE_V2] = 0x2212, [MLXSW_RES_ID_COUNTER_POOL_SIZE] = 0x2410, [MLXSW_RES_ID_COUNTER_BANK_SIZE] = 0x2411, [MLXSW_RES_ID_MAX_SPAN] = 0x2420, [MLXSW_RES_ID_COUNTER_SIZE_PACKETS_BYTES] = 0x2443, [MLXSW_RES_ID_COUNTER_SIZE_ROUTER_BASIC] = 0x2449, [MLXSW_RES_ID_MAX_SYSTEM_PORT] = 0x2502, [MLXSW_RES_ID_FID] = 0x2512, [MLXSW_RES_ID_MAX_LAG] = 0x2520, [MLXSW_RES_ID_MAX_LAG_MEMBERS] = 0x2521, [MLXSW_RES_ID_MAX_NVE_FLOOD_PRF] = 0x2522, [MLXSW_RES_ID_GUARANTEED_SHARED_BUFFER] = 0x2805, /* Bytes */ [MLXSW_RES_ID_CELL_SIZE] = 0x2803, /* Bytes */ [MLXSW_RES_ID_MAX_HEADROOM_SIZE] = 0x2811, /* Bytes */ [MLXSW_RES_ID_ACL_MAX_TCAM_REGIONS] = 0x2901, [MLXSW_RES_ID_ACL_MAX_TCAM_RULES] = 0x2902, [MLXSW_RES_ID_ACL_MAX_REGIONS] = 0x2903, [MLXSW_RES_ID_ACL_MAX_GROUPS] = 0x2904, [MLXSW_RES_ID_ACL_MAX_GROUP_SIZE] = 0x2905, [MLXSW_RES_ID_ACL_MAX_DEFAULT_ACTIONS] = 0x2908, [MLXSW_RES_ID_ACL_FLEX_KEYS] = 0x2910, [MLXSW_RES_ID_ACL_MAX_ACTION_PER_RULE] = 0x2911, [MLXSW_RES_ID_ACL_ACTIONS_PER_SET] = 0x2912, [MLXSW_RES_ID_ACL_MAX_L4_PORT_RANGE] = 0x2920, [MLXSW_RES_ID_ACL_MAX_ERPT_BANKS] = 0x2940, [MLXSW_RES_ID_ACL_MAX_ERPT_BANK_SIZE] = 0x2941, [MLXSW_RES_ID_ACL_MAX_LARGE_KEY_ID] = 0x2942, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_2KB] = 0x2950, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_4KB] = 0x2951, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_8KB] = 0x2952, [MLXSW_RES_ID_ACL_ERPT_ENTRIES_12KB] = 0x2953, [MLXSW_RES_ID_ACL_MAX_BF_LOG] = 0x2960, [MLXSW_RES_ID_MAX_GLOBAL_POLICERS] = 0x2A10, [MLXSW_RES_ID_MAX_CPU_POLICERS] = 0x2A13, [MLXSW_RES_ID_MAX_VRS] = 0x2C01, [MLXSW_RES_ID_MAX_RIFS] = 0x2C02, [MLXSW_RES_ID_MC_ERIF_LIST_ENTRIES] = 0x2C10, [MLXSW_RES_ID_MAX_RIF_MAC_PROFILES] = 0x2C14, [MLXSW_RES_ID_MAX_LPM_TREES] = 0x2C30, [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV4] = 0x2E02, [MLXSW_RES_ID_MAX_NVE_MC_ENTRIES_IPV6] = 0x2E03, }; struct mlxsw_res { bool valid[__MLXSW_RES_ID_MAX]; u64 values[__MLXSW_RES_ID_MAX]; }; static inline bool mlxsw_res_valid(struct mlxsw_res *res, enum mlxsw_res_id res_id) { return res->valid[res_id]; } #define MLXSW_RES_VALID(res, short_res_id) \ mlxsw_res_valid(res, MLXSW_RES_ID_##short_res_id) static inline u64 mlxsw_res_get(struct mlxsw_res *res, enum mlxsw_res_id res_id) { if (WARN_ON(!res->valid[res_id])) return 0; return res->values[res_id]; } #define MLXSW_RES_GET(res, short_res_id) \ mlxsw_res_get(res, MLXSW_RES_ID_##short_res_id) static inline void mlxsw_res_set(struct mlxsw_res *res, enum mlxsw_res_id res_id, u64 value) { res->valid[res_id] = true; res->values[res_id] = value; } #define MLXSW_RES_SET(res, short_res_id, value) \ mlxsw_res_set(res, MLXSW_RES_ID_##short_res_id, value) static inline void mlxsw_res_parse(struct mlxsw_res *res, u16 id, u64 value) { int i; for (i = 0; i < ARRAY_SIZE(mlxsw_res_ids); i++) { if (mlxsw_res_ids[i] == id) { mlxsw_res_set(res, i, value); return; } } } #endif
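/*
 * Illustrative sketch (not part of the header above): how a caller might
 * populate a struct mlxsw_res from queried (id, value) pairs and read a
 * resource back. The demo_ name and the hard-coded pair are assumptions
 * for this example; note that mlxsw_res_parse() silently ignores IDs it
 * does not recognize.
 */
static u64 demo_kvd_size(void)
{
	struct mlxsw_res res = {};

	/* Pretend firmware reported resource 0x1001 (KVD_SIZE) = 512K. */
	mlxsw_res_parse(&res, 0x1001, 524288);

	if (!MLXSW_RES_VALID(&res, KVD_SIZE))
		return 0;

	return MLXSW_RES_GET(&res, KVD_SIZE);
}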
// SPDX-License-Identifier: GPL-2.0-only /* * GPIO driver for the ACCES 104-IDI-48 family * Copyright (C) 2015 William Breathitt Gray * * This driver supports the following ACCES devices: 104-IDI-48A, * 104-IDI-48AC, 104-IDI-48B, and 104-IDI-48BC. */ #include <linux/bits.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gpio/regmap.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/isa.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/regmap.h> #include <linux/types.h> #define IDI_48_EXTENT 8 #define MAX_NUM_IDI_48 max_num_isa_dev(IDI_48_EXTENT) static unsigned int base[MAX_NUM_IDI_48]; static unsigned int num_idi_48; module_param_hw_array(base, uint, ioport, &num_idi_48, 0); MODULE_PARM_DESC(base, "ACCES 104-IDI-48 base addresses"); static unsigned int irq[MAX_NUM_IDI_48]; static unsigned int num_irq; module_param_hw_array(irq, uint, irq, &num_irq, 0); MODULE_PARM_DESC(irq, "ACCES 104-IDI-48 interrupt line numbers"); #define IDI48_IRQ_STATUS 0x7 #define IDI48_IRQ_ENABLE IDI48_IRQ_STATUS static int idi_48_reg_mask_xlate(struct gpio_regmap *gpio, unsigned int base, unsigned int offset, unsigned int *reg, unsigned int *mask) { const unsigned int line = offset % 8; const unsigned int stride = offset / 8; const unsigned int port = (stride / 3) * 4; const unsigned int port_stride = stride % 3; *reg = base + port + port_stride; *mask = BIT(line); return 0; } static const struct regmap_range idi_48_wr_ranges[] = { regmap_reg_range(0x0, 0x6), }; static const struct regmap_range idi_48_rd_ranges[] = { regmap_reg_range(0x0, 0x2), regmap_reg_range(0x4, 0x7), }; static const struct regmap_range idi_48_precious_ranges[] = { regmap_reg_range(0x7, 0x7), }; static const struct regmap_access_table idi_48_wr_table = { .no_ranges = idi_48_wr_ranges, .n_no_ranges = ARRAY_SIZE(idi_48_wr_ranges), }; static const struct regmap_access_table idi_48_rd_table = { .yes_ranges = idi_48_rd_ranges, .n_yes_ranges = ARRAY_SIZE(idi_48_rd_ranges), }; static const struct regmap_access_table idi_48_precious_table = { .yes_ranges = idi_48_precious_ranges, .n_yes_ranges = ARRAY_SIZE(idi_48_precious_ranges), }; static const struct regmap_config idi48_regmap_config = { .reg_bits = 8, .reg_stride = 1, .val_bits = 8, .io_port = true, .max_register = 0x6, .wr_table = &idi_48_wr_table, .rd_table = &idi_48_rd_table, .precious_table = &idi_48_precious_table, .use_raw_spinlock = true, }; #define IDI48_NGPIO 48 #define IDI48_REGMAP_IRQ(_id) \ [_id] = { \ .mask = BIT((_id) / 8), \ .type = { .types_supported = IRQ_TYPE_EDGE_BOTH }, \ } static const struct regmap_irq idi48_regmap_irqs[IDI48_NGPIO] = { IDI48_REGMAP_IRQ(0), IDI48_REGMAP_IRQ(1), IDI48_REGMAP_IRQ(2), /* 0-2 */ IDI48_REGMAP_IRQ(3), IDI48_REGMAP_IRQ(4), IDI48_REGMAP_IRQ(5), /* 3-5 */ IDI48_REGMAP_IRQ(6), IDI48_REGMAP_IRQ(7), IDI48_REGMAP_IRQ(8), /* 6-8 */ IDI48_REGMAP_IRQ(9), IDI48_REGMAP_IRQ(10), IDI48_REGMAP_IRQ(11), /* 9-11 */ IDI48_REGMAP_IRQ(12), IDI48_REGMAP_IRQ(13), IDI48_REGMAP_IRQ(14), /* 12-14 */ IDI48_REGMAP_IRQ(15), IDI48_REGMAP_IRQ(16), IDI48_REGMAP_IRQ(17), /* 15-17 */ IDI48_REGMAP_IRQ(18), IDI48_REGMAP_IRQ(19), IDI48_REGMAP_IRQ(20), /* 18-20 */ IDI48_REGMAP_IRQ(21), IDI48_REGMAP_IRQ(22), IDI48_REGMAP_IRQ(23), /* 21-23 */ IDI48_REGMAP_IRQ(24), IDI48_REGMAP_IRQ(25), IDI48_REGMAP_IRQ(26), /* 24-26 */ IDI48_REGMAP_IRQ(27), IDI48_REGMAP_IRQ(28), IDI48_REGMAP_IRQ(29), /* 27-29 */ IDI48_REGMAP_IRQ(30), IDI48_REGMAP_IRQ(31), IDI48_REGMAP_IRQ(32), /* 30-32 */ 
IDI48_REGMAP_IRQ(33), IDI48_REGMAP_IRQ(34), IDI48_REGMAP_IRQ(35), /* 33-35 */ IDI48_REGMAP_IRQ(36), IDI48_REGMAP_IRQ(37), IDI48_REGMAP_IRQ(38), /* 36-38 */ IDI48_REGMAP_IRQ(39), IDI48_REGMAP_IRQ(40), IDI48_REGMAP_IRQ(41), /* 39-41 */ IDI48_REGMAP_IRQ(42), IDI48_REGMAP_IRQ(43), IDI48_REGMAP_IRQ(44), /* 42-44 */ IDI48_REGMAP_IRQ(45), IDI48_REGMAP_IRQ(46), IDI48_REGMAP_IRQ(47), /* 45-47 */ }; static const char *idi48_names[IDI48_NGPIO] = { "Bit 0 A", "Bit 1 A", "Bit 2 A", "Bit 3 A", "Bit 4 A", "Bit 5 A", "Bit 6 A", "Bit 7 A", "Bit 8 A", "Bit 9 A", "Bit 10 A", "Bit 11 A", "Bit 12 A", "Bit 13 A", "Bit 14 A", "Bit 15 A", "Bit 16 A", "Bit 17 A", "Bit 18 A", "Bit 19 A", "Bit 20 A", "Bit 21 A", "Bit 22 A", "Bit 23 A", "Bit 0 B", "Bit 1 B", "Bit 2 B", "Bit 3 B", "Bit 4 B", "Bit 5 B", "Bit 6 B", "Bit 7 B", "Bit 8 B", "Bit 9 B", "Bit 10 B", "Bit 11 B", "Bit 12 B", "Bit 13 B", "Bit 14 B", "Bit 15 B", "Bit 16 B", "Bit 17 B", "Bit 18 B", "Bit 19 B", "Bit 20 B", "Bit 21 B", "Bit 22 B", "Bit 23 B" }; static int idi_48_probe(struct device *dev, unsigned int id) { const char *const name = dev_name(dev); struct gpio_regmap_config config = {}; void __iomem *regs; struct regmap *map; struct regmap_irq_chip *chip; struct regmap_irq_chip_data *chip_data; int err; if (!devm_request_region(dev, base[id], IDI_48_EXTENT, name)) { dev_err(dev, "Unable to lock port addresses (0x%X-0x%X)\n", base[id], base[id] + IDI_48_EXTENT); return -EBUSY; } regs = devm_ioport_map(dev, base[id], IDI_48_EXTENT); if (!regs) return -ENOMEM; map = devm_regmap_init_mmio(dev, regs, &idi48_regmap_config); if (IS_ERR(map)) return dev_err_probe(dev, PTR_ERR(map), "Unable to initialize register map\n"); chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->name = name; chip->status_base = IDI48_IRQ_STATUS; chip->unmask_base = IDI48_IRQ_ENABLE; chip->clear_on_unmask = true; chip->num_regs = 1; chip->irqs = idi48_regmap_irqs; chip->num_irqs = ARRAY_SIZE(idi48_regmap_irqs); err = devm_regmap_add_irq_chip(dev, map, irq[id], IRQF_SHARED, 0, chip, &chip_data); if (err) return dev_err_probe(dev, err, "IRQ registration failed\n"); config.parent = dev; config.regmap = map; config.ngpio = IDI48_NGPIO; config.names = idi48_names; config.reg_dat_base = GPIO_REGMAP_ADDR(0x0); config.ngpio_per_reg = 8; config.reg_mask_xlate = idi_48_reg_mask_xlate; config.irq_domain = regmap_irq_get_domain(chip_data); return PTR_ERR_OR_ZERO(devm_gpio_regmap_register(dev, &config)); } static struct isa_driver idi_48_driver = { .probe = idi_48_probe, .driver = { .name = "104-idi-48" }, }; module_isa_driver_with_irq(idi_48_driver, num_idi_48, num_irq); MODULE_AUTHOR("William Breathitt Gray <[email protected]>"); MODULE_DESCRIPTION("ACCES 104-IDI-48 GPIO driver"); MODULE_LICENSE("GPL v2");
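/*
 * Illustrative sketch (not part of the driver above): the offset ->
 * (register, mask) mapping implemented by idi_48_reg_mask_xlate(). The
 * 48 inputs are wired as two 24-bit halves, each split across three 8-bit
 * ports, with the halves 4 bytes apart. Worked example: GPIO 27 -> line 3
 * of stride 3 -> reg = base + 4 + 0, mask = BIT(3).
 */
#include <linux/bits.h>

static void demo_idi48_xlate(unsigned int base, unsigned int offset,
			     unsigned int *reg, unsigned int *mask)
{
	unsigned int line = offset % 8;		/* bit within a port */
	unsigned int stride = offset / 8;	/* which 8-bit port */
	unsigned int port = (stride / 3) * 4;	/* 0 for half A, 4 for B */
	unsigned int port_stride = stride % 3;	/* port within the half */

	*reg = base + port + port_stride;
	*mask = BIT(line);
}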
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2015 Linaro Ltd. * Copyright (c) 2015 HiSilicon Limited. */ #include <linux/mfd/syscon.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> #include <linux/regmap.h> #define SC_PERIPH_CTRL4 0x00c #define CTRL4_PICO_SIDDQ BIT(6) #define CTRL4_PICO_OGDISABLE BIT(8) #define CTRL4_PICO_VBUSVLDEXT BIT(10) #define CTRL4_PICO_VBUSVLDEXTSEL BIT(11) #define CTRL4_OTG_PHY_SEL BIT(21) #define SC_PERIPH_CTRL5 0x010 #define CTRL5_USBOTG_RES_SEL BIT(3) #define CTRL5_PICOPHY_ACAENB BIT(4) #define CTRL5_PICOPHY_BC_MODE BIT(5) #define CTRL5_PICOPHY_CHRGSEL BIT(6) #define CTRL5_PICOPHY_VDATSRCEND BIT(7) #define CTRL5_PICOPHY_VDATDETENB BIT(8) #define CTRL5_PICOPHY_DCDENB BIT(9) #define CTRL5_PICOPHY_IDDIG BIT(10) #define SC_PERIPH_CTRL8 0x018 #define SC_PERIPH_RSTEN0 0x300 #define SC_PERIPH_RSTDIS0 0x304 #define RST0_USBOTG_BUS BIT(4) #define RST0_POR_PICOPHY BIT(5) #define RST0_USBOTG BIT(6) #define RST0_USBOTG_32K BIT(7) #define EYE_PATTERN_PARA 0x7053348c struct hi6220_priv { struct regmap *reg; struct device *dev; }; static void hi6220_phy_init(struct hi6220_priv *priv) { struct regmap *reg = priv->reg; u32 val, mask; val = RST0_USBOTG_BUS | RST0_POR_PICOPHY | RST0_USBOTG | RST0_USBOTG_32K; mask = val; regmap_update_bits(reg, SC_PERIPH_RSTEN0, mask, val); regmap_update_bits(reg, SC_PERIPH_RSTDIS0, mask, val); } static int hi6220_phy_setup(struct hi6220_priv *priv, bool on) { struct regmap *reg = priv->reg; u32 val, mask; int ret; if (on) { val = CTRL5_USBOTG_RES_SEL | CTRL5_PICOPHY_ACAENB; mask = val | CTRL5_PICOPHY_BC_MODE; ret = regmap_update_bits(reg, SC_PERIPH_CTRL5, mask, val); if (ret) goto out; val = CTRL4_PICO_VBUSVLDEXT | CTRL4_PICO_VBUSVLDEXTSEL | CTRL4_OTG_PHY_SEL; mask = val | CTRL4_PICO_SIDDQ | CTRL4_PICO_OGDISABLE; ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val); if (ret) goto out; ret = regmap_write(reg, SC_PERIPH_CTRL8, EYE_PATTERN_PARA); if (ret) goto out; } else { val = CTRL4_PICO_SIDDQ; mask = val; ret = regmap_update_bits(reg, SC_PERIPH_CTRL4, mask, val); if (ret) goto out; } return 0; out: dev_err(priv->dev, "failed to setup phy ret: %d\n", ret); return ret; } static int hi6220_phy_start(struct phy *phy) { struct hi6220_priv *priv = phy_get_drvdata(phy); return hi6220_phy_setup(priv, true); } static int hi6220_phy_exit(struct phy *phy) { struct hi6220_priv *priv = phy_get_drvdata(phy); return hi6220_phy_setup(priv, false); } static const struct phy_ops hi6220_phy_ops = { .init = hi6220_phy_start, .exit = hi6220_phy_exit, .owner = THIS_MODULE, }; static int hi6220_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct phy *phy; struct hi6220_priv *priv; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->dev = dev; priv->reg = syscon_regmap_lookup_by_phandle(dev->of_node, "hisilicon,peripheral-syscon"); if (IS_ERR(priv->reg)) { dev_err(dev, "no hisilicon,peripheral-syscon\n"); return PTR_ERR(priv->reg); } hi6220_phy_init(priv); phy = devm_phy_create(dev, NULL, &hi6220_phy_ops); if (IS_ERR(phy)) return PTR_ERR(phy); phy_set_drvdata(phy, priv); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); return PTR_ERR_OR_ZERO(phy_provider); } static const struct of_device_id hi6220_phy_of_match[] = { {.compatible = "hisilicon,hi6220-usb-phy",}, { }, }; MODULE_DEVICE_TABLE(of, hi6220_phy_of_match); static struct platform_driver 
hi6220_phy_driver = { .probe = hi6220_phy_probe, .driver = { .name = "hi6220-usb-phy", .of_match_table = hi6220_phy_of_match, } }; module_platform_driver(hi6220_phy_driver); MODULE_DESCRIPTION("HISILICON HI6220 USB PHY driver"); MODULE_ALIAS("platform:hi6220-usb-phy"); MODULE_LICENSE("GPL");
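/*
 * Illustrative sketch (not part of the driver above): how a controller
 * driver would consume this PHY. With the ops table above, phy_init()
 * ends up in hi6220_phy_start() (power the PICO PHY up) and phy_exit()
 * in hi6220_phy_exit() (gate it again). The "usb-phy" con_id and demo_
 * naming are assumptions for this example.
 */
#include <linux/phy/phy.h>

static int demo_attach_phy(struct device *dev)
{
	struct phy *phy;
	int ret;

	phy = devm_phy_get(dev, "usb-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);

	ret = phy_init(phy);	/* -> hi6220_phy_start() */
	if (ret)
		return ret;

	/* ... controller uses the PHY here ... */

	phy_exit(phy);		/* -> hi6220_phy_exit() */
	return 0;
}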
// SPDX-License-Identifier: GPL-2.0-only /* * ADXL313 3-Axis Digital Accelerometer * * Copyright (c) 2021 Lucas Stankus <[email protected]> * * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/ADXL313.pdf */ #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/spi/spi.h> #include <linux/property.h> #include "adxl313.h" static const struct regmap_config adxl31x_spi_regmap_config[] = { [ADXL312] = { .reg_bits = 8, .val_bits = 8, .rd_table = &adxl312_readable_regs_table, .wr_table = &adxl312_writable_regs_table, .max_register = 0x39, /* Setting bits 7 and 6 enables multiple-byte read */ .read_flag_mask = BIT(7) | BIT(6), }, [ADXL313] = { .reg_bits = 8, .val_bits = 8, .rd_table = &adxl313_readable_regs_table, .wr_table = &adxl313_writable_regs_table, .max_register = 0x39, /* Setting bits 7 and 6 enables multiple-byte read */ .read_flag_mask = BIT(7) | BIT(6), }, [ADXL314] = { .reg_bits = 8, .val_bits = 8, .rd_table = &adxl314_readable_regs_table, .wr_table = &adxl314_writable_regs_table, .max_register = 0x39, /* Setting bits 7 and 6 enables multiple-byte read */ .read_flag_mask = BIT(7) | BIT(6), }, }; static int adxl313_spi_setup(struct device *dev, struct regmap *regmap) { struct spi_device *spi = container_of(dev, struct spi_device, dev); int ret; if (spi->mode & SPI_3WIRE) { ret = regmap_write(regmap, ADXL313_REG_DATA_FORMAT, ADXL313_SPI_3WIRE); if (ret) return ret; } return regmap_update_bits(regmap, ADXL313_REG_POWER_CTL, ADXL313_I2C_DISABLE, ADXL313_I2C_DISABLE); } static int adxl313_spi_probe(struct spi_device *spi) { const struct adxl313_chip_info *chip_data; struct regmap *regmap; int ret; spi->mode |= SPI_MODE_3; ret = spi_setup(spi); if (ret) return ret; chip_data = spi_get_device_match_data(spi); regmap = devm_regmap_init_spi(spi, &adxl31x_spi_regmap_config[chip_data->type]); if (IS_ERR(regmap)) { dev_err(&spi->dev, "Error initializing spi regmap: %ld\n", PTR_ERR(regmap)); return PTR_ERR(regmap); } return adxl313_core_probe(&spi->dev, regmap, chip_data, &adxl313_spi_setup); } static const struct spi_device_id adxl313_spi_id[] = { { .name = "adxl312", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL312] }, { .name = "adxl313", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL313] }, { .name = "adxl314", .driver_data = (kernel_ulong_t)&adxl31x_chip_info[ADXL314] }, { } }; MODULE_DEVICE_TABLE(spi, adxl313_spi_id); static const struct of_device_id adxl313_of_match[] = { { .compatible = "adi,adxl312", .data = &adxl31x_chip_info[ADXL312] }, { .compatible = "adi,adxl313", .data = &adxl31x_chip_info[ADXL313] }, { .compatible = "adi,adxl314", .data = &adxl31x_chip_info[ADXL314] }, { } }; MODULE_DEVICE_TABLE(of, adxl313_of_match); static struct spi_driver adxl313_spi_driver = { .driver = { .name = "adxl313_spi", .of_match_table = adxl313_of_match, }, .probe = adxl313_spi_probe, .id_table = adxl313_spi_id, }; module_spi_driver(adxl313_spi_driver); MODULE_AUTHOR("Lucas Stankus <[email protected]>"); MODULE_DESCRIPTION("ADXL313 3-Axis Digital Accelerometer SPI driver"); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS("IIO_ADXL313");
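/*
 * Illustrative sketch (not part of the driver above): what the
 * read_flag_mask in the regmap configs above does on the wire. For these
 * SPI transfers, bit 7 of the first byte marks a read and bit 6 enables
 * multiple-byte access, so regmap ORs both into every register address it
 * reads. The demo_ name is an assumption for this example.
 */
#include <linux/bits.h>
#include <linux/types.h>

static u8 demo_adxl31x_read_cmd(u8 reg)
{
	/* e.g. a register at 0x32 is addressed as 0xF2 on the bus */
	return reg | BIT(7) | BIT(6);
}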
/* * Copyright © 2017 Intel Corporation * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. * */ #include "mock_uncore.h" #define __nop_write(x) \ static void \ nop_write##x(struct intel_uncore *uncore, i915_reg_t reg, u##x val, bool trace) { } __nop_write(8) __nop_write(16) __nop_write(32) #define __nop_read(x) \ static u##x \ nop_read##x(struct intel_uncore *uncore, i915_reg_t reg, bool trace) { return 0; } __nop_read(8) __nop_read(16) __nop_read(32) __nop_read(64) void mock_uncore_init(struct intel_uncore *uncore, struct drm_i915_private *i915) { intel_uncore_init_early(uncore, to_gt(i915)); ASSIGN_RAW_WRITE_MMIO_VFUNCS(uncore, nop); ASSIGN_RAW_READ_MMIO_VFUNCS(uncore, nop); }
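/*
 * Illustrative sketch (not part of the file above): the token-pasting
 * pattern used by __nop_read()/__nop_write() to stamp out one stub per
 * access width. The DEMO_ names are assumptions for this example.
 */
#include <linux/types.h>

#define DEMO_NOP_READ(x) \
static u##x demo_nop_read##x(void) { return 0; }

DEMO_NOP_READ(8)	/* defines u8  demo_nop_read8(void)  */
DEMO_NOP_READ(16)	/* defines u16 demo_nop_read16(void) */
DEMO_NOP_READ(32)	/* defines u32 demo_nop_read32(void) */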
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DISPLAY_MODE_ENUMS_H__ #define __DISPLAY_MODE_ENUMS_H__ enum output_encoder_class { dm_dp = 0, dm_hdmi = 1, dm_wb = 2, dm_edp = 3, dm_dp2p0 = 5, }; enum output_format_class { dm_444 = 0, dm_420 = 1, dm_n422, dm_s422 }; enum source_format_class { dm_444_16 = 0, dm_444_32 = 1, dm_444_64 = 2, dm_420_8 = 3, dm_420_10 = 4, dm_420_12 = 5, dm_422_8 = 6, dm_422_10 = 7, dm_444_8 = 8, dm_mono_8 = dm_444_8, dm_mono_16 = dm_444_16, dm_rgbe = 9, dm_rgbe_alpha = 10, }; enum output_bpc_class { dm_out_6 = 0, dm_out_8 = 1, dm_out_10 = 2, dm_out_12 = 3, dm_out_16 = 4 }; enum scan_direction_class { dm_horz = 0, dm_vert = 1 }; enum dm_swizzle_mode { dm_sw_linear = 0, dm_sw_256b_s = 1, dm_sw_256b_d = 2, dm_sw_SPARE_0 = 3, dm_sw_SPARE_1 = 4, dm_sw_4kb_s = 5, dm_sw_4kb_d = 6, dm_sw_SPARE_2 = 7, dm_sw_SPARE_3 = 8, dm_sw_64kb_s = 9, dm_sw_64kb_d = 10, dm_sw_SPARE_4 = 11, dm_sw_SPARE_5 = 12, dm_sw_var_s = 13, dm_sw_var_d = 14, dm_sw_SPARE_6 = 15, dm_sw_SPARE_7 = 16, dm_sw_64kb_s_t = 17, dm_sw_64kb_d_t = 18, dm_sw_SPARE_10 = 19, dm_sw_SPARE_11 = 20, dm_sw_4kb_s_x = 21, dm_sw_4kb_d_x = 22, dm_sw_SPARE_12 = 23, dm_sw_SPARE_13 = 24, dm_sw_64kb_s_x = 25, dm_sw_64kb_d_x = 26, dm_sw_64kb_r_x = 27, dm_sw_SPARE_15 = 28, dm_sw_var_s_x = 29, dm_sw_var_d_x = 30, dm_sw_var_r_x = 31, dm_sw_gfx7_2d_thin_l_vp, dm_sw_gfx7_2d_thin_gl, }; enum lb_depth { dm_lb_10 = 0, dm_lb_8 = 1, dm_lb_6 = 2, dm_lb_12 = 3, dm_lb_16 = 4, dm_lb_19 = 5 }; enum voltage_state { dm_vmin = 0, dm_vmid = 1, dm_vnom = 2, dm_vmax = 3 }; enum source_macro_tile_size { dm_4k_tile = 0, dm_64k_tile = 1, dm_256k_tile = 2 }; enum cursor_bpp { dm_cur_2bit = 0, dm_cur_32bit = 1, dm_cur_64bit = 2 }; /** * enum clock_change_support - Methods available for changing the DRAM clock. * * DC may change the DRAM clock during its execution, and this enum tracks all * the available methods. Note that every ASIC has its own way of dealing * with these clock switches.
*/ enum clock_change_support { /** * @dm_dram_clock_change_uninitialized: If you see this, we might have * a code initialization issue */ dm_dram_clock_change_uninitialized = 0, /** * @dm_dram_clock_change_vactive: Support DRAM switch in VActive */ dm_dram_clock_change_vactive, /** * @dm_dram_clock_change_vblank: Support DRAM switch in VBlank */ dm_dram_clock_change_vblank, dm_dram_clock_change_vactive_w_mall_full_frame, dm_dram_clock_change_vactive_w_mall_sub_vp, dm_dram_clock_change_vblank_w_mall_full_frame, dm_dram_clock_change_vblank_w_mall_sub_vp, /** * @dm_dram_clock_change_unsupported: Do not support DRAM switch */ dm_dram_clock_change_unsupported }; enum output_standard { dm_std_uninitialized = 0, dm_std_cvtr2, dm_std_cvt }; enum mpc_combine_affinity { dm_mpc_always_when_possible, dm_mpc_reduce_voltage, dm_mpc_reduce_voltage_and_clocks, dm_mpc_never }; enum RequestType { REQ_256Bytes, REQ_128BytesNonContiguous, REQ_128BytesContiguous, REQ_NA }; enum self_refresh_affinity { dm_try_to_allow_self_refresh_and_mclk_switch, dm_allow_self_refresh_and_mclk_switch, dm_allow_self_refresh, dm_neither_self_refresh_nor_mclk_switch }; enum dm_validation_status { DML_VALIDATION_OK, DML_FAIL_SCALE_RATIO_TAP, DML_FAIL_SOURCE_PIXEL_FORMAT, DML_FAIL_VIEWPORT_SIZE, DML_FAIL_TOTAL_V_ACTIVE_BW, DML_FAIL_DIO_SUPPORT, DML_FAIL_NOT_ENOUGH_DSC, DML_FAIL_DSC_CLK_REQUIRED, DML_FAIL_DSC_VALIDATION_FAILURE, DML_FAIL_URGENT_LATENCY, DML_FAIL_REORDERING_BUFFER, DML_FAIL_DISPCLK_DPPCLK, DML_FAIL_TOTAL_AVAILABLE_PIPES, DML_FAIL_NUM_OTG, DML_FAIL_WRITEBACK_MODE, DML_FAIL_WRITEBACK_LATENCY, DML_FAIL_WRITEBACK_SCALE_RATIO_TAP, DML_FAIL_CURSOR_SUPPORT, DML_FAIL_PITCH_SUPPORT, DML_FAIL_PTE_BUFFER_SIZE, DML_FAIL_HOST_VM_IMMEDIATE_FLIP, DML_FAIL_DSC_INPUT_BPC, DML_FAIL_PREFETCH_SUPPORT, DML_FAIL_V_RATIO_PREFETCH, DML_FAIL_P2I_WITH_420, DML_FAIL_DSC_ONLY_IF_NECESSARY_WITH_BPP, DML_FAIL_NOT_DSC422_NATIVE, DML_FAIL_ODM_COMBINE4TO1, DML_FAIL_ENOUGH_WRITEBACK_UNITS, DML_FAIL_VIEWPORT_EXCEEDS_SURFACE, DML_FAIL_DYNAMIC_METADATA, DML_FAIL_FMT_BUFFER_EXCEEDED, }; enum writeback_config { dm_normal, dm_whole_buffer_for_single_stream_no_interleave, dm_whole_buffer_for_single_stream_interleave, }; enum odm_combine_mode { dm_odm_combine_mode_disabled, dm_odm_combine_mode_2to1, dm_odm_combine_mode_4to1, dm_odm_split_mode_1to2, dm_odm_mode_mso_1to2, dm_odm_mode_mso_1to4 }; enum odm_combine_policy { dm_odm_combine_policy_dal, dm_odm_combine_policy_none, dm_odm_combine_policy_2to1, dm_odm_combine_policy_4to1, dm_odm_split_policy_1to2, dm_odm_mso_policy_1to2, dm_odm_mso_policy_1to4, }; enum immediate_flip_requirement { dm_immediate_flip_not_required, dm_immediate_flip_required, dm_immediate_flip_opportunistic, }; enum unbounded_requesting_policy { dm_unbounded_requesting, dm_unbounded_requesting_edp_only, dm_unbounded_requesting_disable }; enum dm_rotation_angle { dm_rotation_0, dm_rotation_90, dm_rotation_180, dm_rotation_270, dm_rotation_0m, dm_rotation_90m, dm_rotation_180m, dm_rotation_270m, }; enum dm_use_mall_for_pstate_change_mode { dm_use_mall_pstate_change_disable, dm_use_mall_pstate_change_full_frame, dm_use_mall_pstate_change_sub_viewport, dm_use_mall_pstate_change_phantom_pipe }; enum dm_use_mall_for_static_screen_mode { dm_use_mall_static_screen_disable, dm_use_mall_static_screen_optimize, dm_use_mall_static_screen_enable, }; enum dm_output_link_dp_rate { dm_dp_rate_na, dm_dp_rate_hbr, dm_dp_rate_hbr2, dm_dp_rate_hbr3, dm_dp_rate_uhbr10, dm_dp_rate_uhbr13p5, dm_dp_rate_uhbr20, }; enum dm_fclock_change_support { 
dm_fclock_change_vactive, dm_fclock_change_vblank, dm_fclock_change_unsupported, }; enum dm_prefetch_modes { dm_prefetch_support_uclk_fclk_and_stutter_if_possible, dm_prefetch_support_uclk_fclk_and_stutter, dm_prefetch_support_fclk_and_stutter, dm_prefetch_support_stutter, dm_prefetch_support_none, }; enum dm_output_type { dm_output_type_unknown, dm_output_type_dp, dm_output_type_edp, dm_output_type_dp2p0, dm_output_type_hdmi, dm_output_type_hdmifrl, }; enum dm_output_rate { dm_output_rate_unknown, dm_output_rate_dp_rate_hbr, dm_output_rate_dp_rate_hbr2, dm_output_rate_dp_rate_hbr3, dm_output_rate_dp_rate_uhbr10, dm_output_rate_dp_rate_uhbr13p5, dm_output_rate_dp_rate_uhbr20, dm_output_rate_hdmi_rate_3x3, dm_output_rate_hdmi_rate_6x3, dm_output_rate_hdmi_rate_6x4, dm_output_rate_hdmi_rate_8x4, dm_output_rate_hdmi_rate_10x4, dm_output_rate_hdmi_rate_12x4, }; #endif
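/*
 * Illustrative sketch (not part of the header above): mapping one of
 * these enums back to a human-readable string, a pattern commonly used
 * when logging DML decisions. The demo_ name is an assumption for this
 * example; only the enum values come from the header.
 */
static const char *demo_clock_change_str(enum clock_change_support c)
{
	switch (c) {
	case dm_dram_clock_change_vactive:
		return "vactive";
	case dm_dram_clock_change_vblank:
		return "vblank";
	case dm_dram_clock_change_unsupported:
		return "unsupported";
	default:
		return "other";
	}
}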
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Export the iSCSI boot info to userland via sysfs. * * Copyright (C) 2010 Red Hat, Inc. All rights reserved. * Copyright (C) 2010 Mike Christie */ #ifndef _ISCSI_BOOT_SYSFS_ #define _ISCSI_BOOT_SYSFS_ /* * The text attribute names for each of the kobjects. */ enum iscsi_boot_eth_properties_enum { ISCSI_BOOT_ETH_INDEX, ISCSI_BOOT_ETH_FLAGS, ISCSI_BOOT_ETH_IP_ADDR, ISCSI_BOOT_ETH_PREFIX_LEN, ISCSI_BOOT_ETH_SUBNET_MASK, ISCSI_BOOT_ETH_ORIGIN, ISCSI_BOOT_ETH_GATEWAY, ISCSI_BOOT_ETH_PRIMARY_DNS, ISCSI_BOOT_ETH_SECONDARY_DNS, ISCSI_BOOT_ETH_DHCP, ISCSI_BOOT_ETH_VLAN, ISCSI_BOOT_ETH_MAC, /* eth_pci_bdf - this is replaced by link to the device itself. */ ISCSI_BOOT_ETH_HOSTNAME, ISCSI_BOOT_ETH_END_MARKER, }; enum iscsi_boot_tgt_properties_enum { ISCSI_BOOT_TGT_INDEX, ISCSI_BOOT_TGT_FLAGS, ISCSI_BOOT_TGT_IP_ADDR, ISCSI_BOOT_TGT_PORT, ISCSI_BOOT_TGT_LUN, ISCSI_BOOT_TGT_CHAP_TYPE, ISCSI_BOOT_TGT_NIC_ASSOC, ISCSI_BOOT_TGT_NAME, ISCSI_BOOT_TGT_CHAP_NAME, ISCSI_BOOT_TGT_CHAP_SECRET, ISCSI_BOOT_TGT_REV_CHAP_NAME, ISCSI_BOOT_TGT_REV_CHAP_SECRET, ISCSI_BOOT_TGT_END_MARKER, }; enum iscsi_boot_initiator_properties_enum { ISCSI_BOOT_INI_INDEX, ISCSI_BOOT_INI_FLAGS, ISCSI_BOOT_INI_ISNS_SERVER, ISCSI_BOOT_INI_SLP_SERVER, ISCSI_BOOT_INI_PRI_RADIUS_SERVER, ISCSI_BOOT_INI_SEC_RADIUS_SERVER, ISCSI_BOOT_INI_INITIATOR_NAME, ISCSI_BOOT_INI_END_MARKER, }; enum iscsi_boot_acpitbl_properties_enum { ISCSI_BOOT_ACPITBL_SIGNATURE, ISCSI_BOOT_ACPITBL_OEM_ID, ISCSI_BOOT_ACPITBL_OEM_TABLE_ID, }; struct attribute_group; struct iscsi_boot_kobj { struct kobject kobj; struct attribute_group *attr_group; struct list_head list; /* * Pointer to store driver specific info. If set, this will * be freed on behalf of the LLD when the kobj release function * is called. */ void *data; /* * Driver specific show function. * * The type argument is one of the property enums above. */ ssize_t (*show) (void *data, int type, char *buf); /* * Driver specific visibility function. * The function should return whether the attr should be readable, * writable, or not shown at all. * * The type argument is one of the property enums above. */ umode_t (*is_visible) (void *data, int type); /* * Driver specific release function. * * The function should free the data passed in.
*/ void (*release) (void *data); }; struct iscsi_boot_kset { struct list_head kobj_list; struct kset *kset; }; struct iscsi_boot_kobj * iscsi_boot_create_initiator(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_ethernet(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_target(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show) (void *data, int type, char *buf), umode_t (*is_visible) (void *data, int type), void (*release) (void *data)); struct iscsi_boot_kobj * iscsi_boot_create_acpitbl(struct iscsi_boot_kset *boot_kset, int index, void *data, ssize_t (*show)(void *data, int type, char *buf), umode_t (*is_visible)(void *data, int type), void (*release)(void *data)); struct iscsi_boot_kset *iscsi_boot_create_kset(const char *set_name); struct iscsi_boot_kset *iscsi_boot_create_host_kset(unsigned int hostno); void iscsi_boot_destroy_kset(struct iscsi_boot_kset *boot_kset); #endif
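/*
 * Illustrative sketch (not part of the header above): the minimal call
 * sequence an iSCSI boot driver would use with this API. The demo_
 * callbacks, the hostno value of 0, and the NULL data/release arguments
 * are assumptions for this example; the create/destroy functions are the
 * ones declared above.
 */
#include <linux/kernel.h>

static ssize_t demo_show(void *data, int type, char *buf)
{
	return sprintf(buf, "%d\n", type);
}

static umode_t demo_is_visible(void *data, int type)
{
	return 0444;	/* make every attribute world-readable */
}

static int demo_register_boot_info(void)
{
	struct iscsi_boot_kset *boot_kset;

	boot_kset = iscsi_boot_create_host_kset(0);
	if (!boot_kset)
		return -ENOMEM;

	if (!iscsi_boot_create_target(boot_kset, 0, NULL, demo_show,
				      demo_is_visible, NULL)) {
		iscsi_boot_destroy_kset(boot_kset);
		return -ENOMEM;
	}

	return 0;
}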
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2023, Qualcomm Innovation Center, Inc. All rights reserved. */ #include <linux/clk-provider.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <dt-bindings/clock/qcom,x1e80100-dispcc.h> #include "common.h" #include "clk-alpha-pll.h" #include "clk-branch.h" #include "clk-pll.h" #include "clk-rcg.h" #include "clk-regmap.h" #include "clk-regmap-divider.h" #include "reset.h" #include "gdsc.h" /* Need to match the order of clocks in DT binding */ enum { DT_BI_TCXO, DT_BI_TCXO_AO, DT_AHB_CLK, DT_SLEEP_CLK, DT_DSI0_PHY_PLL_OUT_BYTECLK, DT_DSI0_PHY_PLL_OUT_DSICLK, DT_DSI1_PHY_PLL_OUT_BYTECLK, DT_DSI1_PHY_PLL_OUT_DSICLK, DT_DP0_PHY_PLL_LINK_CLK, DT_DP0_PHY_PLL_VCO_DIV_CLK, DT_DP1_PHY_PLL_LINK_CLK, DT_DP1_PHY_PLL_VCO_DIV_CLK, DT_DP2_PHY_PLL_LINK_CLK, DT_DP2_PHY_PLL_VCO_DIV_CLK, DT_DP3_PHY_PLL_LINK_CLK, DT_DP3_PHY_PLL_VCO_DIV_CLK, }; #define DISP_CC_MISC_CMD 0xF000 enum { P_BI_TCXO, P_BI_TCXO_AO, P_DISP_CC_PLL0_OUT_MAIN, P_DISP_CC_PLL1_OUT_EVEN, P_DISP_CC_PLL1_OUT_MAIN, P_DP0_PHY_PLL_LINK_CLK, P_DP0_PHY_PLL_VCO_DIV_CLK, P_DP1_PHY_PLL_LINK_CLK, P_DP1_PHY_PLL_VCO_DIV_CLK, P_DP2_PHY_PLL_LINK_CLK, P_DP2_PHY_PLL_VCO_DIV_CLK, P_DP3_PHY_PLL_LINK_CLK, P_DP3_PHY_PLL_VCO_DIV_CLK, P_DSI0_PHY_PLL_OUT_BYTECLK, P_DSI0_PHY_PLL_OUT_DSICLK, P_DSI1_PHY_PLL_OUT_BYTECLK, P_DSI1_PHY_PLL_OUT_DSICLK, P_SLEEP_CLK, }; static const struct pll_vco lucid_ole_vco[] = { { 249600000, 2300000000, 0 }, }; static const struct alpha_pll_config disp_cc_pll0_config = { .l = 0xd, .alpha = 0x6492, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00182261, .config_ctl_hi1_val = 0x82aa299c, .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000003, .test_ctl_hi1_val = 0x00009000, .test_ctl_hi2_val = 0x00000034, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000005, }; static struct clk_alpha_pll disp_cc_pll0 = { .offset = 0x0, .vco_table = lucid_ole_vco, .num_vco = ARRAY_SIZE(lucid_ole_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], .clkr = { .hw.init = &(const struct clk_init_data) { .name = "disp_cc_pll0", .parent_data = &(const struct clk_parent_data) { .index = DT_BI_TCXO, }, .num_parents = 1, .ops = &clk_alpha_pll_reset_lucid_ole_ops, }, }, }; static const struct alpha_pll_config disp_cc_pll1_config = { .l = 0x1f, .alpha = 0x4000, .config_ctl_val = 0x20485699, .config_ctl_hi_val = 0x00182261, .config_ctl_hi1_val = 0x82aa299c, .test_ctl_val = 0x00000000, .test_ctl_hi_val = 0x00000003, .test_ctl_hi1_val = 0x00009000, .test_ctl_hi2_val = 0x00000034, .user_ctl_val = 0x00000000, .user_ctl_hi_val = 0x00000005, }; static struct clk_alpha_pll disp_cc_pll1 = { .offset = 0x1000, .vco_table = lucid_ole_vco, .num_vco = ARRAY_SIZE(lucid_ole_vco), .regs = clk_alpha_pll_regs[CLK_ALPHA_PLL_TYPE_LUCID_OLE], .clkr = { .hw.init = &(const struct clk_init_data) { .name = "disp_cc_pll1", .parent_data = &(const struct clk_parent_data) { .index = DT_BI_TCXO, }, .num_parents = 1, .ops = &clk_alpha_pll_reset_lucid_ole_ops, }, }, }; static const struct parent_map disp_cc_parent_map_0[] = { { P_BI_TCXO, 0 }, { P_DP0_PHY_PLL_LINK_CLK, 1 }, { P_DP0_PHY_PLL_VCO_DIV_CLK, 2 }, { P_DP3_PHY_PLL_VCO_DIV_CLK, 3 }, { P_DP1_PHY_PLL_VCO_DIV_CLK, 4 }, { P_DP2_PHY_PLL_VCO_DIV_CLK, 6 }, }; static const struct clk_parent_data disp_cc_parent_data_0[] = { { .index = DT_BI_TCXO }, { .index = DT_DP0_PHY_PLL_LINK_CLK }, { .index = 
static const struct parent_map disp_cc_parent_map_0[] = {
	{ P_BI_TCXO, 0 },
	{ P_DP0_PHY_PLL_LINK_CLK, 1 },
	{ P_DP0_PHY_PLL_VCO_DIV_CLK, 2 },
	{ P_DP3_PHY_PLL_VCO_DIV_CLK, 3 },
	{ P_DP1_PHY_PLL_VCO_DIV_CLK, 4 },
	{ P_DP2_PHY_PLL_VCO_DIV_CLK, 6 },
};

static const struct clk_parent_data disp_cc_parent_data_0[] = {
	{ .index = DT_BI_TCXO },
	{ .index = DT_DP0_PHY_PLL_LINK_CLK },
	{ .index = DT_DP0_PHY_PLL_VCO_DIV_CLK },
	{ .index = DT_DP3_PHY_PLL_VCO_DIV_CLK },
	{ .index = DT_DP1_PHY_PLL_VCO_DIV_CLK },
	{ .index = DT_DP2_PHY_PLL_VCO_DIV_CLK },
};

static const struct parent_map disp_cc_parent_map_1[] = {
	{ P_BI_TCXO, 0 },
};

static const struct clk_parent_data disp_cc_parent_data_1[] = {
	{ .index = DT_BI_TCXO },
};

static const struct clk_parent_data disp_cc_parent_data_1_ao[] = {
	{ .index = DT_BI_TCXO_AO },
};

static const struct parent_map disp_cc_parent_map_2[] = {
	{ P_BI_TCXO, 0 },
	{ P_DSI0_PHY_PLL_OUT_DSICLK, 1 },
	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
	{ P_DSI1_PHY_PLL_OUT_DSICLK, 3 },
	{ P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
};

static const struct clk_parent_data disp_cc_parent_data_2[] = {
	{ .index = DT_BI_TCXO },
	{ .index = DT_DSI0_PHY_PLL_OUT_DSICLK },
	{ .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
	{ .index = DT_DSI1_PHY_PLL_OUT_DSICLK },
	{ .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
};

static const struct parent_map disp_cc_parent_map_3[] = {
	{ P_BI_TCXO, 0 },
	{ P_DP0_PHY_PLL_LINK_CLK, 1 },
	{ P_DP1_PHY_PLL_LINK_CLK, 2 },
	{ P_DP2_PHY_PLL_LINK_CLK, 3 },
	{ P_DP3_PHY_PLL_LINK_CLK, 4 },
};

static const struct clk_parent_data disp_cc_parent_data_3[] = {
	{ .index = DT_BI_TCXO },
	{ .index = DT_DP0_PHY_PLL_LINK_CLK },
	{ .index = DT_DP1_PHY_PLL_LINK_CLK },
	{ .index = DT_DP2_PHY_PLL_LINK_CLK },
	{ .index = DT_DP3_PHY_PLL_LINK_CLK },
};

static const struct parent_map disp_cc_parent_map_4[] = {
	{ P_BI_TCXO, 0 },
	{ P_DSI0_PHY_PLL_OUT_BYTECLK, 2 },
	{ P_DSI1_PHY_PLL_OUT_BYTECLK, 4 },
};

static const struct clk_parent_data disp_cc_parent_data_4[] = {
	{ .index = DT_BI_TCXO },
	{ .index = DT_DSI0_PHY_PLL_OUT_BYTECLK },
	{ .index = DT_DSI1_PHY_PLL_OUT_BYTECLK },
};

static const struct parent_map disp_cc_parent_map_5[] = {
	{ P_BI_TCXO, 0 },
	{ P_DISP_CC_PLL1_OUT_MAIN, 4 },
	{ P_DISP_CC_PLL1_OUT_EVEN, 6 },
};

static const struct clk_parent_data disp_cc_parent_data_5[] = {
	{ .index = DT_BI_TCXO },
	{ .hw = &disp_cc_pll1.clkr.hw },
	{ .hw = &disp_cc_pll1.clkr.hw },
};

static const struct parent_map disp_cc_parent_map_6[] = {
	{ P_BI_TCXO, 0 },
	{ P_DISP_CC_PLL0_OUT_MAIN, 1 },
	{ P_DISP_CC_PLL1_OUT_MAIN, 4 },
	{ P_DISP_CC_PLL1_OUT_EVEN, 6 },
};

static const struct clk_parent_data disp_cc_parent_data_6[] = {
	{ .index = DT_BI_TCXO },
	{ .hw = &disp_cc_pll0.clkr.hw },
	{ .hw = &disp_cc_pll1.clkr.hw },
	{ .hw = &disp_cc_pll1.clkr.hw },
};

static const struct parent_map disp_cc_parent_map_7[] = {
	{ P_SLEEP_CLK, 0 },
};

static const struct clk_parent_data disp_cc_parent_data_7[] = {
	{ .index = DT_SLEEP_CLK },
};

static const struct freq_tbl ftbl_disp_cc_mdss_ahb_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	F(37500000, P_DISP_CC_PLL1_OUT_MAIN, 16, 0, 0),
	F(75000000, P_DISP_CC_PLL1_OUT_MAIN, 8, 0, 0),
	{ }
};
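/*
 * Worked example (illustrative): with disp_cc_pll1 at 600 MHz, the two
 * non-XO entries above resolve to 600 MHz / 16 = 37.5 MHz and
 * 600 MHz / 8 = 75 MHz.  The common Qualcomm clk F(rate, src, div, m, n)
 * macro takes the divider as its third argument and stores it in the RCG's
 * (2 * div - 1) hardware encoding.
 */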
static struct clk_rcg2 disp_cc_mdss_ahb_clk_src = {
	.cmd_rcgr = 0x82ec,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_5,
	.freq_tbl = ftbl_disp_cc_mdss_ahb_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_ahb_clk_src",
		.parent_data = disp_cc_parent_data_5,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_5),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_disp_cc_mdss_byte0_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	{ }
};

static struct clk_rcg2 disp_cc_mdss_byte0_clk_src = {
	.cmd_rcgr = 0x810c,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_2,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_byte0_clk_src",
		.parent_data = disp_cc_parent_data_2,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_byte2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_byte1_clk_src = {
	.cmd_rcgr = 0x8128,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_2,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_byte1_clk_src",
		.parent_data = disp_cc_parent_data_2,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_byte2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx0_aux_clk_src = {
	.cmd_rcgr = 0x81c0,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_1,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx0_aux_clk_src",
		.parent_data = disp_cc_parent_data_1,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx0_link_clk_src = {
	.cmd_rcgr = 0x8174,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_3,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx0_link_clk_src",
		.parent_data = disp_cc_parent_data_3,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_byte2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx0_pixel0_clk_src = {
	.cmd_rcgr = 0x8190,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx0_pixel0_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx0_pixel1_clk_src = {
	.cmd_rcgr = 0x81a8,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx0_pixel1_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx1_aux_clk_src = {
	.cmd_rcgr = 0x8224,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_1,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx1_aux_clk_src",
		.parent_data = disp_cc_parent_data_1,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx1_link_clk_src = {
	.cmd_rcgr = 0x8208,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_3,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx1_link_clk_src",
		.parent_data = disp_cc_parent_data_3,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_byte2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx1_pixel0_clk_src = {
	.cmd_rcgr = 0x81d8,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx1_pixel0_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};
static struct clk_rcg2 disp_cc_mdss_dptx1_pixel1_clk_src = {
	.cmd_rcgr = 0x81f0,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx1_pixel1_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx2_aux_clk_src = {
	.cmd_rcgr = 0x8288,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_1,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx2_aux_clk_src",
		.parent_data = disp_cc_parent_data_1,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx2_link_clk_src = {
	.cmd_rcgr = 0x823c,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_3,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx2_link_clk_src",
		.parent_data = disp_cc_parent_data_3,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_byte2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx2_pixel0_clk_src = {
	.cmd_rcgr = 0x8258,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx2_pixel0_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx2_pixel1_clk_src = {
	.cmd_rcgr = 0x8270,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx2_pixel1_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx3_aux_clk_src = {
	.cmd_rcgr = 0x82d4,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_1,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx3_aux_clk_src",
		.parent_data = disp_cc_parent_data_1,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx3_link_clk_src = {
	.cmd_rcgr = 0x82b8,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_3,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx3_link_clk_src",
		.parent_data = disp_cc_parent_data_3,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_3),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_byte2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_dptx3_pixel0_clk_src = {
	.cmd_rcgr = 0x82a0,
	.mnd_width = 16,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_0,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx3_pixel0_clk_src",
		.parent_data = disp_cc_parent_data_0,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_0),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_dp_ops,
	},
};
static struct clk_rcg2 disp_cc_mdss_esc0_clk_src = {
	.cmd_rcgr = 0x8144,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_4,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_esc0_clk_src",
		.parent_data = disp_cc_parent_data_4,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_esc1_clk_src = {
	.cmd_rcgr = 0x815c,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_4,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_esc1_clk_src",
		.parent_data = disp_cc_parent_data_4,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_4),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_disp_cc_mdss_mdp_clk_src[] = {
	F(19200000, P_BI_TCXO, 1, 0, 0),
	F(85714286, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(100000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(150000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(172000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(200000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(325000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(375000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(514000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	F(575000000, P_DISP_CC_PLL0_OUT_MAIN, 3, 0, 0),
	{ }
};
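/*
 * Worked example (illustrative): every non-XO entry above keeps the same
 * parent (disp_cc_pll0) and the same divider (3); because the RCG below
 * carries CLK_SET_RATE_PARENT, it is the PLL itself that gets rescaled per
 * entry, e.g. 85714286 Hz * 3 ~= 257.14 MHz and 575 MHz * 3 = 1725 MHz at
 * the PLL -- both inside the 249.6-2300 MHz lucid_ole_vco range declared
 * earlier.
 */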
static struct clk_rcg2 disp_cc_mdss_mdp_clk_src = {
	.cmd_rcgr = 0x80dc,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_6,
	.freq_tbl = ftbl_disp_cc_mdss_mdp_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_mdp_clk_src",
		.parent_data = disp_cc_parent_data_6,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_6),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_shared_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_pclk0_clk_src = {
	.cmd_rcgr = 0x80ac,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_2,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_pclk0_clk_src",
		.parent_data = disp_cc_parent_data_2,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_pixel_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_pclk1_clk_src = {
	.cmd_rcgr = 0x80c4,
	.mnd_width = 8,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_2,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_pclk1_clk_src",
		.parent_data = disp_cc_parent_data_2,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_2),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_pixel_ops,
	},
};

static struct clk_rcg2 disp_cc_mdss_vsync_clk_src = {
	.cmd_rcgr = 0x80f4,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_1,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_vsync_clk_src",
		.parent_data = disp_cc_parent_data_1,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static const struct freq_tbl ftbl_disp_cc_sleep_clk_src[] = {
	F(32000, P_SLEEP_CLK, 1, 0, 0),
	{ }
};

static struct clk_rcg2 disp_cc_sleep_clk_src = {
	.cmd_rcgr = 0xe05c,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_7,
	.freq_tbl = ftbl_disp_cc_sleep_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_sleep_clk_src",
		.parent_data = disp_cc_parent_data_7,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_7),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_rcg2 disp_cc_xo_clk_src = {
	.cmd_rcgr = 0xe03c,
	.mnd_width = 0,
	.hid_width = 5,
	.parent_map = disp_cc_parent_map_1,
	.freq_tbl = ftbl_disp_cc_mdss_byte0_clk_src,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_xo_clk_src",
		.parent_data = disp_cc_parent_data_1_ao,
		.num_parents = ARRAY_SIZE(disp_cc_parent_data_1_ao),
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_rcg2_ops,
	},
};

static struct clk_regmap_div disp_cc_mdss_byte0_div_clk_src = {
	.reg = 0x8124,
	.shift = 0,
	.width = 4,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_byte0_div_clk_src",
		.parent_hws = (const struct clk_hw*[]) {
			&disp_cc_mdss_byte0_clk_src.clkr.hw,
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_regmap_div disp_cc_mdss_byte1_div_clk_src = {
	.reg = 0x8140,
	.shift = 0,
	.width = 4,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_byte1_div_clk_src",
		.parent_hws = (const struct clk_hw*[]) {
			&disp_cc_mdss_byte1_clk_src.clkr.hw,
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_regmap_div disp_cc_mdss_dptx0_link_div_clk_src = {
	.reg = 0x818c,
	.shift = 0,
	.width = 4,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx0_link_div_clk_src",
		.parent_hws = (const struct clk_hw*[]) {
			&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_regmap_div disp_cc_mdss_dptx1_link_div_clk_src = {
	.reg = 0x8220,
	.shift = 0,
	.width = 4,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx1_link_div_clk_src",
		.parent_hws = (const struct clk_hw*[]) {
			&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_regmap_div disp_cc_mdss_dptx2_link_div_clk_src = {
	.reg = 0x8254,
	.shift = 0,
	.width = 4,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx2_link_div_clk_src",
		.parent_hws = (const struct clk_hw*[]) {
			&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_regmap_div disp_cc_mdss_dptx3_link_div_clk_src = {
	.reg = 0x82d0,
	.shift = 0,
	.width = 4,
	.clkr.hw.init = &(const struct clk_init_data) {
		.name = "disp_cc_mdss_dptx3_link_div_clk_src",
		.parent_hws = (const struct clk_hw*[]) {
			&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
		},
		.num_parents = 1,
		.flags = CLK_SET_RATE_PARENT,
		.ops = &clk_regmap_div_ro_ops,
	},
};

static struct clk_branch disp_cc_mdss_accu_clk = {
	.halt_reg = 0xe058,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0xe058,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_accu_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_xo_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_ahb1_clk = {
	.halt_reg = 0xa020,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xa020,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_ahb1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_ahb_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_ahb_clk = {
	.halt_reg = 0x80a8,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x80a8,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_ahb_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_ahb_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
static struct clk_branch disp_cc_mdss_byte0_clk = {
	.halt_reg = 0x8028,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8028,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_byte0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_byte0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_byte0_intf_clk = {
	.halt_reg = 0x802c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x802c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_byte0_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_byte0_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_byte1_clk = {
	.halt_reg = 0x8030,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8030,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_byte1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_byte1_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_byte1_intf_clk = {
	.halt_reg = 0x8034,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8034,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_byte1_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_byte1_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx0_aux_clk = {
	.halt_reg = 0x8058,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8058,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx0_aux_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx0_aux_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx0_link_clk = {
	.halt_reg = 0x8040,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8040,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx0_link_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx0_link_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx0_link_intf_clk = {
	.halt_reg = 0x8048,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8048,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx0_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx0_pixel0_clk = {
	.halt_reg = 0x8050,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8050,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx0_pixel0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx0_pixel0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx0_pixel1_clk = {
	.halt_reg = 0x8054,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8054,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx0_pixel1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx0_pixel1_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
static struct clk_branch disp_cc_mdss_dptx0_usb_router_link_intf_clk = {
	.halt_reg = 0x8044,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8044,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx0_usb_router_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx0_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx1_aux_clk = {
	.halt_reg = 0x8074,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8074,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx1_aux_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx1_aux_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx1_link_clk = {
	.halt_reg = 0x8064,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8064,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx1_link_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx1_link_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx1_link_intf_clk = {
	.halt_reg = 0x806c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x806c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx1_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx1_pixel0_clk = {
	.halt_reg = 0x805c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x805c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx1_pixel0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx1_pixel0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx1_pixel1_clk = {
	.halt_reg = 0x8060,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8060,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx1_pixel1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx1_pixel1_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx1_usb_router_link_intf_clk = {
	.halt_reg = 0x8068,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8068,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx1_usb_router_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx1_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx2_aux_clk = {
	.halt_reg = 0x8090,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8090,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx2_aux_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx2_aux_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
static struct clk_branch disp_cc_mdss_dptx2_link_clk = {
	.halt_reg = 0x8080,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8080,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx2_link_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx2_link_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx2_link_intf_clk = {
	.halt_reg = 0x8084,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8084,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx2_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx2_pixel0_clk = {
	.halt_reg = 0x8078,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8078,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx2_pixel0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx2_pixel0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx2_pixel1_clk = {
	.halt_reg = 0x807c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x807c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx2_pixel1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx2_pixel1_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx2_usb_router_link_intf_clk = {
	.halt_reg = 0x8088,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8088,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx2_usb_router_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx2_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx3_aux_clk = {
	.halt_reg = 0x80a0,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x80a0,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx3_aux_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx3_aux_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx3_link_clk = {
	.halt_reg = 0x8098,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8098,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx3_link_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx3_link_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx3_link_intf_clk = {
	.halt_reg = 0x809c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x809c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx3_link_intf_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx3_link_div_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_dptx3_pixel0_clk = {
	.halt_reg = 0x8094,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8094,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_dptx3_pixel0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_dptx3_pixel0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
static struct clk_branch disp_cc_mdss_esc0_clk = {
	.halt_reg = 0x8038,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8038,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_esc0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_esc0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_esc1_clk = {
	.halt_reg = 0x803c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x803c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_esc1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_esc1_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_mdp1_clk = {
	.halt_reg = 0xa004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xa004,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_mdp1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_mdp_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_mdp_clk = {
	.halt_reg = 0x800c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x800c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_mdp_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_mdp_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_mdp_lut1_clk = {
	.halt_reg = 0xa010,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xa010,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_mdp_lut1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_mdp_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_mdp_lut_clk = {
	.halt_reg = 0x8018,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0x8018,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_mdp_lut_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_mdp_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_non_gdsc_ahb_clk = {
	.halt_reg = 0xc004,
	.halt_check = BRANCH_HALT_VOTED,
	.clkr = {
		.enable_reg = 0xc004,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_non_gdsc_ahb_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_ahb_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_pclk0_clk = {
	.halt_reg = 0x8004,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8004,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_pclk0_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_pclk0_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_pclk1_clk = {
	.halt_reg = 0x8008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8008,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_pclk1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_pclk1_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};
static struct clk_branch disp_cc_mdss_rscc_ahb_clk = {
	.halt_reg = 0xc00c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xc00c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_rscc_ahb_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_ahb_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_rscc_vsync_clk = {
	.halt_reg = 0xc008,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xc008,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_rscc_vsync_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_vsync_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_vsync1_clk = {
	.halt_reg = 0xa01c,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0xa01c,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_vsync1_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_vsync_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct clk_branch disp_cc_mdss_vsync_clk = {
	.halt_reg = 0x8024,
	.halt_check = BRANCH_HALT,
	.clkr = {
		.enable_reg = 0x8024,
		.enable_mask = BIT(0),
		.hw.init = &(const struct clk_init_data) {
			.name = "disp_cc_mdss_vsync_clk",
			.parent_hws = (const struct clk_hw*[]) {
				&disp_cc_mdss_vsync_clk_src.clkr.hw,
			},
			.num_parents = 1,
			.flags = CLK_SET_RATE_PARENT,
			.ops = &clk_branch2_ops,
		},
	},
};

static struct gdsc mdss_gdsc = {
	.gdscr = 0x9000,
	.en_rest_wait_val = 0x2,
	.en_few_wait_val = 0x2,
	.clk_dis_wait_val = 0xf,
	.pd = {
		.name = "mdss_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = HW_CTRL | RETAIN_FF_ENABLE,
};

static struct gdsc mdss_int2_gdsc = {
	.gdscr = 0xb000,
	.en_rest_wait_val = 0x2,
	.en_few_wait_val = 0x2,
	.clk_dis_wait_val = 0xf,
	.pd = {
		.name = "mdss_int2_gdsc",
	},
	.pwrsts = PWRSTS_OFF_ON,
	.flags = HW_CTRL | RETAIN_FF_ENABLE,
};

static struct clk_regmap *disp_cc_x1e80100_clocks[] = {
	[DISP_CC_MDSS_ACCU_CLK] = &disp_cc_mdss_accu_clk.clkr,
	[DISP_CC_MDSS_AHB1_CLK] = &disp_cc_mdss_ahb1_clk.clkr,
	[DISP_CC_MDSS_AHB_CLK] = &disp_cc_mdss_ahb_clk.clkr,
	[DISP_CC_MDSS_AHB_CLK_SRC] = &disp_cc_mdss_ahb_clk_src.clkr,
	[DISP_CC_MDSS_BYTE0_CLK] = &disp_cc_mdss_byte0_clk.clkr,
	[DISP_CC_MDSS_BYTE0_CLK_SRC] = &disp_cc_mdss_byte0_clk_src.clkr,
	[DISP_CC_MDSS_BYTE0_DIV_CLK_SRC] = &disp_cc_mdss_byte0_div_clk_src.clkr,
	[DISP_CC_MDSS_BYTE0_INTF_CLK] = &disp_cc_mdss_byte0_intf_clk.clkr,
	[DISP_CC_MDSS_BYTE1_CLK] = &disp_cc_mdss_byte1_clk.clkr,
	[DISP_CC_MDSS_BYTE1_CLK_SRC] = &disp_cc_mdss_byte1_clk_src.clkr,
	[DISP_CC_MDSS_BYTE1_DIV_CLK_SRC] = &disp_cc_mdss_byte1_div_clk_src.clkr,
	[DISP_CC_MDSS_BYTE1_INTF_CLK] = &disp_cc_mdss_byte1_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX0_AUX_CLK] = &disp_cc_mdss_dptx0_aux_clk.clkr,
	[DISP_CC_MDSS_DPTX0_AUX_CLK_SRC] = &disp_cc_mdss_dptx0_aux_clk_src.clkr,
	[DISP_CC_MDSS_DPTX0_LINK_CLK] = &disp_cc_mdss_dptx0_link_clk.clkr,
	[DISP_CC_MDSS_DPTX0_LINK_CLK_SRC] = &disp_cc_mdss_dptx0_link_clk_src.clkr,
	[DISP_CC_MDSS_DPTX0_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx0_link_div_clk_src.clkr,
	[DISP_CC_MDSS_DPTX0_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX0_PIXEL0_CLK] = &disp_cc_mdss_dptx0_pixel0_clk.clkr,
	[DISP_CC_MDSS_DPTX0_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx0_pixel0_clk_src.clkr,
	[DISP_CC_MDSS_DPTX0_PIXEL1_CLK] = &disp_cc_mdss_dptx0_pixel1_clk.clkr,
	[DISP_CC_MDSS_DPTX0_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx0_pixel1_clk_src.clkr,
	[DISP_CC_MDSS_DPTX0_USB_ROUTER_LINK_INTF_CLK] = &disp_cc_mdss_dptx0_usb_router_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX1_AUX_CLK] = &disp_cc_mdss_dptx1_aux_clk.clkr,
	[DISP_CC_MDSS_DPTX1_AUX_CLK_SRC] = &disp_cc_mdss_dptx1_aux_clk_src.clkr,
	[DISP_CC_MDSS_DPTX1_LINK_CLK] = &disp_cc_mdss_dptx1_link_clk.clkr,
	[DISP_CC_MDSS_DPTX1_LINK_CLK_SRC] = &disp_cc_mdss_dptx1_link_clk_src.clkr,
	[DISP_CC_MDSS_DPTX1_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx1_link_div_clk_src.clkr,
	[DISP_CC_MDSS_DPTX1_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX1_PIXEL0_CLK] = &disp_cc_mdss_dptx1_pixel0_clk.clkr,
	[DISP_CC_MDSS_DPTX1_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx1_pixel0_clk_src.clkr,
	[DISP_CC_MDSS_DPTX1_PIXEL1_CLK] = &disp_cc_mdss_dptx1_pixel1_clk.clkr,
	[DISP_CC_MDSS_DPTX1_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx1_pixel1_clk_src.clkr,
	[DISP_CC_MDSS_DPTX1_USB_ROUTER_LINK_INTF_CLK] = &disp_cc_mdss_dptx1_usb_router_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX2_AUX_CLK] = &disp_cc_mdss_dptx2_aux_clk.clkr,
	[DISP_CC_MDSS_DPTX2_AUX_CLK_SRC] = &disp_cc_mdss_dptx2_aux_clk_src.clkr,
	[DISP_CC_MDSS_DPTX2_LINK_CLK] = &disp_cc_mdss_dptx2_link_clk.clkr,
	[DISP_CC_MDSS_DPTX2_LINK_CLK_SRC] = &disp_cc_mdss_dptx2_link_clk_src.clkr,
	[DISP_CC_MDSS_DPTX2_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx2_link_div_clk_src.clkr,
	[DISP_CC_MDSS_DPTX2_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX2_PIXEL0_CLK] = &disp_cc_mdss_dptx2_pixel0_clk.clkr,
	[DISP_CC_MDSS_DPTX2_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx2_pixel0_clk_src.clkr,
	[DISP_CC_MDSS_DPTX2_PIXEL1_CLK] = &disp_cc_mdss_dptx2_pixel1_clk.clkr,
	[DISP_CC_MDSS_DPTX2_PIXEL1_CLK_SRC] = &disp_cc_mdss_dptx2_pixel1_clk_src.clkr,
	[DISP_CC_MDSS_DPTX2_USB_ROUTER_LINK_INTF_CLK] = &disp_cc_mdss_dptx2_usb_router_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX3_AUX_CLK] = &disp_cc_mdss_dptx3_aux_clk.clkr,
	[DISP_CC_MDSS_DPTX3_AUX_CLK_SRC] = &disp_cc_mdss_dptx3_aux_clk_src.clkr,
	[DISP_CC_MDSS_DPTX3_LINK_CLK] = &disp_cc_mdss_dptx3_link_clk.clkr,
	[DISP_CC_MDSS_DPTX3_LINK_CLK_SRC] = &disp_cc_mdss_dptx3_link_clk_src.clkr,
	[DISP_CC_MDSS_DPTX3_LINK_DIV_CLK_SRC] = &disp_cc_mdss_dptx3_link_div_clk_src.clkr,
	[DISP_CC_MDSS_DPTX3_LINK_INTF_CLK] = &disp_cc_mdss_dptx3_link_intf_clk.clkr,
	[DISP_CC_MDSS_DPTX3_PIXEL0_CLK] = &disp_cc_mdss_dptx3_pixel0_clk.clkr,
	[DISP_CC_MDSS_DPTX3_PIXEL0_CLK_SRC] = &disp_cc_mdss_dptx3_pixel0_clk_src.clkr,
	[DISP_CC_MDSS_ESC0_CLK] = &disp_cc_mdss_esc0_clk.clkr,
	[DISP_CC_MDSS_ESC0_CLK_SRC] = &disp_cc_mdss_esc0_clk_src.clkr,
	[DISP_CC_MDSS_ESC1_CLK] = &disp_cc_mdss_esc1_clk.clkr,
	[DISP_CC_MDSS_ESC1_CLK_SRC] = &disp_cc_mdss_esc1_clk_src.clkr,
	[DISP_CC_MDSS_MDP1_CLK] = &disp_cc_mdss_mdp1_clk.clkr,
	[DISP_CC_MDSS_MDP_CLK] = &disp_cc_mdss_mdp_clk.clkr,
	[DISP_CC_MDSS_MDP_CLK_SRC] = &disp_cc_mdss_mdp_clk_src.clkr,
	[DISP_CC_MDSS_MDP_LUT1_CLK] = &disp_cc_mdss_mdp_lut1_clk.clkr,
	[DISP_CC_MDSS_MDP_LUT_CLK] = &disp_cc_mdss_mdp_lut_clk.clkr,
	[DISP_CC_MDSS_NON_GDSC_AHB_CLK] = &disp_cc_mdss_non_gdsc_ahb_clk.clkr,
	[DISP_CC_MDSS_PCLK0_CLK] = &disp_cc_mdss_pclk0_clk.clkr,
	[DISP_CC_MDSS_PCLK0_CLK_SRC] = &disp_cc_mdss_pclk0_clk_src.clkr,
	[DISP_CC_MDSS_PCLK1_CLK] = &disp_cc_mdss_pclk1_clk.clkr,
	[DISP_CC_MDSS_PCLK1_CLK_SRC] = &disp_cc_mdss_pclk1_clk_src.clkr,
	[DISP_CC_MDSS_RSCC_AHB_CLK] = &disp_cc_mdss_rscc_ahb_clk.clkr,
	[DISP_CC_MDSS_RSCC_VSYNC_CLK] = &disp_cc_mdss_rscc_vsync_clk.clkr,
	[DISP_CC_MDSS_VSYNC1_CLK] = &disp_cc_mdss_vsync1_clk.clkr,
	[DISP_CC_MDSS_VSYNC_CLK] = &disp_cc_mdss_vsync_clk.clkr,
	[DISP_CC_MDSS_VSYNC_CLK_SRC] = &disp_cc_mdss_vsync_clk_src.clkr,
	[DISP_CC_PLL0] = &disp_cc_pll0.clkr,
	[DISP_CC_PLL1] = &disp_cc_pll1.clkr,
	[DISP_CC_SLEEP_CLK_SRC] = &disp_cc_sleep_clk_src.clkr,
	[DISP_CC_XO_CLK_SRC] = &disp_cc_xo_clk_src.clkr,
};

static const struct qcom_reset_map disp_cc_x1e80100_resets[] = {
	[DISP_CC_MDSS_CORE_BCR] = { 0x8000 },
	[DISP_CC_MDSS_CORE_INT2_BCR] = { 0xa000 },
	[DISP_CC_MDSS_RSCC_BCR] = { 0xc000 },
};

static struct gdsc *disp_cc_x1e80100_gdscs[] = {
	[MDSS_GDSC] = &mdss_gdsc,
	[MDSS_INT2_GDSC] = &mdss_int2_gdsc,
};

static const struct regmap_config disp_cc_x1e80100_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
	.max_register = 0x11008,
	.fast_io = true,
};

static const struct qcom_cc_desc disp_cc_x1e80100_desc = {
	.config = &disp_cc_x1e80100_regmap_config,
	.clks = disp_cc_x1e80100_clocks,
	.num_clks = ARRAY_SIZE(disp_cc_x1e80100_clocks),
	.resets = disp_cc_x1e80100_resets,
	.num_resets = ARRAY_SIZE(disp_cc_x1e80100_resets),
	.gdscs = disp_cc_x1e80100_gdscs,
	.num_gdscs = ARRAY_SIZE(disp_cc_x1e80100_gdscs),
};

static const struct of_device_id disp_cc_x1e80100_match_table[] = {
	{ .compatible = "qcom,x1e80100-dispcc" },
	{ }
};
MODULE_DEVICE_TABLE(of, disp_cc_x1e80100_match_table);

static int disp_cc_x1e80100_probe(struct platform_device *pdev)
{
	struct regmap *regmap;
	int ret;

	ret = devm_pm_runtime_enable(&pdev->dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret)
		return ret;

	regmap = qcom_cc_map(pdev, &disp_cc_x1e80100_desc);
	if (IS_ERR(regmap)) {
		ret = PTR_ERR(regmap);
		goto err_put_rpm;
	}

	clk_lucid_evo_pll_configure(&disp_cc_pll0, regmap, &disp_cc_pll0_config);
	clk_lucid_evo_pll_configure(&disp_cc_pll1, regmap, &disp_cc_pll1_config);

	/* Enable clock gating for MDP clocks */
	regmap_update_bits(regmap, DISP_CC_MISC_CMD, 0x10, 0x10);

	/* Keep clocks always enabled */
	qcom_branch_set_clk_en(regmap, 0xe074); /* DISP_CC_SLEEP_CLK */
	qcom_branch_set_clk_en(regmap, 0xe054); /* DISP_CC_XO_CLK */

	ret = qcom_cc_really_probe(&pdev->dev, &disp_cc_x1e80100_desc, regmap);
	if (ret)
		goto err_put_rpm;

	pm_runtime_put(&pdev->dev);

	return 0;

err_put_rpm:
	pm_runtime_put_sync(&pdev->dev);

	return ret;
}

static struct platform_driver disp_cc_x1e80100_driver = {
	.probe = disp_cc_x1e80100_probe,
	.driver = {
		.name = "dispcc-x1e80100",
		.of_match_table = disp_cc_x1e80100_match_table,
	},
};

static int __init disp_cc_x1e80100_init(void)
{
	return platform_driver_register(&disp_cc_x1e80100_driver);
}
subsys_initcall(disp_cc_x1e80100_init);

static void __exit disp_cc_x1e80100_exit(void)
{
	platform_driver_unregister(&disp_cc_x1e80100_driver);
}
module_exit(disp_cc_x1e80100_exit);

MODULE_DESCRIPTION("QTI Display Clock Controller X1E80100 Driver");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_INTERNAL_H
#define __KVM_X86_MMU_INTERNAL_H

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <asm/kvm_host.h>

#ifdef CONFIG_KVM_PROVE_MMU
#define KVM_MMU_WARN_ON(x) WARN_ON_ONCE(x)
#else
#define KVM_MMU_WARN_ON(x) BUILD_BUG_ON_INVALID(x)
#endif

/* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
#define __PT_BASE_ADDR_MASK GENMASK_ULL(51, 12)
#define __PT_LEVEL_SHIFT(level, bits_per_level)	\
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))

#define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

#define __PT_ENT_PER_PAGE(bits_per_level) (1 << (bits_per_level))
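/*
 * Worked example (illustrative): with the x86-64 4 KiB page size
 * (PAGE_SHIFT == 12) and 9 bits per level, __PT_LEVEL_SHIFT(1, 9) == 12 and
 * __PT_LEVEL_SHIFT(2, 9) == 21, so __PT_INDEX(addr, 2, 9) evaluates to
 * (addr >> 21) & 0x1ff -- the PDE index, one of __PT_ENT_PER_PAGE(9) == 512
 * entries in that table.
 */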
/*
 * Unlike regular MMU roots, PAE "roots", a.k.a. PDPTEs/PDPTRs, have a PRESENT
 * bit, and thus are guaranteed to be non-zero when valid.  And, when a guest
 * PDPTR is !PRESENT, its corresponding PAE root cannot be set to INVALID_PAGE,
 * as the CPU would treat that as PRESENT PDPTR with reserved bits set.  Use
 * '0' instead of INVALID_PAGE to indicate an invalid PAE root.
 */
#define INVALID_PAE_ROOT	0
#define IS_VALID_PAE_ROOT(x)	(!!(x))

static inline hpa_t kvm_mmu_get_dummy_root(void)
{
	return my_zero_pfn(0) << PAGE_SHIFT;
}

static inline bool kvm_mmu_is_dummy_root(hpa_t shadow_page)
{
	return is_zero_pfn(shadow_page >> PAGE_SHIFT);
}

typedef u64 __rcu *tdp_ptep_t;

struct kvm_mmu_page {
	/*
	 * Note, "link" through "spt" fit in a single 64 byte cache line on
	 * 64-bit kernels, keep it that way unless there's a reason not to.
	 */
	struct list_head link;
	struct hlist_node hash_link;

	bool tdp_mmu_page;
	bool unsync;
	union {
		u8 mmu_valid_gen;

		/* Only accessed under slots_lock.  */
		bool tdp_mmu_scheduled_root_to_zap;
	};

	/*
	 * The shadow page can't be replaced by an equivalent huge page
	 * because it is being used to map an executable page in the guest
	 * and the NX huge page mitigation is enabled.
	 */
	bool nx_huge_page_disallowed;

	/*
	 * The following two entries are used to key the shadow page in the
	 * hash table.
	 */
	union kvm_mmu_page_role role;
	gfn_t gfn;

	u64 *spt;

	/*
	 * Stores the result of the guest translation being shadowed by each
	 * SPTE.  KVM shadows two types of guest translations: nGPA -> GPA
	 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging).  In both
	 * cases the result of the translation is a GPA and a set of access
	 * constraints.
	 *
	 * The GFN is stored in the upper bits (PAGE_SHIFT) and the shadowed
	 * access permissions are stored in the lower bits.  Note, for
	 * convenience and uniformity across guests, the access permissions are
	 * stored in KVM format (e.g. ACC_EXEC_MASK) not the raw guest format.
	 */
	u64 *shadowed_translation;

	/* Currently serving as active root */
	union {
		int root_count;
		refcount_t tdp_mmu_root_count;
	};
	unsigned int unsync_children;
	union {
		struct kvm_rmap_head parent_ptes; /* rmap pointers to parent sptes */
		tdp_ptep_t ptep;
	};
	DECLARE_BITMAP(unsync_child_bitmap, 512);

	/*
	 * Tracks shadow pages that, if zapped, would allow KVM to create an NX
	 * huge page.  A shadow page will have nx_huge_page_disallowed set but
	 * not be on the list if a huge page is disallowed for other reasons,
	 * e.g. because KVM is shadowing a PTE at the same gfn, the memslot
	 * isn't properly aligned, etc...
	 */
	struct list_head possible_nx_huge_page_link;
#ifdef CONFIG_X86_32
	/*
	 * Used out of the mmu-lock to avoid reading spte values while an
	 * update is in progress; see the comments in __get_spte_lockless().
	 */
	int clear_spte_count;
#endif

	/* Number of writes since the last time traversal visited this page.  */
	atomic_t write_flooding_count;

#ifdef CONFIG_X86_64
	/* Used for freeing the page asynchronously if it is a TDP MMU page. */
	struct rcu_head rcu_head;
#endif
};

extern struct kmem_cache *mmu_page_header_cache;

static inline int kvm_mmu_role_as_id(union kvm_mmu_page_role role)
{
	return role.smm ? 1 : 0;
}

static inline int kvm_mmu_page_as_id(struct kvm_mmu_page *sp)
{
	return kvm_mmu_role_as_id(sp->role);
}

static inline bool kvm_mmu_page_ad_need_write_protect(struct kvm_mmu_page *sp)
{
	/*
	 * When using the EPT page-modification log, the GPAs in the CPU dirty
	 * log would come from L2 rather than L1.  Therefore, we need to rely
	 * on write protection to record dirty pages, which bypasses PML, since
	 * writes now result in a vmexit.  Note, the check on CPU dirty logging
	 * being enabled is mandatory as the bits used to denote WP-only SPTEs
	 * are reserved for PAE paging (32-bit KVM).
	 */
	return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -KVM_PAGES_PER_HPAGE(level);
}

int mmu_try_to_unsync_pages(struct kvm *kvm, const struct kvm_memory_slot *slot,
			    gfn_t gfn, bool synchronizing, bool prefetch);

void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
				    struct kvm_memory_slot *slot, u64 gfn,
				    int min_level);

/* Flush the given page (huge or not) of guest memory. */
static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
{
	kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
				    KVM_PAGES_PER_HPAGE(level));
}
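/*
 * Worked example (illustrative): for a 2 MiB mapping,
 * KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) == 512, so gfn_round_for_level() computes
 * gfn & -512, clearing the low 9 bits of the gfn, and
 * kvm_flush_remote_tlbs_gfn() therefore flushes the whole 512-page (2 MiB)
 * aligned region containing the gfn.
 */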
unsigned int pte_list_count(struct kvm_rmap_head *rmap_head);

extern int nx_huge_pages;
static inline bool is_nx_huge_page_enabled(struct kvm *kvm)
{
	return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
}

struct kvm_page_fault {
	/* arguments to kvm_mmu_do_page_fault.  */
	const gpa_t addr;
	const u64 error_code;
	const bool prefetch;

	/* Derived from error_code.  */
	const bool exec;
	const bool write;
	const bool present;
	const bool rsvd;
	const bool user;

	/* Derived from mmu and global state.  */
	const bool is_tdp;
	const bool is_private;
	const bool nx_huge_page_workaround_enabled;

	/*
	 * Whether a >4KB mapping can be created or is forbidden due to NX
	 * hugepages.
	 */
	bool huge_page_disallowed;

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
	u8 max_level;

	/*
	 * Page size that can be created based on the max_level and the
	 * page size used by the host mapping.
	 */
	u8 req_level;

	/*
	 * Page size that will be created based on the req_level and
	 * huge_page_disallowed.
	 */
	u8 goal_level;

	/* Shifted addr, or result of guest page table walk if addr is a gva. */
	gfn_t gfn;

	/* The memslot containing gfn. May be NULL. */
	struct kvm_memory_slot *slot;

	/* Outputs of kvm_mmu_faultin_pfn().  */
	unsigned long mmu_seq;
	kvm_pfn_t pfn;
	struct page *refcounted_page;
	bool map_writable;

	/*
	 * Indicates the guest is trying to write a gfn that contains one or
	 * more of the PTEs used to translate the write itself, i.e. the access
	 * is changing its own translation in the guest page tables.
	 */
	bool write_fault_to_shadow_pgtable;
};

int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/*
 * Return values of handle_mmio_page_fault(), mmu.page_fault(), fast_page_fault(),
 * and of course kvm_mmu_do_page_fault().
 *
 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
 * RET_PF_RETRY: let CPU fault again on the address.
 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
 * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotected the
 *                         gfn and retry, or emulate the instruction directly.
 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
 * RET_PF_FIXED: The faulting entry has been fixed.
 * RET_PF_SPURIOUS: The faulting entry was already fixed, e.g. by another vCPU.
 *
 * Any names added to this enum should be exported to userspace for use in
 * tracepoints via TRACE_DEFINE_ENUM() in mmutrace.h
 *
 * Note, all values must be greater than or equal to zero so as not to encroach
 * on -errno return values.  Somewhat arbitrarily use '0' for CONTINUE, which
 * will allow for efficient machine code when checking for CONTINUE, e.g.
 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero.
 */
enum {
	RET_PF_CONTINUE = 0,
	RET_PF_RETRY,
	RET_PF_EMULATE,
	RET_PF_WRITE_PROTECTED,
	RET_PF_INVALID,
	RET_PF_FIXED,
	RET_PF_SPURIOUS,
};

static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						     struct kvm_page_fault *fault)
{
	kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
				      PAGE_SIZE, fault->write, fault->exec,
				      fault->is_private);
}

static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
					u64 err, bool prefetch,
					int *emulation_type, u8 *level)
{
	struct kvm_page_fault fault = {
		.addr = cr2_or_gpa,
		.error_code = err,
		.exec = err & PFERR_FETCH_MASK,
		.write = err & PFERR_WRITE_MASK,
		.present = err & PFERR_PRESENT_MASK,
		.rsvd = err & PFERR_RSVD_MASK,
		.user = err & PFERR_USER_MASK,
		.prefetch = prefetch,
		.is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
		.nx_huge_page_workaround_enabled =
			is_nx_huge_page_enabled(vcpu->kvm),

		.max_level = KVM_MAX_HUGEPAGE_LEVEL,
		.req_level = PG_LEVEL_4K,
		.goal_level = PG_LEVEL_4K,
		.is_private = err & PFERR_PRIVATE_ACCESS,

		.pfn = KVM_PFN_ERR_FAULT,
	};
	int r;

	if (vcpu->arch.mmu->root_role.direct) {
		fault.gfn = fault.addr >> PAGE_SHIFT;
		fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	}

	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);

	/*
	 * Not sure what's happening, but punt to userspace and hope that
	 * they can fix it by changing memory to shared, or they can
	 * provide a better error.
	 */
	if (r == RET_PF_EMULATE && fault.is_private) {
		pr_warn_ratelimited("kvm: unexpected emulation request on private memory\n");
		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
		return -EFAULT;
	}

	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		*emulation_type |= EMULTYPE_WRITE_PF_TO_SP;

	if (level)
		*level = fault.goal_level;

	return r;
}

int kvm_mmu_max_mapping_level(struct kvm *kvm,
			      const struct kvm_memory_slot *slot, gfn_t gfn);
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);

void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

#endif /* __KVM_X86_MMU_INTERNAL_H */
/* SPDX-License-Identifier: GPL-2.0+ OR BSD-3-Clause */
/*
 * Copyright (C) 2020, STMicroelectronics - All Rights Reserved
 *
 * Configuration settings for the STM32MP13x CPU
 */

#ifndef STM32MP13_RCC_H
#define STM32MP13_RCC_H

/* RCC registers */
#define RCC_SECCFGR		0x0
#define RCC_MP_SREQSETR		0x100
#define RCC_MP_SREQCLRR		0x104
#define RCC_MP_APRSTCR		0x108
#define RCC_MP_APRSTSR		0x10c
#define RCC_PWRLPDLYCR		0x110
#define RCC_MP_GRSTCSETR	0x114
#define RCC_BR_RSTSCLRR		0x118
#define RCC_MP_RSTSSETR		0x11c
#define RCC_MP_RSTSCLRR		0x120
#define RCC_MP_IWDGFZSETR	0x124
#define RCC_MP_IWDGFZCLRR	0x128
#define RCC_MP_CIER		0x200
#define RCC_MP_CIFR		0x204
#define RCC_BDCR		0x400
#define RCC_RDLSICR		0x404
#define RCC_OCENSETR		0x420
#define RCC_OCENCLRR		0x424
#define RCC_OCRDYR		0x428
#define RCC_HSICFGR		0x440
#define RCC_CSICFGR		0x444
#define RCC_MCO1CFGR		0x460
#define RCC_MCO2CFGR		0x464
#define RCC_DBGCFGR		0x468
#define RCC_RCK12SELR		0x480
#define RCC_RCK3SELR		0x484
#define RCC_RCK4SELR		0x488
#define RCC_PLL1CR		0x4a0
#define RCC_PLL1CFGR1		0x4a4
#define RCC_PLL1CFGR2		0x4a8
#define RCC_PLL1FRACR		0x4ac
#define RCC_PLL1CSGR		0x4b0
#define RCC_PLL2CR		0x4d0
#define RCC_PLL2CFGR1		0x4d4
#define RCC_PLL2CFGR2		0x4d8
#define RCC_PLL2FRACR		0x4dc
#define RCC_PLL2CSGR		0x4e0
#define RCC_PLL3CR		0x500
#define RCC_PLL3CFGR1		0x504
#define RCC_PLL3CFGR2		0x508
#define RCC_PLL3FRACR		0x50c
#define RCC_PLL3CSGR		0x510
#define RCC_PLL4CR		0x520
#define RCC_PLL4CFGR1		0x524
#define RCC_PLL4CFGR2		0x528
#define RCC_PLL4FRACR		0x52c
#define RCC_PLL4CSGR		0x530
#define RCC_MPCKSELR		0x540
#define RCC_ASSCKSELR		0x544
#define RCC_MSSCKSELR		0x548
#define RCC_CPERCKSELR		0x54c
#define RCC_RTCDIVR		0x560
#define RCC_MPCKDIVR		0x564
#define RCC_AXIDIVR		0x568
#define RCC_MLAHBDIVR		0x56c
#define RCC_APB1DIVR		0x570
#define RCC_APB2DIVR		0x574
#define RCC_APB3DIVR		0x578
#define RCC_APB4DIVR		0x57c
#define RCC_APB5DIVR		0x580
#define RCC_APB6DIVR		0x584
#define RCC_TIMG1PRER		0x5a0
#define RCC_TIMG2PRER		0x5a4
#define RCC_TIMG3PRER		0x5a8
#define RCC_DDRITFCR		0x5c0
#define RCC_I2C12CKSELR		0x600
#define RCC_I2C345CKSELR	0x604
#define RCC_SPI2S1CKSELR	0x608
#define RCC_SPI2S23CKSELR	0x60c
#define RCC_SPI45CKSELR		0x610
#define RCC_UART12CKSELR	0x614
#define RCC_UART35CKSELR	0x618
#define RCC_UART4CKSELR		0x61c
#define RCC_UART6CKSELR		0x620
#define RCC_UART78CKSELR	0x624
#define RCC_LPTIM1CKSELR	0x628
#define RCC_LPTIM23CKSELR	0x62c
#define RCC_LPTIM45CKSELR	0x630
#define RCC_SAI1CKSELR		0x634
#define RCC_SAI2CKSELR		0x638
#define RCC_FDCANCKSELR		0x63c
#define RCC_SPDIFCKSELR		0x640
#define RCC_ADC12CKSELR		0x644
#define RCC_SDMMC12CKSELR	0x648
#define RCC_ETH12CKSELR		0x64c
#define RCC_USBCKSELR		0x650
#define RCC_QSPICKSELR		0x654
#define RCC_FMCCKSELR		0x658
#define RCC_RNG1CKSELR		0x65c
#define RCC_STGENCKSELR		0x660
#define RCC_DCMIPPCKSELR	0x664
#define RCC_SAESCKSELR		0x668
#define RCC_APB1RSTSETR		0x6a0
#define RCC_APB1RSTCLRR		0x6a4
#define RCC_APB2RSTSETR		0x6a8
#define RCC_APB2RSTCLRR		0x6ac
#define RCC_APB3RSTSETR		0x6b0
#define RCC_APB3RSTCLRR		0x6b4
#define RCC_APB4RSTSETR		0x6b8
#define RCC_APB4RSTCLRR		0x6bc
#define RCC_APB5RSTSETR		0x6c0
#define RCC_APB5RSTCLRR		0x6c4
#define RCC_APB6RSTSETR		0x6c8
#define RCC_APB6RSTCLRR		0x6cc
#define RCC_AHB2RSTSETR		0x6d0
#define RCC_AHB2RSTCLRR		0x6d4
#define RCC_AHB4RSTSETR		0x6e0
#define RCC_AHB4RSTCLRR		0x6e4
#define RCC_AHB5RSTSETR		0x6e8
#define RCC_AHB5RSTCLRR		0x6ec
#define RCC_AHB6RSTSETR		0x6f0
#define RCC_AHB6RSTCLRR		0x6f4
#define RCC_MP_APB1ENSETR	0x700
#define RCC_MP_APB1ENCLRR	0x704
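/*
 * Illustrative note (assumption inferred from the SETR/CLRR naming): the
 * enable registers below come in write-one-to-set / write-one-to-clear
 * pairs, so a single peripheral clock can be gated or ungated without a
 * read-modify-write of its neighbours, e.g. (bit position assumed):
 *
 *	writel(BIT(14), rcc_base + RCC_MP_APB1ENSETR);	// enable
 *	writel(BIT(14), rcc_base + RCC_MP_APB1ENCLRR);	// gate off
 */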
#define RCC_MP_APB2ENSETR	0x708
#define RCC_MP_APB2ENCLRR	0x70c
#define RCC_MP_APB3ENSETR	0x710
#define RCC_MP_APB3ENCLRR	0x714
#define RCC_MP_S_APB3ENSETR	0x718
#define RCC_MP_S_APB3ENCLRR	0x71c
#define RCC_MP_NS_APB3ENSETR	0x720
#define RCC_MP_NS_APB3ENCLRR	0x724
#define RCC_MP_APB4ENSETR	0x728
#define RCC_MP_APB4ENCLRR	0x72c
#define RCC_MP_S_APB4ENSETR	0x730
#define RCC_MP_S_APB4ENCLRR	0x734
#define RCC_MP_NS_APB4ENSETR	0x738
#define RCC_MP_NS_APB4ENCLRR	0x73c
#define RCC_MP_APB5ENSETR	0x740
#define RCC_MP_APB5ENCLRR	0x744
#define RCC_MP_APB6ENSETR	0x748
#define RCC_MP_APB6ENCLRR	0x74c
#define RCC_MP_AHB2ENSETR	0x750
#define RCC_MP_AHB2ENCLRR	0x754
#define RCC_MP_AHB4ENSETR	0x760
#define RCC_MP_AHB4ENCLRR	0x764
#define RCC_MP_S_AHB4ENSETR	0x768
#define RCC_MP_S_AHB4ENCLRR	0x76c
#define RCC_MP_NS_AHB4ENSETR	0x770
#define RCC_MP_NS_AHB4ENCLRR	0x774
#define RCC_MP_AHB5ENSETR	0x778
#define RCC_MP_AHB5ENCLRR	0x77c
#define RCC_MP_AHB6ENSETR	0x780
#define RCC_MP_AHB6ENCLRR	0x784
#define RCC_MP_S_AHB6ENSETR	0x788
#define RCC_MP_S_AHB6ENCLRR	0x78c
#define RCC_MP_NS_AHB6ENSETR	0x790
#define RCC_MP_NS_AHB6ENCLRR	0x794
#define RCC_MP_APB1LPENSETR	0x800
#define RCC_MP_APB1LPENCLRR	0x804
#define RCC_MP_APB2LPENSETR	0x808
#define RCC_MP_APB2LPENCLRR	0x80c
#define RCC_MP_APB3LPENSETR	0x810
#define RCC_MP_APB3LPENCLRR	0x814
#define RCC_MP_S_APB3LPENSETR	0x818
#define RCC_MP_S_APB3LPENCLRR	0x81c
#define RCC_MP_NS_APB3LPENSETR	0x820
#define RCC_MP_NS_APB3LPENCLRR	0x824
#define RCC_MP_APB4LPENSETR	0x828
#define RCC_MP_APB4LPENCLRR	0x82c
#define RCC_MP_S_APB4LPENSETR	0x830
#define RCC_MP_S_APB4LPENCLRR	0x834
#define RCC_MP_NS_APB4LPENSETR	0x838
#define RCC_MP_NS_APB4LPENCLRR	0x83c
#define RCC_MP_APB5LPENSETR	0x840
#define RCC_MP_APB5LPENCLRR	0x844
#define RCC_MP_APB6LPENSETR	0x848
#define RCC_MP_APB6LPENCLRR	0x84c
#define RCC_MP_AHB2LPENSETR	0x850
#define RCC_MP_AHB2LPENCLRR	0x854
#define RCC_MP_AHB4LPENSETR	0x858
#define RCC_MP_AHB4LPENCLRR	0x85c
#define RCC_MP_S_AHB4LPENSETR	0x868
#define RCC_MP_S_AHB4LPENCLRR	0x86c
#define RCC_MP_NS_AHB4LPENSETR	0x870
#define RCC_MP_NS_AHB4LPENCLRR	0x874
#define RCC_MP_AHB5LPENSETR	0x878
#define RCC_MP_AHB5LPENCLRR	0x87c
#define RCC_MP_AHB6LPENSETR	0x880
#define RCC_MP_AHB6LPENCLRR	0x884
#define RCC_MP_S_AHB6LPENSETR	0x888
#define RCC_MP_S_AHB6LPENCLRR	0x88c
#define RCC_MP_NS_AHB6LPENSETR	0x890
#define RCC_MP_NS_AHB6LPENCLRR	0x894
#define RCC_MP_S_AXIMLPENSETR	0x898
#define RCC_MP_S_AXIMLPENCLRR	0x89c
#define RCC_MP_NS_AXIMLPENSETR	0x8a0
#define RCC_MP_NS_AXIMLPENCLRR	0x8a4
#define RCC_MP_MLAHBLPENSETR	0x8a8
#define RCC_MP_MLAHBLPENCLRR	0x8ac
#define RCC_APB3SECSR		0x8c0
#define RCC_APB4SECSR		0x8c4
#define RCC_APB5SECSR		0x8c8
#define RCC_APB6SECSR		0x8cc
#define RCC_AHB2SECSR		0x8d0
#define RCC_AHB4SECSR		0x8d4
#define RCC_AHB5SECSR		0x8d8
#define RCC_AHB6SECSR		0x8dc
#define RCC_VERR		0xff4
#define RCC_IDR			0xff8
#define RCC_SIDR		0xffc

/* RCC_SECCFGR register fields */
#define RCC_SECCFGR_HSISEC	0
#define RCC_SECCFGR_CSISEC	1
#define RCC_SECCFGR_HSESEC	2
#define RCC_SECCFGR_LSISEC	3
#define RCC_SECCFGR_LSESEC	4
#define RCC_SECCFGR_PLL12SEC	8
#define RCC_SECCFGR_PLL3SEC	9
#define RCC_SECCFGR_PLL4SEC	10
#define RCC_SECCFGR_MPUSEC	11
#define RCC_SECCFGR_AXISEC	12
#define RCC_SECCFGR_MLAHBSEC	13
#define RCC_SECCFGR_APB3DIVSEC	16
#define RCC_SECCFGR_APB4DIVSEC	17
#define RCC_SECCFGR_APB5DIVSEC	18
#define RCC_SECCFGR_APB6DIVSEC	19
#define RCC_SECCFGR_TIMG3SEC	20
#define RCC_SECCFGR_CPERSEC	21
#define RCC_SECCFGR_MCO1SEC	22
#define RCC_SECCFGR_MCO2SEC	23
#define RCC_SECCFGR_STPSEC	24
#define RCC_SECCFGR_RSTSEC	25
#define RCC_SECCFGR_PWRSEC	31
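/*
 * Illustrative note: unlike the BIT()-valued fields that follow, the
 * RCC_SECCFGR_* values above are plain bit positions, so a typical user
 * would test or set them as, for example, BIT(RCC_SECCFGR_PLL4SEC).
 */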
RCC_SECCFGR_PWRSEC 31 /* RCC_MP_SREQSETR register fields */ #define RCC_MP_SREQSETR_STPREQ_P0 BIT(0) /* RCC_MP_SREQCLRR register fields */ #define RCC_MP_SREQCLRR_STPREQ_P0 BIT(0) /* RCC_MP_APRSTCR register fields */ #define RCC_MP_APRSTCR_RDCTLEN BIT(0) #define RCC_MP_APRSTCR_RSTTO_MASK GENMASK(14, 8) #define RCC_MP_APRSTCR_RSTTO_SHIFT 8 /* RCC_MP_APRSTSR register fields */ #define RCC_MP_APRSTSR_RSTTOV_MASK GENMASK(14, 8) #define RCC_MP_APRSTSR_RSTTOV_SHIFT 8 /* RCC_PWRLPDLYCR register fields */ #define RCC_PWRLPDLYCR_PWRLP_DLY_MASK GENMASK(21, 0) #define RCC_PWRLPDLYCR_PWRLP_DLY_SHIFT 0 /* RCC_MP_GRSTCSETR register fields */ #define RCC_MP_GRSTCSETR_MPSYSRST BIT(0) #define RCC_MP_GRSTCSETR_MPUP0RST BIT(4) /* RCC_BR_RSTSCLRR register fields */ #define RCC_BR_RSTSCLRR_PORRSTF BIT(0) #define RCC_BR_RSTSCLRR_BORRSTF BIT(1) #define RCC_BR_RSTSCLRR_PADRSTF BIT(2) #define RCC_BR_RSTSCLRR_HCSSRSTF BIT(3) #define RCC_BR_RSTSCLRR_VCORERSTF BIT(4) #define RCC_BR_RSTSCLRR_VCPURSTF BIT(5) #define RCC_BR_RSTSCLRR_MPSYSRSTF BIT(6) #define RCC_BR_RSTSCLRR_IWDG1RSTF BIT(8) #define RCC_BR_RSTSCLRR_IWDG2RSTF BIT(9) #define RCC_BR_RSTSCLRR_MPUP0RSTF BIT(13) /* RCC_MP_RSTSSETR register fields */ #define RCC_MP_RSTSSETR_PORRSTF BIT(0) #define RCC_MP_RSTSSETR_BORRSTF BIT(1) #define RCC_MP_RSTSSETR_PADRSTF BIT(2) #define RCC_MP_RSTSSETR_HCSSRSTF BIT(3) #define RCC_MP_RSTSSETR_VCORERSTF BIT(4) #define RCC_MP_RSTSSETR_VCPURSTF BIT(5) #define RCC_MP_RSTSSETR_MPSYSRSTF BIT(6) #define RCC_MP_RSTSSETR_IWDG1RSTF BIT(8) #define RCC_MP_RSTSSETR_IWDG2RSTF BIT(9) #define RCC_MP_RSTSSETR_STP2RSTF BIT(10) #define RCC_MP_RSTSSETR_STDBYRSTF BIT(11) #define RCC_MP_RSTSSETR_CSTDBYRSTF BIT(12) #define RCC_MP_RSTSSETR_MPUP0RSTF BIT(13) #define RCC_MP_RSTSSETR_SPARE BIT(15) /* RCC_MP_RSTSCLRR register fields */ #define RCC_MP_RSTSCLRR_PORRSTF BIT(0) #define RCC_MP_RSTSCLRR_BORRSTF BIT(1) #define RCC_MP_RSTSCLRR_PADRSTF BIT(2) #define RCC_MP_RSTSCLRR_HCSSRSTF BIT(3) #define RCC_MP_RSTSCLRR_VCORERSTF BIT(4) #define RCC_MP_RSTSCLRR_VCPURSTF BIT(5) #define RCC_MP_RSTSCLRR_MPSYSRSTF BIT(6) #define RCC_MP_RSTSCLRR_IWDG1RSTF BIT(8) #define RCC_MP_RSTSCLRR_IWDG2RSTF BIT(9) #define RCC_MP_RSTSCLRR_STP2RSTF BIT(10) #define RCC_MP_RSTSCLRR_STDBYRSTF BIT(11) #define RCC_MP_RSTSCLRR_CSTDBYRSTF BIT(12) #define RCC_MP_RSTSCLRR_MPUP0RSTF BIT(13) #define RCC_MP_RSTSCLRR_SPARE BIT(15) /* RCC_MP_IWDGFZSETR register fields */ #define RCC_MP_IWDGFZSETR_FZ_IWDG1 BIT(0) #define RCC_MP_IWDGFZSETR_FZ_IWDG2 BIT(1) /* RCC_MP_IWDGFZCLRR register fields */ #define RCC_MP_IWDGFZCLRR_FZ_IWDG1 BIT(0) #define RCC_MP_IWDGFZCLRR_FZ_IWDG2 BIT(1) /* RCC_MP_CIER register fields */ #define RCC_MP_CIER_LSIRDYIE BIT(0) #define RCC_MP_CIER_LSERDYIE BIT(1) #define RCC_MP_CIER_HSIRDYIE BIT(2) #define RCC_MP_CIER_HSERDYIE BIT(3) #define RCC_MP_CIER_CSIRDYIE BIT(4) #define RCC_MP_CIER_PLL1DYIE BIT(8) #define RCC_MP_CIER_PLL2DYIE BIT(9) #define RCC_MP_CIER_PLL3DYIE BIT(10) #define RCC_MP_CIER_PLL4DYIE BIT(11) #define RCC_MP_CIER_LSECSSIE BIT(16) #define RCC_MP_CIER_WKUPIE BIT(20) /* RCC_MP_CIFR register fields */ #define RCC_MP_CIFR_LSIRDYF BIT(0) #define RCC_MP_CIFR_LSERDYF BIT(1) #define RCC_MP_CIFR_HSIRDYF BIT(2) #define RCC_MP_CIFR_HSERDYF BIT(3) #define RCC_MP_CIFR_CSIRDYF BIT(4) #define RCC_MP_CIFR_PLL1DYF BIT(8) #define RCC_MP_CIFR_PLL2DYF BIT(9) #define RCC_MP_CIFR_PLL3DYF BIT(10) #define RCC_MP_CIFR_PLL4DYF BIT(11) #define RCC_MP_CIFR_LSECSSF BIT(16) #define RCC_MP_CIFR_WKUPF BIT(20) /* RCC_BDCR register fields */ #define RCC_BDCR_LSEON BIT(0) #define RCC_BDCR_LSEBYP 
BIT(1) #define RCC_BDCR_LSERDY BIT(2) #define RCC_BDCR_DIGBYP BIT(3) #define RCC_BDCR_LSEDRV_MASK GENMASK(5, 4) #define RCC_BDCR_LSECSSON BIT(8) #define RCC_BDCR_LSECSSD BIT(9) #define RCC_BDCR_RTCSRC_MASK GENMASK(17, 16) #define RCC_BDCR_RTCCKEN BIT(20) #define RCC_BDCR_VSWRST BIT(31) #define RCC_BDCR_LSEDRV_SHIFT 4 #define RCC_BDCR_RTCSRC_SHIFT 16 /* RCC_RDLSICR register fields */ #define RCC_RDLSICR_LSION BIT(0) #define RCC_RDLSICR_LSIRDY BIT(1) #define RCC_RDLSICR_MRD_MASK GENMASK(20, 16) #define RCC_RDLSICR_EADLY_MASK GENMASK(26, 24) #define RCC_RDLSICR_SPARE_MASK GENMASK(31, 27) #define RCC_RDLSICR_MRD_SHIFT 16 #define RCC_RDLSICR_EADLY_SHIFT 24 #define RCC_RDLSICR_SPARE_SHIFT 27 /* RCC_OCENSETR register fields */ #define RCC_OCENSETR_HSION BIT(0) #define RCC_OCENSETR_HSIKERON BIT(1) #define RCC_OCENSETR_CSION BIT(4) #define RCC_OCENSETR_CSIKERON BIT(5) #define RCC_OCENSETR_DIGBYP BIT(7) #define RCC_OCENSETR_HSEON BIT(8) #define RCC_OCENSETR_HSEKERON BIT(9) #define RCC_OCENSETR_HSEBYP BIT(10) #define RCC_OCENSETR_HSECSSON BIT(11) /* RCC_OCENCLRR register fields */ #define RCC_OCENCLRR_HSION BIT(0) #define RCC_OCENCLRR_HSIKERON BIT(1) #define RCC_OCENCLRR_CSION BIT(4) #define RCC_OCENCLRR_CSIKERON BIT(5) #define RCC_OCENCLRR_DIGBYP BIT(7) #define RCC_OCENCLRR_HSEON BIT(8) #define RCC_OCENCLRR_HSEKERON BIT(9) #define RCC_OCENCLRR_HSEBYP BIT(10) /* RCC_OCRDYR register fields */ #define RCC_OCRDYR_HSIRDY BIT(0) #define RCC_OCRDYR_HSIDIVRDY BIT(2) #define RCC_OCRDYR_CSIRDY BIT(4) #define RCC_OCRDYR_HSERDY BIT(8) #define RCC_OCRDYR_MPUCKRDY BIT(23) #define RCC_OCRDYR_AXICKRDY BIT(24) /* RCC_HSICFGR register fields */ #define RCC_HSICFGR_HSIDIV_MASK GENMASK(1, 0) #define RCC_HSICFGR_HSITRIM_MASK GENMASK(14, 8) #define RCC_HSICFGR_HSICAL_MASK GENMASK(27, 16) #define RCC_HSICFGR_HSIDIV_SHIFT 0 #define RCC_HSICFGR_HSITRIM_SHIFT 8 #define RCC_HSICFGR_HSICAL_SHIFT 16 /* RCC_CSICFGR register fields */ #define RCC_CSICFGR_CSITRIM_MASK GENMASK(12, 8) #define RCC_CSICFGR_CSICAL_MASK GENMASK(23, 16) #define RCC_CSICFGR_CSITRIM_SHIFT 8 #define RCC_CSICFGR_CSICAL_SHIFT 16 /* RCC_MCO1CFGR register fields */ #define RCC_MCO1CFGR_MCO1SEL_MASK GENMASK(2, 0) #define RCC_MCO1CFGR_MCO1DIV_MASK GENMASK(7, 4) #define RCC_MCO1CFGR_MCO1ON BIT(12) #define RCC_MCO1CFGR_MCO1SEL_SHIFT 0 #define RCC_MCO1CFGR_MCO1DIV_SHIFT 4 /* RCC_MCO2CFGR register fields */ #define RCC_MCO2CFGR_MCO2SEL_MASK GENMASK(2, 0) #define RCC_MCO2CFGR_MCO2DIV_MASK GENMASK(7, 4) #define RCC_MCO2CFGR_MCO2ON BIT(12) #define RCC_MCO2CFGR_MCO2SEL_SHIFT 0 #define RCC_MCO2CFGR_MCO2DIV_SHIFT 4 /* RCC_DBGCFGR register fields */ #define RCC_DBGCFGR_TRACEDIV_MASK GENMASK(2, 0) #define RCC_DBGCFGR_DBGCKEN BIT(8) #define RCC_DBGCFGR_TRACECKEN BIT(9) #define RCC_DBGCFGR_DBGRST BIT(12) #define RCC_DBGCFGR_TRACEDIV_SHIFT 0 /* RCC_RCK12SELR register fields */ #define RCC_RCK12SELR_PLL12SRC_MASK GENMASK(1, 0) #define RCC_RCK12SELR_PLL12SRCRDY BIT(31) #define RCC_RCK12SELR_PLL12SRC_SHIFT 0 /* RCC_RCK3SELR register fields */ #define RCC_RCK3SELR_PLL3SRC_MASK GENMASK(1, 0) #define RCC_RCK3SELR_PLL3SRCRDY BIT(31) #define RCC_RCK3SELR_PLL3SRC_SHIFT 0 /* RCC_RCK4SELR register fields */ #define RCC_RCK4SELR_PLL4SRC_MASK GENMASK(1, 0) #define RCC_RCK4SELR_PLL4SRCRDY BIT(31) #define RCC_RCK4SELR_PLL4SRC_SHIFT 0 /* RCC_PLL1CR register fields */ #define RCC_PLL1CR_PLLON BIT(0) #define RCC_PLL1CR_PLL1RDY BIT(1) #define RCC_PLL1CR_SSCG_CTRL BIT(2) #define RCC_PLL1CR_DIVPEN BIT(4) #define RCC_PLL1CR_DIVQEN BIT(5) #define RCC_PLL1CR_DIVREN BIT(6) /* RCC_PLL1CFGR1 register 
fields */ #define RCC_PLL1CFGR1_DIVN_MASK GENMASK(8, 0) #define RCC_PLL1CFGR1_DIVM1_MASK GENMASK(21, 16) #define RCC_PLL1CFGR1_DIVN_SHIFT 0 #define RCC_PLL1CFGR1_DIVM1_SHIFT 16 /* RCC_PLL1CFGR2 register fields */ #define RCC_PLL1CFGR2_DIVP_MASK GENMASK(6, 0) #define RCC_PLL1CFGR2_DIVQ_MASK GENMASK(14, 8) #define RCC_PLL1CFGR2_DIVR_MASK GENMASK(22, 16) #define RCC_PLL1CFGR2_DIVP_SHIFT 0 #define RCC_PLL1CFGR2_DIVQ_SHIFT 8 #define RCC_PLL1CFGR2_DIVR_SHIFT 16 /* RCC_PLL1FRACR register fields */ #define RCC_PLL1FRACR_FRACV_MASK GENMASK(15, 3) #define RCC_PLL1FRACR_FRACLE BIT(16) #define RCC_PLL1FRACR_FRACV_SHIFT 3 /* RCC_PLL1CSGR register fields */ #define RCC_PLL1CSGR_MOD_PER_MASK GENMASK(12, 0) #define RCC_PLL1CSGR_TPDFN_DIS BIT(13) #define RCC_PLL1CSGR_RPDFN_DIS BIT(14) #define RCC_PLL1CSGR_SSCG_MODE BIT(15) #define RCC_PLL1CSGR_INC_STEP_MASK GENMASK(30, 16) #define RCC_PLL1CSGR_MOD_PER_SHIFT 0 #define RCC_PLL1CSGR_INC_STEP_SHIFT 16 /* RCC_PLL2CR register fields */ #define RCC_PLL2CR_PLLON BIT(0) #define RCC_PLL2CR_PLL2RDY BIT(1) #define RCC_PLL2CR_SSCG_CTRL BIT(2) #define RCC_PLL2CR_DIVPEN BIT(4) #define RCC_PLL2CR_DIVQEN BIT(5) #define RCC_PLL2CR_DIVREN BIT(6) /* RCC_PLL2CFGR1 register fields */ #define RCC_PLL2CFGR1_DIVN_MASK GENMASK(8, 0) #define RCC_PLL2CFGR1_DIVM2_MASK GENMASK(21, 16) #define RCC_PLL2CFGR1_DIVN_SHIFT 0 #define RCC_PLL2CFGR1_DIVM2_SHIFT 16 /* RCC_PLL2CFGR2 register fields */ #define RCC_PLL2CFGR2_DIVP_MASK GENMASK(6, 0) #define RCC_PLL2CFGR2_DIVQ_MASK GENMASK(14, 8) #define RCC_PLL2CFGR2_DIVR_MASK GENMASK(22, 16) #define RCC_PLL2CFGR2_DIVP_SHIFT 0 #define RCC_PLL2CFGR2_DIVQ_SHIFT 8 #define RCC_PLL2CFGR2_DIVR_SHIFT 16 /* RCC_PLL2FRACR register fields */ #define RCC_PLL2FRACR_FRACV_MASK GENMASK(15, 3) #define RCC_PLL2FRACR_FRACLE BIT(16) #define RCC_PLL2FRACR_FRACV_SHIFT 3 /* RCC_PLL2CSGR register fields */ #define RCC_PLL2CSGR_MOD_PER_MASK GENMASK(12, 0) #define RCC_PLL2CSGR_TPDFN_DIS BIT(13) #define RCC_PLL2CSGR_RPDFN_DIS BIT(14) #define RCC_PLL2CSGR_SSCG_MODE BIT(15) #define RCC_PLL2CSGR_INC_STEP_MASK GENMASK(30, 16) #define RCC_PLL2CSGR_MOD_PER_SHIFT 0 #define RCC_PLL2CSGR_INC_STEP_SHIFT 16 /* RCC_PLL3CR register fields */ #define RCC_PLL3CR_PLLON BIT(0) #define RCC_PLL3CR_PLL3RDY BIT(1) #define RCC_PLL3CR_SSCG_CTRL BIT(2) #define RCC_PLL3CR_DIVPEN BIT(4) #define RCC_PLL3CR_DIVQEN BIT(5) #define RCC_PLL3CR_DIVREN BIT(6) /* RCC_PLL3CFGR1 register fields */ #define RCC_PLL3CFGR1_DIVN_MASK GENMASK(8, 0) #define RCC_PLL3CFGR1_DIVM3_MASK GENMASK(21, 16) #define RCC_PLL3CFGR1_IFRGE_MASK GENMASK(25, 24) #define RCC_PLL3CFGR1_DIVN_SHIFT 0 #define RCC_PLL3CFGR1_DIVM3_SHIFT 16 #define RCC_PLL3CFGR1_IFRGE_SHIFT 24 /* RCC_PLL3CFGR2 register fields */ #define RCC_PLL3CFGR2_DIVP_MASK GENMASK(6, 0) #define RCC_PLL3CFGR2_DIVQ_MASK GENMASK(14, 8) #define RCC_PLL3CFGR2_DIVR_MASK GENMASK(22, 16) #define RCC_PLL3CFGR2_DIVP_SHIFT 0 #define RCC_PLL3CFGR2_DIVQ_SHIFT 8 #define RCC_PLL3CFGR2_DIVR_SHIFT 16 /* RCC_PLL3FRACR register fields */ #define RCC_PLL3FRACR_FRACV_MASK GENMASK(15, 3) #define RCC_PLL3FRACR_FRACLE BIT(16) #define RCC_PLL3FRACR_FRACV_SHIFT 3 /* RCC_PLL3CSGR register fields */ #define RCC_PLL3CSGR_MOD_PER_MASK GENMASK(12, 0) #define RCC_PLL3CSGR_TPDFN_DIS BIT(13) #define RCC_PLL3CSGR_RPDFN_DIS BIT(14) #define RCC_PLL3CSGR_SSCG_MODE BIT(15) #define RCC_PLL3CSGR_INC_STEP_MASK GENMASK(30, 16) #define RCC_PLL3CSGR_MOD_PER_SHIFT 0 #define RCC_PLL3CSGR_INC_STEP_SHIFT 16 /* RCC_PLL4CR register fields */ #define RCC_PLL4CR_PLLON BIT(0) #define RCC_PLL4CR_PLL4RDY BIT(1) #define 
RCC_PLL4CR_SSCG_CTRL BIT(2) #define RCC_PLL4CR_DIVPEN BIT(4) #define RCC_PLL4CR_DIVQEN BIT(5) #define RCC_PLL4CR_DIVREN BIT(6) /* RCC_PLL4CFGR1 register fields */ #define RCC_PLL4CFGR1_DIVN_MASK GENMASK(8, 0) #define RCC_PLL4CFGR1_DIVM4_MASK GENMASK(21, 16) #define RCC_PLL4CFGR1_IFRGE_MASK GENMASK(25, 24) #define RCC_PLL4CFGR1_DIVN_SHIFT 0 #define RCC_PLL4CFGR1_DIVM4_SHIFT 16 #define RCC_PLL4CFGR1_IFRGE_SHIFT 24 /* RCC_PLL4CFGR2 register fields */ #define RCC_PLL4CFGR2_DIVP_MASK GENMASK(6, 0) #define RCC_PLL4CFGR2_DIVQ_MASK GENMASK(14, 8) #define RCC_PLL4CFGR2_DIVR_MASK GENMASK(22, 16) #define RCC_PLL4CFGR2_DIVP_SHIFT 0 #define RCC_PLL4CFGR2_DIVQ_SHIFT 8 #define RCC_PLL4CFGR2_DIVR_SHIFT 16 /* RCC_PLL4FRACR register fields */ #define RCC_PLL4FRACR_FRACV_MASK GENMASK(15, 3) #define RCC_PLL4FRACR_FRACLE BIT(16) #define RCC_PLL4FRACR_FRACV_SHIFT 3 /* RCC_PLL4CSGR register fields */ #define RCC_PLL4CSGR_MOD_PER_MASK GENMASK(12, 0) #define RCC_PLL4CSGR_TPDFN_DIS BIT(13) #define RCC_PLL4CSGR_RPDFN_DIS BIT(14) #define RCC_PLL4CSGR_SSCG_MODE BIT(15) #define RCC_PLL4CSGR_INC_STEP_MASK GENMASK(30, 16) #define RCC_PLL4CSGR_MOD_PER_SHIFT 0 #define RCC_PLL4CSGR_INC_STEP_SHIFT 16 /* RCC_MPCKSELR register fields */ #define RCC_MPCKSELR_MPUSRC_MASK GENMASK(1, 0) #define RCC_MPCKSELR_MPUSRCRDY BIT(31) #define RCC_MPCKSELR_MPUSRC_SHIFT 0 /* RCC_ASSCKSELR register fields */ #define RCC_ASSCKSELR_AXISSRC_MASK GENMASK(2, 0) #define RCC_ASSCKSELR_AXISSRCRDY BIT(31) #define RCC_ASSCKSELR_AXISSRC_SHIFT 0 /* RCC_MSSCKSELR register fields */ #define RCC_MSSCKSELR_MLAHBSSRC_MASK GENMASK(1, 0) #define RCC_MSSCKSELR_MLAHBSSRCRDY BIT(31) #define RCC_MSSCKSELR_MLAHBSSRC_SHIFT 0 /* RCC_CPERCKSELR register fields */ #define RCC_CPERCKSELR_CKPERSRC_MASK GENMASK(1, 0) #define RCC_CPERCKSELR_CKPERSRC_SHIFT 0 /* RCC_RTCDIVR register fields */ #define RCC_RTCDIVR_RTCDIV_MASK GENMASK(5, 0) #define RCC_RTCDIVR_RTCDIV_SHIFT 0 /* RCC_MPCKDIVR register fields */ #define RCC_MPCKDIVR_MPUDIV_MASK GENMASK(3, 0) #define RCC_MPCKDIVR_MPUDIVRDY BIT(31) #define RCC_MPCKDIVR_MPUDIV_SHIFT 0 /* RCC_AXIDIVR register fields */ #define RCC_AXIDIVR_AXIDIV_MASK GENMASK(2, 0) #define RCC_AXIDIVR_AXIDIVRDY BIT(31) #define RCC_AXIDIVR_AXIDIV_SHIFT 0 /* RCC_MLAHBDIVR register fields */ #define RCC_MLAHBDIVR_MLAHBDIV_MASK GENMASK(3, 0) #define RCC_MLAHBDIVR_MLAHBDIVRDY BIT(31) #define RCC_MLAHBDIVR_MLAHBDIV_SHIFT 0 /* RCC_APB1DIVR register fields */ #define RCC_APB1DIVR_APB1DIV_MASK GENMASK(2, 0) #define RCC_APB1DIVR_APB1DIVRDY BIT(31) #define RCC_APB1DIVR_APB1DIV_SHIFT 0 /* RCC_APB2DIVR register fields */ #define RCC_APB2DIVR_APB2DIV_MASK GENMASK(2, 0) #define RCC_APB2DIVR_APB2DIVRDY BIT(31) #define RCC_APB2DIVR_APB2DIV_SHIFT 0 /* RCC_APB3DIVR register fields */ #define RCC_APB3DIVR_APB3DIV_MASK GENMASK(2, 0) #define RCC_APB3DIVR_APB3DIVRDY BIT(31) #define RCC_APB3DIVR_APB3DIV_SHIFT 0 /* RCC_APB4DIVR register fields */ #define RCC_APB4DIVR_APB4DIV_MASK GENMASK(2, 0) #define RCC_APB4DIVR_APB4DIVRDY BIT(31) #define RCC_APB4DIVR_APB4DIV_SHIFT 0 /* RCC_APB5DIVR register fields */ #define RCC_APB5DIVR_APB5DIV_MASK GENMASK(2, 0) #define RCC_APB5DIVR_APB5DIVRDY BIT(31) #define RCC_APB5DIVR_APB5DIV_SHIFT 0 /* RCC_APB6DIVR register fields */ #define RCC_APB6DIVR_APB6DIV_MASK GENMASK(2, 0) #define RCC_APB6DIVR_APB6DIVRDY BIT(31) #define RCC_APB6DIVR_APB6DIV_SHIFT 0 /* RCC_TIMG1PRER register fields */ #define RCC_TIMG1PRER_TIMG1PRE BIT(0) #define RCC_TIMG1PRER_TIMG1PRERDY BIT(31) /* RCC_TIMG2PRER register fields */ #define RCC_TIMG2PRER_TIMG2PRE BIT(0) #define 
RCC_TIMG2PRER_TIMG2PRERDY BIT(31) /* RCC_TIMG3PRER register fields */ #define RCC_TIMG3PRER_TIMG3PRE BIT(0) #define RCC_TIMG3PRER_TIMG3PRERDY BIT(31) /* RCC_DDRITFCR register fields */ #define RCC_DDRITFCR_DDRC1EN BIT(0) #define RCC_DDRITFCR_DDRC1LPEN BIT(1) #define RCC_DDRITFCR_DDRPHYCEN BIT(4) #define RCC_DDRITFCR_DDRPHYCLPEN BIT(5) #define RCC_DDRITFCR_DDRCAPBEN BIT(6) #define RCC_DDRITFCR_DDRCAPBLPEN BIT(7) #define RCC_DDRITFCR_AXIDCGEN BIT(8) #define RCC_DDRITFCR_DDRPHYCAPBEN BIT(9) #define RCC_DDRITFCR_DDRPHYCAPBLPEN BIT(10) #define RCC_DDRITFCR_KERDCG_DLY_MASK GENMASK(13, 11) #define RCC_DDRITFCR_DDRCAPBRST BIT(14) #define RCC_DDRITFCR_DDRCAXIRST BIT(15) #define RCC_DDRITFCR_DDRCORERST BIT(16) #define RCC_DDRITFCR_DPHYAPBRST BIT(17) #define RCC_DDRITFCR_DPHYRST BIT(18) #define RCC_DDRITFCR_DPHYCTLRST BIT(19) #define RCC_DDRITFCR_DDRCKMOD_MASK GENMASK(22, 20) #define RCC_DDRITFCR_GSKPMOD BIT(23) #define RCC_DDRITFCR_GSKPCTRL BIT(24) #define RCC_DDRITFCR_DFILP_WIDTH_MASK GENMASK(27, 25) #define RCC_DDRITFCR_GSKP_DUR_MASK GENMASK(31, 28) #define RCC_DDRITFCR_KERDCG_DLY_SHIFT 11 #define RCC_DDRITFCR_DDRCKMOD_SHIFT 20 #define RCC_DDRITFCR_DFILP_WIDTH_SHIFT 25 #define RCC_DDRITFCR_GSKP_DUR_SHIFT 28 /* RCC_I2C12CKSELR register fields */ #define RCC_I2C12CKSELR_I2C12SRC_MASK GENMASK(2, 0) #define RCC_I2C12CKSELR_I2C12SRC_SHIFT 0 /* RCC_I2C345CKSELR register fields */ #define RCC_I2C345CKSELR_I2C3SRC_MASK GENMASK(2, 0) #define RCC_I2C345CKSELR_I2C4SRC_MASK GENMASK(5, 3) #define RCC_I2C345CKSELR_I2C5SRC_MASK GENMASK(8, 6) #define RCC_I2C345CKSELR_I2C3SRC_SHIFT 0 #define RCC_I2C345CKSELR_I2C4SRC_SHIFT 3 #define RCC_I2C345CKSELR_I2C5SRC_SHIFT 6 /* RCC_SPI2S1CKSELR register fields */ #define RCC_SPI2S1CKSELR_SPI1SRC_MASK GENMASK(2, 0) #define RCC_SPI2S1CKSELR_SPI1SRC_SHIFT 0 /* RCC_SPI2S23CKSELR register fields */ #define RCC_SPI2S23CKSELR_SPI23SRC_MASK GENMASK(2, 0) #define RCC_SPI2S23CKSELR_SPI23SRC_SHIFT 0 /* RCC_SPI45CKSELR register fields */ #define RCC_SPI45CKSELR_SPI4SRC_MASK GENMASK(2, 0) #define RCC_SPI45CKSELR_SPI5SRC_MASK GENMASK(5, 3) #define RCC_SPI45CKSELR_SPI4SRC_SHIFT 0 #define RCC_SPI45CKSELR_SPI5SRC_SHIFT 3 /* RCC_UART12CKSELR register fields */ #define RCC_UART12CKSELR_UART1SRC_MASK GENMASK(2, 0) #define RCC_UART12CKSELR_UART2SRC_MASK GENMASK(5, 3) #define RCC_UART12CKSELR_UART1SRC_SHIFT 0 #define RCC_UART12CKSELR_UART2SRC_SHIFT 3 /* RCC_UART35CKSELR register fields */ #define RCC_UART35CKSELR_UART35SRC_MASK GENMASK(2, 0) #define RCC_UART35CKSELR_UART35SRC_SHIFT 0 /* RCC_UART4CKSELR register fields */ #define RCC_UART4CKSELR_UART4SRC_MASK GENMASK(2, 0) #define RCC_UART4CKSELR_UART4SRC_SHIFT 0 /* RCC_UART6CKSELR register fields */ #define RCC_UART6CKSELR_UART6SRC_MASK GENMASK(2, 0) #define RCC_UART6CKSELR_UART6SRC_SHIFT 0 /* RCC_UART78CKSELR register fields */ #define RCC_UART78CKSELR_UART78SRC_MASK GENMASK(2, 0) #define RCC_UART78CKSELR_UART78SRC_SHIFT 0 /* RCC_LPTIM1CKSELR register fields */ #define RCC_LPTIM1CKSELR_LPTIM1SRC_MASK GENMASK(2, 0) #define RCC_LPTIM1CKSELR_LPTIM1SRC_SHIFT 0 /* RCC_LPTIM23CKSELR register fields */ #define RCC_LPTIM23CKSELR_LPTIM2SRC_MASK GENMASK(2, 0) #define RCC_LPTIM23CKSELR_LPTIM3SRC_MASK GENMASK(5, 3) #define RCC_LPTIM23CKSELR_LPTIM2SRC_SHIFT 0 #define RCC_LPTIM23CKSELR_LPTIM3SRC_SHIFT 3 /* RCC_LPTIM45CKSELR register fields */ #define RCC_LPTIM45CKSELR_LPTIM45SRC_MASK GENMASK(2, 0) #define RCC_LPTIM45CKSELR_LPTIM45SRC_SHIFT 0 /* RCC_SAI1CKSELR register fields */ #define RCC_SAI1CKSELR_SAI1SRC_MASK GENMASK(2, 0) #define 
RCC_SAI1CKSELR_SAI1SRC_SHIFT 0 /* RCC_SAI2CKSELR register fields */ #define RCC_SAI2CKSELR_SAI2SRC_MASK GENMASK(2, 0) #define RCC_SAI2CKSELR_SAI2SRC_SHIFT 0 /* RCC_FDCANCKSELR register fields */ #define RCC_FDCANCKSELR_FDCANSRC_MASK GENMASK(1, 0) #define RCC_FDCANCKSELR_FDCANSRC_SHIFT 0 /* RCC_SPDIFCKSELR register fields */ #define RCC_SPDIFCKSELR_SPDIFSRC_MASK GENMASK(1, 0) #define RCC_SPDIFCKSELR_SPDIFSRC_SHIFT 0 /* RCC_ADC12CKSELR register fields */ #define RCC_ADC12CKSELR_ADC1SRC_MASK GENMASK(1, 0) #define RCC_ADC12CKSELR_ADC2SRC_MASK GENMASK(3, 2) #define RCC_ADC12CKSELR_ADC1SRC_SHIFT 0 #define RCC_ADC12CKSELR_ADC2SRC_SHIFT 2 /* RCC_SDMMC12CKSELR register fields */ #define RCC_SDMMC12CKSELR_SDMMC1SRC_MASK GENMASK(2, 0) #define RCC_SDMMC12CKSELR_SDMMC2SRC_MASK GENMASK(5, 3) #define RCC_SDMMC12CKSELR_SDMMC1SRC_SHIFT 0 #define RCC_SDMMC12CKSELR_SDMMC2SRC_SHIFT 3 /* RCC_ETH12CKSELR register fields */ #define RCC_ETH12CKSELR_ETH1SRC_MASK GENMASK(1, 0) #define RCC_ETH12CKSELR_ETH1PTPDIV_MASK GENMASK(7, 4) #define RCC_ETH12CKSELR_ETH2SRC_MASK GENMASK(9, 8) #define RCC_ETH12CKSELR_ETH2PTPDIV_MASK GENMASK(15, 12) #define RCC_ETH12CKSELR_ETH1SRC_SHIFT 0 #define RCC_ETH12CKSELR_ETH1PTPDIV_SHIFT 4 #define RCC_ETH12CKSELR_ETH2SRC_SHIFT 8 #define RCC_ETH12CKSELR_ETH2PTPDIV_SHIFT 12 /* RCC_USBCKSELR register fields */ #define RCC_USBCKSELR_USBPHYSRC_MASK GENMASK(1, 0) #define RCC_USBCKSELR_USBOSRC BIT(4) #define RCC_USBCKSELR_USBPHYSRC_SHIFT 0 /* RCC_QSPICKSELR register fields */ #define RCC_QSPICKSELR_QSPISRC_MASK GENMASK(1, 0) #define RCC_QSPICKSELR_QSPISRC_SHIFT 0 /* RCC_FMCCKSELR register fields */ #define RCC_FMCCKSELR_FMCSRC_MASK GENMASK(1, 0) #define RCC_FMCCKSELR_FMCSRC_SHIFT 0 /* RCC_RNG1CKSELR register fields */ #define RCC_RNG1CKSELR_RNG1SRC_MASK GENMASK(1, 0) #define RCC_RNG1CKSELR_RNG1SRC_SHIFT 0 /* RCC_STGENCKSELR register fields */ #define RCC_STGENCKSELR_STGENSRC_MASK GENMASK(1, 0) #define RCC_STGENCKSELR_STGENSRC_SHIFT 0 /* RCC_DCMIPPCKSELR register fields */ #define RCC_DCMIPPCKSELR_DCMIPPSRC_MASK GENMASK(1, 0) #define RCC_DCMIPPCKSELR_DCMIPPSRC_SHIFT 0 /* RCC_SAESCKSELR register fields */ #define RCC_SAESCKSELR_SAESSRC_MASK GENMASK(1, 0) #define RCC_SAESCKSELR_SAESSRC_SHIFT 0 /* RCC_APB1RSTSETR register fields */ #define RCC_APB1RSTSETR_TIM2RST BIT(0) #define RCC_APB1RSTSETR_TIM3RST BIT(1) #define RCC_APB1RSTSETR_TIM4RST BIT(2) #define RCC_APB1RSTSETR_TIM5RST BIT(3) #define RCC_APB1RSTSETR_TIM6RST BIT(4) #define RCC_APB1RSTSETR_TIM7RST BIT(5) #define RCC_APB1RSTSETR_LPTIM1RST BIT(9) #define RCC_APB1RSTSETR_SPI2RST BIT(11) #define RCC_APB1RSTSETR_SPI3RST BIT(12) #define RCC_APB1RSTSETR_USART3RST BIT(15) #define RCC_APB1RSTSETR_UART4RST BIT(16) #define RCC_APB1RSTSETR_UART5RST BIT(17) #define RCC_APB1RSTSETR_UART7RST BIT(18) #define RCC_APB1RSTSETR_UART8RST BIT(19) #define RCC_APB1RSTSETR_I2C1RST BIT(21) #define RCC_APB1RSTSETR_I2C2RST BIT(22) #define RCC_APB1RSTSETR_SPDIFRST BIT(26) /* RCC_APB1RSTCLRR register fields */ #define RCC_APB1RSTCLRR_TIM2RST BIT(0) #define RCC_APB1RSTCLRR_TIM3RST BIT(1) #define RCC_APB1RSTCLRR_TIM4RST BIT(2) #define RCC_APB1RSTCLRR_TIM5RST BIT(3) #define RCC_APB1RSTCLRR_TIM6RST BIT(4) #define RCC_APB1RSTCLRR_TIM7RST BIT(5) #define RCC_APB1RSTCLRR_LPTIM1RST BIT(9) #define RCC_APB1RSTCLRR_SPI2RST BIT(11) #define RCC_APB1RSTCLRR_SPI3RST BIT(12) #define RCC_APB1RSTCLRR_USART3RST BIT(15) #define RCC_APB1RSTCLRR_UART4RST BIT(16) #define RCC_APB1RSTCLRR_UART5RST BIT(17) #define RCC_APB1RSTCLRR_UART7RST BIT(18) #define RCC_APB1RSTCLRR_UART8RST BIT(19) #define 
RCC_APB1RSTCLRR_I2C1RST BIT(21) #define RCC_APB1RSTCLRR_I2C2RST BIT(22) #define RCC_APB1RSTCLRR_SPDIFRST BIT(26) /* RCC_APB2RSTSETR register fields */ #define RCC_APB2RSTSETR_TIM1RST BIT(0) #define RCC_APB2RSTSETR_TIM8RST BIT(1) #define RCC_APB2RSTSETR_SPI1RST BIT(8) #define RCC_APB2RSTSETR_USART6RST BIT(13) #define RCC_APB2RSTSETR_SAI1RST BIT(16) #define RCC_APB2RSTSETR_SAI2RST BIT(17) #define RCC_APB2RSTSETR_DFSDMRST BIT(20) #define RCC_APB2RSTSETR_FDCANRST BIT(24) /* RCC_APB2RSTCLRR register fields */ #define RCC_APB2RSTCLRR_TIM1RST BIT(0) #define RCC_APB2RSTCLRR_TIM8RST BIT(1) #define RCC_APB2RSTCLRR_SPI1RST BIT(8) #define RCC_APB2RSTCLRR_USART6RST BIT(13) #define RCC_APB2RSTCLRR_SAI1RST BIT(16) #define RCC_APB2RSTCLRR_SAI2RST BIT(17) #define RCC_APB2RSTCLRR_DFSDMRST BIT(20) #define RCC_APB2RSTCLRR_FDCANRST BIT(24) /* RCC_APB3RSTSETR register fields */ #define RCC_APB3RSTSETR_LPTIM2RST BIT(0) #define RCC_APB3RSTSETR_LPTIM3RST BIT(1) #define RCC_APB3RSTSETR_LPTIM4RST BIT(2) #define RCC_APB3RSTSETR_LPTIM5RST BIT(3) #define RCC_APB3RSTSETR_SYSCFGRST BIT(11) #define RCC_APB3RSTSETR_VREFRST BIT(13) #define RCC_APB3RSTSETR_DTSRST BIT(16) #define RCC_APB3RSTSETR_PMBCTRLRST BIT(17) /* RCC_APB3RSTCLRR register fields */ #define RCC_APB3RSTCLRR_LPTIM2RST BIT(0) #define RCC_APB3RSTCLRR_LPTIM3RST BIT(1) #define RCC_APB3RSTCLRR_LPTIM4RST BIT(2) #define RCC_APB3RSTCLRR_LPTIM5RST BIT(3) #define RCC_APB3RSTCLRR_SYSCFGRST BIT(11) #define RCC_APB3RSTCLRR_VREFRST BIT(13) #define RCC_APB3RSTCLRR_DTSRST BIT(16) #define RCC_APB3RSTCLRR_PMBCTRLRST BIT(17) /* RCC_APB4RSTSETR register fields */ #define RCC_APB4RSTSETR_LTDCRST BIT(0) #define RCC_APB4RSTSETR_DCMIPPRST BIT(1) #define RCC_APB4RSTSETR_DDRPERFMRST BIT(8) #define RCC_APB4RSTSETR_USBPHYRST BIT(16) /* RCC_APB4RSTCLRR register fields */ #define RCC_APB4RSTCLRR_LTDCRST BIT(0) #define RCC_APB4RSTCLRR_DCMIPPRST BIT(1) #define RCC_APB4RSTCLRR_DDRPERFMRST BIT(8) #define RCC_APB4RSTCLRR_USBPHYRST BIT(16) /* RCC_APB5RSTSETR register fields */ #define RCC_APB5RSTSETR_STGENRST BIT(20) /* RCC_APB5RSTCLRR register fields */ #define RCC_APB5RSTCLRR_STGENRST BIT(20) /* RCC_APB6RSTSETR register fields */ #define RCC_APB6RSTSETR_USART1RST BIT(0) #define RCC_APB6RSTSETR_USART2RST BIT(1) #define RCC_APB6RSTSETR_SPI4RST BIT(2) #define RCC_APB6RSTSETR_SPI5RST BIT(3) #define RCC_APB6RSTSETR_I2C3RST BIT(4) #define RCC_APB6RSTSETR_I2C4RST BIT(5) #define RCC_APB6RSTSETR_I2C5RST BIT(6) #define RCC_APB6RSTSETR_TIM12RST BIT(7) #define RCC_APB6RSTSETR_TIM13RST BIT(8) #define RCC_APB6RSTSETR_TIM14RST BIT(9) #define RCC_APB6RSTSETR_TIM15RST BIT(10) #define RCC_APB6RSTSETR_TIM16RST BIT(11) #define RCC_APB6RSTSETR_TIM17RST BIT(12) /* RCC_APB6RSTCLRR register fields */ #define RCC_APB6RSTCLRR_USART1RST BIT(0) #define RCC_APB6RSTCLRR_USART2RST BIT(1) #define RCC_APB6RSTCLRR_SPI4RST BIT(2) #define RCC_APB6RSTCLRR_SPI5RST BIT(3) #define RCC_APB6RSTCLRR_I2C3RST BIT(4) #define RCC_APB6RSTCLRR_I2C4RST BIT(5) #define RCC_APB6RSTCLRR_I2C5RST BIT(6) #define RCC_APB6RSTCLRR_TIM12RST BIT(7) #define RCC_APB6RSTCLRR_TIM13RST BIT(8) #define RCC_APB6RSTCLRR_TIM14RST BIT(9) #define RCC_APB6RSTCLRR_TIM15RST BIT(10) #define RCC_APB6RSTCLRR_TIM16RST BIT(11) #define RCC_APB6RSTCLRR_TIM17RST BIT(12) /* RCC_AHB2RSTSETR register fields */ #define RCC_AHB2RSTSETR_DMA1RST BIT(0) #define RCC_AHB2RSTSETR_DMA2RST BIT(1) #define RCC_AHB2RSTSETR_DMAMUX1RST BIT(2) #define RCC_AHB2RSTSETR_DMA3RST BIT(3) #define RCC_AHB2RSTSETR_DMAMUX2RST BIT(4) #define RCC_AHB2RSTSETR_ADC1RST BIT(5) #define RCC_AHB2RSTSETR_ADC2RST 
BIT(6) #define RCC_AHB2RSTSETR_USBORST BIT(8) /* RCC_AHB2RSTCLRR register fields */ #define RCC_AHB2RSTCLRR_DMA1RST BIT(0) #define RCC_AHB2RSTCLRR_DMA2RST BIT(1) #define RCC_AHB2RSTCLRR_DMAMUX1RST BIT(2) #define RCC_AHB2RSTCLRR_DMA3RST BIT(3) #define RCC_AHB2RSTCLRR_DMAMUX2RST BIT(4) #define RCC_AHB2RSTCLRR_ADC1RST BIT(5) #define RCC_AHB2RSTCLRR_ADC2RST BIT(6) #define RCC_AHB2RSTCLRR_USBORST BIT(8) /* RCC_AHB4RSTSETR register fields */ #define RCC_AHB4RSTSETR_GPIOARST BIT(0) #define RCC_AHB4RSTSETR_GPIOBRST BIT(1) #define RCC_AHB4RSTSETR_GPIOCRST BIT(2) #define RCC_AHB4RSTSETR_GPIODRST BIT(3) #define RCC_AHB4RSTSETR_GPIOERST BIT(4) #define RCC_AHB4RSTSETR_GPIOFRST BIT(5) #define RCC_AHB4RSTSETR_GPIOGRST BIT(6) #define RCC_AHB4RSTSETR_GPIOHRST BIT(7) #define RCC_AHB4RSTSETR_GPIOIRST BIT(8) #define RCC_AHB4RSTSETR_TSCRST BIT(15) /* RCC_AHB4RSTCLRR register fields */ #define RCC_AHB4RSTCLRR_GPIOARST BIT(0) #define RCC_AHB4RSTCLRR_GPIOBRST BIT(1) #define RCC_AHB4RSTCLRR_GPIOCRST BIT(2) #define RCC_AHB4RSTCLRR_GPIODRST BIT(3) #define RCC_AHB4RSTCLRR_GPIOERST BIT(4) #define RCC_AHB4RSTCLRR_GPIOFRST BIT(5) #define RCC_AHB4RSTCLRR_GPIOGRST BIT(6) #define RCC_AHB4RSTCLRR_GPIOHRST BIT(7) #define RCC_AHB4RSTCLRR_GPIOIRST BIT(8) #define RCC_AHB4RSTCLRR_TSCRST BIT(15) /* RCC_AHB5RSTSETR register fields */ #define RCC_AHB5RSTSETR_PKARST BIT(2) #define RCC_AHB5RSTSETR_SAESRST BIT(3) #define RCC_AHB5RSTSETR_CRYP1RST BIT(4) #define RCC_AHB5RSTSETR_HASH1RST BIT(5) #define RCC_AHB5RSTSETR_RNG1RST BIT(6) #define RCC_AHB5RSTSETR_AXIMCRST BIT(16) /* RCC_AHB5RSTCLRR register fields */ #define RCC_AHB5RSTCLRR_PKARST BIT(2) #define RCC_AHB5RSTCLRR_SAESRST BIT(3) #define RCC_AHB5RSTCLRR_CRYP1RST BIT(4) #define RCC_AHB5RSTCLRR_HASH1RST BIT(5) #define RCC_AHB5RSTCLRR_RNG1RST BIT(6) #define RCC_AHB5RSTCLRR_AXIMCRST BIT(16) /* RCC_AHB6RSTSETR register fields */ #define RCC_AHB6RSTSETR_MDMARST BIT(0) #define RCC_AHB6RSTSETR_MCERST BIT(1) #define RCC_AHB6RSTSETR_ETH1MACRST BIT(10) #define RCC_AHB6RSTSETR_FMCRST BIT(12) #define RCC_AHB6RSTSETR_QSPIRST BIT(14) #define RCC_AHB6RSTSETR_SDMMC1RST BIT(16) #define RCC_AHB6RSTSETR_SDMMC2RST BIT(17) #define RCC_AHB6RSTSETR_CRC1RST BIT(20) #define RCC_AHB6RSTSETR_USBHRST BIT(24) #define RCC_AHB6RSTSETR_ETH2MACRST BIT(30) /* RCC_AHB6RSTCLRR register fields */ #define RCC_AHB6RSTCLRR_MDMARST BIT(0) #define RCC_AHB6RSTCLRR_MCERST BIT(1) #define RCC_AHB6RSTCLRR_ETH1MACRST BIT(10) #define RCC_AHB6RSTCLRR_FMCRST BIT(12) #define RCC_AHB6RSTCLRR_QSPIRST BIT(14) #define RCC_AHB6RSTCLRR_SDMMC1RST BIT(16) #define RCC_AHB6RSTCLRR_SDMMC2RST BIT(17) #define RCC_AHB6RSTCLRR_CRC1RST BIT(20) #define RCC_AHB6RSTCLRR_USBHRST BIT(24) #define RCC_AHB6RSTCLRR_ETH2MACRST BIT(30) /* RCC_MP_APB1ENSETR register fields */ #define RCC_MP_APB1ENSETR_TIM2EN BIT(0) #define RCC_MP_APB1ENSETR_TIM3EN BIT(1) #define RCC_MP_APB1ENSETR_TIM4EN BIT(2) #define RCC_MP_APB1ENSETR_TIM5EN BIT(3) #define RCC_MP_APB1ENSETR_TIM6EN BIT(4) #define RCC_MP_APB1ENSETR_TIM7EN BIT(5) #define RCC_MP_APB1ENSETR_LPTIM1EN BIT(9) #define RCC_MP_APB1ENSETR_SPI2EN BIT(11) #define RCC_MP_APB1ENSETR_SPI3EN BIT(12) #define RCC_MP_APB1ENSETR_USART3EN BIT(15) #define RCC_MP_APB1ENSETR_UART4EN BIT(16) #define RCC_MP_APB1ENSETR_UART5EN BIT(17) #define RCC_MP_APB1ENSETR_UART7EN BIT(18) #define RCC_MP_APB1ENSETR_UART8EN BIT(19) #define RCC_MP_APB1ENSETR_I2C1EN BIT(21) #define RCC_MP_APB1ENSETR_I2C2EN BIT(22) #define RCC_MP_APB1ENSETR_SPDIFEN BIT(26) /* RCC_MP_APB1ENCLRR register fields */ #define RCC_MP_APB1ENCLRR_TIM2EN BIT(0) #define 
RCC_MP_APB1ENCLRR_TIM3EN BIT(1) #define RCC_MP_APB1ENCLRR_TIM4EN BIT(2) #define RCC_MP_APB1ENCLRR_TIM5EN BIT(3) #define RCC_MP_APB1ENCLRR_TIM6EN BIT(4) #define RCC_MP_APB1ENCLRR_TIM7EN BIT(5) #define RCC_MP_APB1ENCLRR_LPTIM1EN BIT(9) #define RCC_MP_APB1ENCLRR_SPI2EN BIT(11) #define RCC_MP_APB1ENCLRR_SPI3EN BIT(12) #define RCC_MP_APB1ENCLRR_USART3EN BIT(15) #define RCC_MP_APB1ENCLRR_UART4EN BIT(16) #define RCC_MP_APB1ENCLRR_UART5EN BIT(17) #define RCC_MP_APB1ENCLRR_UART7EN BIT(18) #define RCC_MP_APB1ENCLRR_UART8EN BIT(19) #define RCC_MP_APB1ENCLRR_I2C1EN BIT(21) #define RCC_MP_APB1ENCLRR_I2C2EN BIT(22) #define RCC_MP_APB1ENCLRR_SPDIFEN BIT(26) /* RCC_MP_APB2ENSETR register fields */ #define RCC_MP_APB2ENSETR_TIM1EN BIT(0) #define RCC_MP_APB2ENSETR_TIM8EN BIT(1) #define RCC_MP_APB2ENSETR_SPI1EN BIT(8) #define RCC_MP_APB2ENSETR_USART6EN BIT(13) #define RCC_MP_APB2ENSETR_SAI1EN BIT(16) #define RCC_MP_APB2ENSETR_SAI2EN BIT(17) #define RCC_MP_APB2ENSETR_DFSDMEN BIT(20) #define RCC_MP_APB2ENSETR_ADFSDMEN BIT(21) #define RCC_MP_APB2ENSETR_FDCANEN BIT(24) /* RCC_MP_APB2ENCLRR register fields */ #define RCC_MP_APB2ENCLRR_TIM1EN BIT(0) #define RCC_MP_APB2ENCLRR_TIM8EN BIT(1) #define RCC_MP_APB2ENCLRR_SPI1EN BIT(8) #define RCC_MP_APB2ENCLRR_USART6EN BIT(13) #define RCC_MP_APB2ENCLRR_SAI1EN BIT(16) #define RCC_MP_APB2ENCLRR_SAI2EN BIT(17) #define RCC_MP_APB2ENCLRR_DFSDMEN BIT(20) #define RCC_MP_APB2ENCLRR_ADFSDMEN BIT(21) #define RCC_MP_APB2ENCLRR_FDCANEN BIT(24) /* RCC_MP_APB3ENSETR register fields */ #define RCC_MP_APB3ENSETR_LPTIM2EN BIT(0) #define RCC_MP_APB3ENSETR_LPTIM3EN BIT(1) #define RCC_MP_APB3ENSETR_LPTIM4EN BIT(2) #define RCC_MP_APB3ENSETR_LPTIM5EN BIT(3) #define RCC_MP_APB3ENSETR_VREFEN BIT(13) #define RCC_MP_APB3ENSETR_DTSEN BIT(16) #define RCC_MP_APB3ENSETR_PMBCTRLEN BIT(17) #define RCC_MP_APB3ENSETR_HDPEN BIT(20) /* RCC_MP_APB3ENCLRR register fields */ #define RCC_MP_APB3ENCLRR_LPTIM2EN BIT(0) #define RCC_MP_APB3ENCLRR_LPTIM3EN BIT(1) #define RCC_MP_APB3ENCLRR_LPTIM4EN BIT(2) #define RCC_MP_APB3ENCLRR_LPTIM5EN BIT(3) #define RCC_MP_APB3ENCLRR_VREFEN BIT(13) #define RCC_MP_APB3ENCLRR_DTSEN BIT(16) #define RCC_MP_APB3ENCLRR_PMBCTRLEN BIT(17) #define RCC_MP_APB3ENCLRR_HDPEN BIT(20) /* RCC_MP_S_APB3ENSETR register fields */ #define RCC_MP_S_APB3ENSETR_SYSCFGEN BIT(0) /* RCC_MP_S_APB3ENCLRR register fields */ #define RCC_MP_S_APB3ENCLRR_SYSCFGEN BIT(0) /* RCC_MP_NS_APB3ENSETR register fields */ #define RCC_MP_NS_APB3ENSETR_SYSCFGEN BIT(0) /* RCC_MP_NS_APB3ENCLRR register fields */ #define RCC_MP_NS_APB3ENCLRR_SYSCFGEN BIT(0) /* RCC_MP_APB4ENSETR register fields */ #define RCC_MP_APB4ENSETR_DCMIPPEN BIT(1) #define RCC_MP_APB4ENSETR_DDRPERFMEN BIT(8) #define RCC_MP_APB4ENSETR_IWDG2APBEN BIT(15) #define RCC_MP_APB4ENSETR_USBPHYEN BIT(16) #define RCC_MP_APB4ENSETR_STGENROEN BIT(20) /* RCC_MP_APB4ENCLRR register fields */ #define RCC_MP_APB4ENCLRR_DCMIPPEN BIT(1) #define RCC_MP_APB4ENCLRR_DDRPERFMEN BIT(8) #define RCC_MP_APB4ENCLRR_IWDG2APBEN BIT(15) #define RCC_MP_APB4ENCLRR_USBPHYEN BIT(16) #define RCC_MP_APB4ENCLRR_STGENROEN BIT(20) /* RCC_MP_S_APB4ENSETR register fields */ #define RCC_MP_S_APB4ENSETR_LTDCEN BIT(0) /* RCC_MP_S_APB4ENCLRR register fields */ #define RCC_MP_S_APB4ENCLRR_LTDCEN BIT(0) /* RCC_MP_NS_APB4ENSETR register fields */ #define RCC_MP_NS_APB4ENSETR_LTDCEN BIT(0) /* RCC_MP_NS_APB4ENCLRR register fields */ #define RCC_MP_NS_APB4ENCLRR_LTDCEN BIT(0) /* RCC_MP_APB5ENSETR register fields */ #define RCC_MP_APB5ENSETR_RTCAPBEN BIT(8) #define RCC_MP_APB5ENSETR_TZCEN BIT(11) #define 
RCC_MP_APB5ENSETR_ETZPCEN BIT(13) #define RCC_MP_APB5ENSETR_IWDG1APBEN BIT(15) #define RCC_MP_APB5ENSETR_BSECEN BIT(16) #define RCC_MP_APB5ENSETR_STGENCEN BIT(20) /* RCC_MP_APB5ENCLRR register fields */ #define RCC_MP_APB5ENCLRR_RTCAPBEN BIT(8) #define RCC_MP_APB5ENCLRR_TZCEN BIT(11) #define RCC_MP_APB5ENCLRR_ETZPCEN BIT(13) #define RCC_MP_APB5ENCLRR_IWDG1APBEN BIT(15) #define RCC_MP_APB5ENCLRR_BSECEN BIT(16) #define RCC_MP_APB5ENCLRR_STGENCEN BIT(20) /* RCC_MP_APB6ENSETR register fields */ #define RCC_MP_APB6ENSETR_USART1EN BIT(0) #define RCC_MP_APB6ENSETR_USART2EN BIT(1) #define RCC_MP_APB6ENSETR_SPI4EN BIT(2) #define RCC_MP_APB6ENSETR_SPI5EN BIT(3) #define RCC_MP_APB6ENSETR_I2C3EN BIT(4) #define RCC_MP_APB6ENSETR_I2C4EN BIT(5) #define RCC_MP_APB6ENSETR_I2C5EN BIT(6) #define RCC_MP_APB6ENSETR_TIM12EN BIT(7) #define RCC_MP_APB6ENSETR_TIM13EN BIT(8) #define RCC_MP_APB6ENSETR_TIM14EN BIT(9) #define RCC_MP_APB6ENSETR_TIM15EN BIT(10) #define RCC_MP_APB6ENSETR_TIM16EN BIT(11) #define RCC_MP_APB6ENSETR_TIM17EN BIT(12) /* RCC_MP_APB6ENCLRR register fields */ #define RCC_MP_APB6ENCLRR_USART1EN BIT(0) #define RCC_MP_APB6ENCLRR_USART2EN BIT(1) #define RCC_MP_APB6ENCLRR_SPI4EN BIT(2) #define RCC_MP_APB6ENCLRR_SPI5EN BIT(3) #define RCC_MP_APB6ENCLRR_I2C3EN BIT(4) #define RCC_MP_APB6ENCLRR_I2C4EN BIT(5) #define RCC_MP_APB6ENCLRR_I2C5EN BIT(6) #define RCC_MP_APB6ENCLRR_TIM12EN BIT(7) #define RCC_MP_APB6ENCLRR_TIM13EN BIT(8) #define RCC_MP_APB6ENCLRR_TIM14EN BIT(9) #define RCC_MP_APB6ENCLRR_TIM15EN BIT(10) #define RCC_MP_APB6ENCLRR_TIM16EN BIT(11) #define RCC_MP_APB6ENCLRR_TIM17EN BIT(12) /* RCC_MP_AHB2ENSETR register fields */ #define RCC_MP_AHB2ENSETR_DMA1EN BIT(0) #define RCC_MP_AHB2ENSETR_DMA2EN BIT(1) #define RCC_MP_AHB2ENSETR_DMAMUX1EN BIT(2) #define RCC_MP_AHB2ENSETR_DMA3EN BIT(3) #define RCC_MP_AHB2ENSETR_DMAMUX2EN BIT(4) #define RCC_MP_AHB2ENSETR_ADC1EN BIT(5) #define RCC_MP_AHB2ENSETR_ADC2EN BIT(6) #define RCC_MP_AHB2ENSETR_USBOEN BIT(8) /* RCC_MP_AHB2ENCLRR register fields */ #define RCC_MP_AHB2ENCLRR_DMA1EN BIT(0) #define RCC_MP_AHB2ENCLRR_DMA2EN BIT(1) #define RCC_MP_AHB2ENCLRR_DMAMUX1EN BIT(2) #define RCC_MP_AHB2ENCLRR_DMA3EN BIT(3) #define RCC_MP_AHB2ENCLRR_DMAMUX2EN BIT(4) #define RCC_MP_AHB2ENCLRR_ADC1EN BIT(5) #define RCC_MP_AHB2ENCLRR_ADC2EN BIT(6) #define RCC_MP_AHB2ENCLRR_USBOEN BIT(8) /* RCC_MP_AHB4ENSETR register fields */ #define RCC_MP_AHB4ENSETR_TSCEN BIT(15) /* RCC_MP_AHB4ENCLRR register fields */ #define RCC_MP_AHB4ENCLRR_TSCEN BIT(15) /* RCC_MP_S_AHB4ENSETR register fields */ #define RCC_MP_S_AHB4ENSETR_GPIOAEN BIT(0) #define RCC_MP_S_AHB4ENSETR_GPIOBEN BIT(1) #define RCC_MP_S_AHB4ENSETR_GPIOCEN BIT(2) #define RCC_MP_S_AHB4ENSETR_GPIODEN BIT(3) #define RCC_MP_S_AHB4ENSETR_GPIOEEN BIT(4) #define RCC_MP_S_AHB4ENSETR_GPIOFEN BIT(5) #define RCC_MP_S_AHB4ENSETR_GPIOGEN BIT(6) #define RCC_MP_S_AHB4ENSETR_GPIOHEN BIT(7) #define RCC_MP_S_AHB4ENSETR_GPIOIEN BIT(8) /* RCC_MP_S_AHB4ENCLRR register fields */ #define RCC_MP_S_AHB4ENCLRR_GPIOAEN BIT(0) #define RCC_MP_S_AHB4ENCLRR_GPIOBEN BIT(1) #define RCC_MP_S_AHB4ENCLRR_GPIOCEN BIT(2) #define RCC_MP_S_AHB4ENCLRR_GPIODEN BIT(3) #define RCC_MP_S_AHB4ENCLRR_GPIOEEN BIT(4) #define RCC_MP_S_AHB4ENCLRR_GPIOFEN BIT(5) #define RCC_MP_S_AHB4ENCLRR_GPIOGEN BIT(6) #define RCC_MP_S_AHB4ENCLRR_GPIOHEN BIT(7) #define RCC_MP_S_AHB4ENCLRR_GPIOIEN BIT(8) /* RCC_MP_NS_AHB4ENSETR register fields */ #define RCC_MP_NS_AHB4ENSETR_GPIOAEN BIT(0) #define RCC_MP_NS_AHB4ENSETR_GPIOBEN BIT(1) #define RCC_MP_NS_AHB4ENSETR_GPIOCEN BIT(2) #define 
RCC_MP_NS_AHB4ENSETR_GPIODEN BIT(3) #define RCC_MP_NS_AHB4ENSETR_GPIOEEN BIT(4) #define RCC_MP_NS_AHB4ENSETR_GPIOFEN BIT(5) #define RCC_MP_NS_AHB4ENSETR_GPIOGEN BIT(6) #define RCC_MP_NS_AHB4ENSETR_GPIOHEN BIT(7) #define RCC_MP_NS_AHB4ENSETR_GPIOIEN BIT(8) /* RCC_MP_NS_AHB4ENCLRR register fields */ #define RCC_MP_NS_AHB4ENCLRR_GPIOAEN BIT(0) #define RCC_MP_NS_AHB4ENCLRR_GPIOBEN BIT(1) #define RCC_MP_NS_AHB4ENCLRR_GPIOCEN BIT(2) #define RCC_MP_NS_AHB4ENCLRR_GPIODEN BIT(3) #define RCC_MP_NS_AHB4ENCLRR_GPIOEEN BIT(4) #define RCC_MP_NS_AHB4ENCLRR_GPIOFEN BIT(5) #define RCC_MP_NS_AHB4ENCLRR_GPIOGEN BIT(6) #define RCC_MP_NS_AHB4ENCLRR_GPIOHEN BIT(7) #define RCC_MP_NS_AHB4ENCLRR_GPIOIEN BIT(8) /* RCC_MP_AHB5ENSETR register fields */ #define RCC_MP_AHB5ENSETR_PKAEN BIT(2) #define RCC_MP_AHB5ENSETR_SAESEN BIT(3) #define RCC_MP_AHB5ENSETR_CRYP1EN BIT(4) #define RCC_MP_AHB5ENSETR_HASH1EN BIT(5) #define RCC_MP_AHB5ENSETR_RNG1EN BIT(6) #define RCC_MP_AHB5ENSETR_BKPSRAMEN BIT(8) #define RCC_MP_AHB5ENSETR_AXIMCEN BIT(16) /* RCC_MP_AHB5ENCLRR register fields */ #define RCC_MP_AHB5ENCLRR_PKAEN BIT(2) #define RCC_MP_AHB5ENCLRR_SAESEN BIT(3) #define RCC_MP_AHB5ENCLRR_CRYP1EN BIT(4) #define RCC_MP_AHB5ENCLRR_HASH1EN BIT(5) #define RCC_MP_AHB5ENCLRR_RNG1EN BIT(6) #define RCC_MP_AHB5ENCLRR_BKPSRAMEN BIT(8) #define RCC_MP_AHB5ENCLRR_AXIMCEN BIT(16) /* RCC_MP_AHB6ENSETR register fields */ #define RCC_MP_AHB6ENSETR_MCEEN BIT(1) #define RCC_MP_AHB6ENSETR_ETH1CKEN BIT(7) #define RCC_MP_AHB6ENSETR_ETH1TXEN BIT(8) #define RCC_MP_AHB6ENSETR_ETH1RXEN BIT(9) #define RCC_MP_AHB6ENSETR_ETH1MACEN BIT(10) #define RCC_MP_AHB6ENSETR_FMCEN BIT(12) #define RCC_MP_AHB6ENSETR_QSPIEN BIT(14) #define RCC_MP_AHB6ENSETR_SDMMC1EN BIT(16) #define RCC_MP_AHB6ENSETR_SDMMC2EN BIT(17) #define RCC_MP_AHB6ENSETR_CRC1EN BIT(20) #define RCC_MP_AHB6ENSETR_USBHEN BIT(24) #define RCC_MP_AHB6ENSETR_ETH2CKEN BIT(27) #define RCC_MP_AHB6ENSETR_ETH2TXEN BIT(28) #define RCC_MP_AHB6ENSETR_ETH2RXEN BIT(29) #define RCC_MP_AHB6ENSETR_ETH2MACEN BIT(30) /* RCC_MP_AHB6ENCLRR register fields */ #define RCC_MP_AHB6ENCLRR_MCEEN BIT(1) #define RCC_MP_AHB6ENCLRR_ETH1CKEN BIT(7) #define RCC_MP_AHB6ENCLRR_ETH1TXEN BIT(8) #define RCC_MP_AHB6ENCLRR_ETH1RXEN BIT(9) #define RCC_MP_AHB6ENCLRR_ETH1MACEN BIT(10) #define RCC_MP_AHB6ENCLRR_FMCEN BIT(12) #define RCC_MP_AHB6ENCLRR_QSPIEN BIT(14) #define RCC_MP_AHB6ENCLRR_SDMMC1EN BIT(16) #define RCC_MP_AHB6ENCLRR_SDMMC2EN BIT(17) #define RCC_MP_AHB6ENCLRR_CRC1EN BIT(20) #define RCC_MP_AHB6ENCLRR_USBHEN BIT(24) #define RCC_MP_AHB6ENCLRR_ETH2CKEN BIT(27) #define RCC_MP_AHB6ENCLRR_ETH2TXEN BIT(28) #define RCC_MP_AHB6ENCLRR_ETH2RXEN BIT(29) #define RCC_MP_AHB6ENCLRR_ETH2MACEN BIT(30) /* RCC_MP_S_AHB6ENSETR register fields */ #define RCC_MP_S_AHB6ENSETR_MDMAEN BIT(0) /* RCC_MP_S_AHB6ENCLRR register fields */ #define RCC_MP_S_AHB6ENCLRR_MDMAEN BIT(0) /* RCC_MP_NS_AHB6ENSETR register fields */ #define RCC_MP_NS_AHB6ENSETR_MDMAEN BIT(0) /* RCC_MP_NS_AHB6ENCLRR register fields */ #define RCC_MP_NS_AHB6ENCLRR_MDMAEN BIT(0) /* RCC_MP_APB1LPENSETR register fields */ #define RCC_MP_APB1LPENSETR_TIM2LPEN BIT(0) #define RCC_MP_APB1LPENSETR_TIM3LPEN BIT(1) #define RCC_MP_APB1LPENSETR_TIM4LPEN BIT(2) #define RCC_MP_APB1LPENSETR_TIM5LPEN BIT(3) #define RCC_MP_APB1LPENSETR_TIM6LPEN BIT(4) #define RCC_MP_APB1LPENSETR_TIM7LPEN BIT(5) #define RCC_MP_APB1LPENSETR_LPTIM1LPEN BIT(9) #define RCC_MP_APB1LPENSETR_SPI2LPEN BIT(11) #define RCC_MP_APB1LPENSETR_SPI3LPEN BIT(12) #define RCC_MP_APB1LPENSETR_USART3LPEN BIT(15) #define 
RCC_MP_APB1LPENSETR_UART4LPEN BIT(16) #define RCC_MP_APB1LPENSETR_UART5LPEN BIT(17) #define RCC_MP_APB1LPENSETR_UART7LPEN BIT(18) #define RCC_MP_APB1LPENSETR_UART8LPEN BIT(19) #define RCC_MP_APB1LPENSETR_I2C1LPEN BIT(21) #define RCC_MP_APB1LPENSETR_I2C2LPEN BIT(22) #define RCC_MP_APB1LPENSETR_SPDIFLPEN BIT(26) /* RCC_MP_APB1LPENCLRR register fields */ #define RCC_MP_APB1LPENCLRR_TIM2LPEN BIT(0) #define RCC_MP_APB1LPENCLRR_TIM3LPEN BIT(1) #define RCC_MP_APB1LPENCLRR_TIM4LPEN BIT(2) #define RCC_MP_APB1LPENCLRR_TIM5LPEN BIT(3) #define RCC_MP_APB1LPENCLRR_TIM6LPEN BIT(4) #define RCC_MP_APB1LPENCLRR_TIM7LPEN BIT(5) #define RCC_MP_APB1LPENCLRR_LPTIM1LPEN BIT(9) #define RCC_MP_APB1LPENCLRR_SPI2LPEN BIT(11) #define RCC_MP_APB1LPENCLRR_SPI3LPEN BIT(12) #define RCC_MP_APB1LPENCLRR_USART3LPEN BIT(15) #define RCC_MP_APB1LPENCLRR_UART4LPEN BIT(16) #define RCC_MP_APB1LPENCLRR_UART5LPEN BIT(17) #define RCC_MP_APB1LPENCLRR_UART7LPEN BIT(18) #define RCC_MP_APB1LPENCLRR_UART8LPEN BIT(19) #define RCC_MP_APB1LPENCLRR_I2C1LPEN BIT(21) #define RCC_MP_APB1LPENCLRR_I2C2LPEN BIT(22) #define RCC_MP_APB1LPENCLRR_SPDIFLPEN BIT(26) /* RCC_MP_APB2LPENSETR register fields */ #define RCC_MP_APB2LPENSETR_TIM1LPEN BIT(0) #define RCC_MP_APB2LPENSETR_TIM8LPEN BIT(1) #define RCC_MP_APB2LPENSETR_SPI1LPEN BIT(8) #define RCC_MP_APB2LPENSETR_USART6LPEN BIT(13) #define RCC_MP_APB2LPENSETR_SAI1LPEN BIT(16) #define RCC_MP_APB2LPENSETR_SAI2LPEN BIT(17) #define RCC_MP_APB2LPENSETR_DFSDMLPEN BIT(20) #define RCC_MP_APB2LPENSETR_ADFSDMLPEN BIT(21) #define RCC_MP_APB2LPENSETR_FDCANLPEN BIT(24) /* RCC_MP_APB2LPENCLRR register fields */ #define RCC_MP_APB2LPENCLRR_TIM1LPEN BIT(0) #define RCC_MP_APB2LPENCLRR_TIM8LPEN BIT(1) #define RCC_MP_APB2LPENCLRR_SPI1LPEN BIT(8) #define RCC_MP_APB2LPENCLRR_USART6LPEN BIT(13) #define RCC_MP_APB2LPENCLRR_SAI1LPEN BIT(16) #define RCC_MP_APB2LPENCLRR_SAI2LPEN BIT(17) #define RCC_MP_APB2LPENCLRR_DFSDMLPEN BIT(20) #define RCC_MP_APB2LPENCLRR_ADFSDMLPEN BIT(21) #define RCC_MP_APB2LPENCLRR_FDCANLPEN BIT(24) /* RCC_MP_APB3LPENSETR register fields */ #define RCC_MP_APB3LPENSETR_LPTIM2LPEN BIT(0) #define RCC_MP_APB3LPENSETR_LPTIM3LPEN BIT(1) #define RCC_MP_APB3LPENSETR_LPTIM4LPEN BIT(2) #define RCC_MP_APB3LPENSETR_LPTIM5LPEN BIT(3) #define RCC_MP_APB3LPENSETR_VREFLPEN BIT(13) #define RCC_MP_APB3LPENSETR_DTSLPEN BIT(16) #define RCC_MP_APB3LPENSETR_PMBCTRLLPEN BIT(17) /* RCC_MP_APB3LPENCLRR register fields */ #define RCC_MP_APB3LPENCLRR_LPTIM2LPEN BIT(0) #define RCC_MP_APB3LPENCLRR_LPTIM3LPEN BIT(1) #define RCC_MP_APB3LPENCLRR_LPTIM4LPEN BIT(2) #define RCC_MP_APB3LPENCLRR_LPTIM5LPEN BIT(3) #define RCC_MP_APB3LPENCLRR_VREFLPEN BIT(13) #define RCC_MP_APB3LPENCLRR_DTSLPEN BIT(16) #define RCC_MP_APB3LPENCLRR_PMBCTRLLPEN BIT(17) /* RCC_MP_S_APB3LPENSETR register fields */ #define RCC_MP_S_APB3LPENSETR_SYSCFGLPEN BIT(0) /* RCC_MP_S_APB3LPENCLRR register fields */ #define RCC_MP_S_APB3LPENCLRR_SYSCFGLPEN BIT(0) /* RCC_MP_NS_APB3LPENSETR register fields */ #define RCC_MP_NS_APB3LPENSETR_SYSCFGLPEN BIT(0) /* RCC_MP_NS_APB3LPENCLRR register fields */ #define RCC_MP_NS_APB3LPENCLRR_SYSCFGLPEN BIT(0) /* RCC_MP_APB4LPENSETR register fields */ #define RCC_MP_APB4LPENSETR_DCMIPPLPEN BIT(1) #define RCC_MP_APB4LPENSETR_DDRPERFMLPEN BIT(8) #define RCC_MP_APB4LPENSETR_IWDG2APBLPEN BIT(15) #define RCC_MP_APB4LPENSETR_USBPHYLPEN BIT(16) #define RCC_MP_APB4LPENSETR_STGENROLPEN BIT(20) #define RCC_MP_APB4LPENSETR_STGENROSTPEN BIT(21) /* RCC_MP_APB4LPENCLRR register fields */ #define RCC_MP_APB4LPENCLRR_DCMIPPLPEN BIT(1) #define 
RCC_MP_APB4LPENCLRR_DDRPERFMLPEN BIT(8) #define RCC_MP_APB4LPENCLRR_IWDG2APBLPEN BIT(15) #define RCC_MP_APB4LPENCLRR_USBPHYLPEN BIT(16) #define RCC_MP_APB4LPENCLRR_STGENROLPEN BIT(20) #define RCC_MP_APB4LPENCLRR_STGENROSTPEN BIT(21) /* RCC_MP_S_APB4LPENSETR register fields */ #define RCC_MP_S_APB4LPENSETR_LTDCLPEN BIT(0) /* RCC_MP_S_APB4LPENCLRR register fields */ #define RCC_MP_S_APB4LPENCLRR_LTDCLPEN BIT(0) /* RCC_MP_NS_APB4LPENSETR register fields */ #define RCC_MP_NS_APB4LPENSETR_LTDCLPEN BIT(0) /* RCC_MP_NS_APB4LPENCLRR register fields */ #define RCC_MP_NS_APB4LPENCLRR_LTDCLPEN BIT(0) /* RCC_MP_APB5LPENSETR register fields */ #define RCC_MP_APB5LPENSETR_RTCAPBLPEN BIT(8) #define RCC_MP_APB5LPENSETR_TZCLPEN BIT(11) #define RCC_MP_APB5LPENSETR_ETZPCLPEN BIT(13) #define RCC_MP_APB5LPENSETR_IWDG1APBLPEN BIT(15) #define RCC_MP_APB5LPENSETR_BSECLPEN BIT(16) #define RCC_MP_APB5LPENSETR_STGENCLPEN BIT(20) #define RCC_MP_APB5LPENSETR_STGENCSTPEN BIT(21) /* RCC_MP_APB5LPENCLRR register fields */ #define RCC_MP_APB5LPENCLRR_RTCAPBLPEN BIT(8) #define RCC_MP_APB5LPENCLRR_TZCLPEN BIT(11) #define RCC_MP_APB5LPENCLRR_ETZPCLPEN BIT(13) #define RCC_MP_APB5LPENCLRR_IWDG1APBLPEN BIT(15) #define RCC_MP_APB5LPENCLRR_BSECLPEN BIT(16) #define RCC_MP_APB5LPENCLRR_STGENCLPEN BIT(20) #define RCC_MP_APB5LPENCLRR_STGENCSTPEN BIT(21) /* RCC_MP_APB6LPENSETR register fields */ #define RCC_MP_APB6LPENSETR_USART1LPEN BIT(0) #define RCC_MP_APB6LPENSETR_USART2LPEN BIT(1) #define RCC_MP_APB6LPENSETR_SPI4LPEN BIT(2) #define RCC_MP_APB6LPENSETR_SPI5LPEN BIT(3) #define RCC_MP_APB6LPENSETR_I2C3LPEN BIT(4) #define RCC_MP_APB6LPENSETR_I2C4LPEN BIT(5) #define RCC_MP_APB6LPENSETR_I2C5LPEN BIT(6) #define RCC_MP_APB6LPENSETR_TIM12LPEN BIT(7) #define RCC_MP_APB6LPENSETR_TIM13LPEN BIT(8) #define RCC_MP_APB6LPENSETR_TIM14LPEN BIT(9) #define RCC_MP_APB6LPENSETR_TIM15LPEN BIT(10) #define RCC_MP_APB6LPENSETR_TIM16LPEN BIT(11) #define RCC_MP_APB6LPENSETR_TIM17LPEN BIT(12) /* RCC_MP_APB6LPENCLRR register fields */ #define RCC_MP_APB6LPENCLRR_USART1LPEN BIT(0) #define RCC_MP_APB6LPENCLRR_USART2LPEN BIT(1) #define RCC_MP_APB6LPENCLRR_SPI4LPEN BIT(2) #define RCC_MP_APB6LPENCLRR_SPI5LPEN BIT(3) #define RCC_MP_APB6LPENCLRR_I2C3LPEN BIT(4) #define RCC_MP_APB6LPENCLRR_I2C4LPEN BIT(5) #define RCC_MP_APB6LPENCLRR_I2C5LPEN BIT(6) #define RCC_MP_APB6LPENCLRR_TIM12LPEN BIT(7) #define RCC_MP_APB6LPENCLRR_TIM13LPEN BIT(8) #define RCC_MP_APB6LPENCLRR_TIM14LPEN BIT(9) #define RCC_MP_APB6LPENCLRR_TIM15LPEN BIT(10) #define RCC_MP_APB6LPENCLRR_TIM16LPEN BIT(11) #define RCC_MP_APB6LPENCLRR_TIM17LPEN BIT(12) /* RCC_MP_AHB2LPENSETR register fields */ #define RCC_MP_AHB2LPENSETR_DMA1LPEN BIT(0) #define RCC_MP_AHB2LPENSETR_DMA2LPEN BIT(1) #define RCC_MP_AHB2LPENSETR_DMAMUX1LPEN BIT(2) #define RCC_MP_AHB2LPENSETR_DMA3LPEN BIT(3) #define RCC_MP_AHB2LPENSETR_DMAMUX2LPEN BIT(4) #define RCC_MP_AHB2LPENSETR_ADC1LPEN BIT(5) #define RCC_MP_AHB2LPENSETR_ADC2LPEN BIT(6) #define RCC_MP_AHB2LPENSETR_USBOLPEN BIT(8) /* RCC_MP_AHB2LPENCLRR register fields */ #define RCC_MP_AHB2LPENCLRR_DMA1LPEN BIT(0) #define RCC_MP_AHB2LPENCLRR_DMA2LPEN BIT(1) #define RCC_MP_AHB2LPENCLRR_DMAMUX1LPEN BIT(2) #define RCC_MP_AHB2LPENCLRR_DMA3LPEN BIT(3) #define RCC_MP_AHB2LPENCLRR_DMAMUX2LPEN BIT(4) #define RCC_MP_AHB2LPENCLRR_ADC1LPEN BIT(5) #define RCC_MP_AHB2LPENCLRR_ADC2LPEN BIT(6) #define RCC_MP_AHB2LPENCLRR_USBOLPEN BIT(8) /* RCC_MP_AHB4LPENSETR register fields */ #define RCC_MP_AHB4LPENSETR_TSCLPEN BIT(15) /* RCC_MP_AHB4LPENCLRR register fields */ #define RCC_MP_AHB4LPENCLRR_TSCLPEN 
BIT(15) /* RCC_MP_S_AHB4LPENSETR register fields */ #define RCC_MP_S_AHB4LPENSETR_GPIOALPEN BIT(0) #define RCC_MP_S_AHB4LPENSETR_GPIOBLPEN BIT(1) #define RCC_MP_S_AHB4LPENSETR_GPIOCLPEN BIT(2) #define RCC_MP_S_AHB4LPENSETR_GPIODLPEN BIT(3) #define RCC_MP_S_AHB4LPENSETR_GPIOELPEN BIT(4) #define RCC_MP_S_AHB4LPENSETR_GPIOFLPEN BIT(5) #define RCC_MP_S_AHB4LPENSETR_GPIOGLPEN BIT(6) #define RCC_MP_S_AHB4LPENSETR_GPIOHLPEN BIT(7) #define RCC_MP_S_AHB4LPENSETR_GPIOILPEN BIT(8) /* RCC_MP_S_AHB4LPENCLRR register fields */ #define RCC_MP_S_AHB4LPENCLRR_GPIOALPEN BIT(0) #define RCC_MP_S_AHB4LPENCLRR_GPIOBLPEN BIT(1) #define RCC_MP_S_AHB4LPENCLRR_GPIOCLPEN BIT(2) #define RCC_MP_S_AHB4LPENCLRR_GPIODLPEN BIT(3) #define RCC_MP_S_AHB4LPENCLRR_GPIOELPEN BIT(4) #define RCC_MP_S_AHB4LPENCLRR_GPIOFLPEN BIT(5) #define RCC_MP_S_AHB4LPENCLRR_GPIOGLPEN BIT(6) #define RCC_MP_S_AHB4LPENCLRR_GPIOHLPEN BIT(7) #define RCC_MP_S_AHB4LPENCLRR_GPIOILPEN BIT(8) /* RCC_MP_NS_AHB4LPENSETR register fields */ #define RCC_MP_NS_AHB4LPENSETR_GPIOALPEN BIT(0) #define RCC_MP_NS_AHB4LPENSETR_GPIOBLPEN BIT(1) #define RCC_MP_NS_AHB4LPENSETR_GPIOCLPEN BIT(2) #define RCC_MP_NS_AHB4LPENSETR_GPIODLPEN BIT(3) #define RCC_MP_NS_AHB4LPENSETR_GPIOELPEN BIT(4) #define RCC_MP_NS_AHB4LPENSETR_GPIOFLPEN BIT(5) #define RCC_MP_NS_AHB4LPENSETR_GPIOGLPEN BIT(6) #define RCC_MP_NS_AHB4LPENSETR_GPIOHLPEN BIT(7) #define RCC_MP_NS_AHB4LPENSETR_GPIOILPEN BIT(8) /* RCC_MP_NS_AHB4LPENCLRR register fields */ #define RCC_MP_NS_AHB4LPENCLRR_GPIOALPEN BIT(0) #define RCC_MP_NS_AHB4LPENCLRR_GPIOBLPEN BIT(1) #define RCC_MP_NS_AHB4LPENCLRR_GPIOCLPEN BIT(2) #define RCC_MP_NS_AHB4LPENCLRR_GPIODLPEN BIT(3) #define RCC_MP_NS_AHB4LPENCLRR_GPIOELPEN BIT(4) #define RCC_MP_NS_AHB4LPENCLRR_GPIOFLPEN BIT(5) #define RCC_MP_NS_AHB4LPENCLRR_GPIOGLPEN BIT(6) #define RCC_MP_NS_AHB4LPENCLRR_GPIOHLPEN BIT(7) #define RCC_MP_NS_AHB4LPENCLRR_GPIOILPEN BIT(8) /* RCC_MP_AHB5LPENSETR register fields */ #define RCC_MP_AHB5LPENSETR_PKALPEN BIT(2) #define RCC_MP_AHB5LPENSETR_SAESLPEN BIT(3) #define RCC_MP_AHB5LPENSETR_CRYP1LPEN BIT(4) #define RCC_MP_AHB5LPENSETR_HASH1LPEN BIT(5) #define RCC_MP_AHB5LPENSETR_RNG1LPEN BIT(6) #define RCC_MP_AHB5LPENSETR_BKPSRAMLPEN BIT(8) /* RCC_MP_AHB5LPENCLRR register fields */ #define RCC_MP_AHB5LPENCLRR_PKALPEN BIT(2) #define RCC_MP_AHB5LPENCLRR_SAESLPEN BIT(3) #define RCC_MP_AHB5LPENCLRR_CRYP1LPEN BIT(4) #define RCC_MP_AHB5LPENCLRR_HASH1LPEN BIT(5) #define RCC_MP_AHB5LPENCLRR_RNG1LPEN BIT(6) #define RCC_MP_AHB5LPENCLRR_BKPSRAMLPEN BIT(8) /* RCC_MP_AHB6LPENSETR register fields */ #define RCC_MP_AHB6LPENSETR_MCELPEN BIT(1) #define RCC_MP_AHB6LPENSETR_ETH1CKLPEN BIT(7) #define RCC_MP_AHB6LPENSETR_ETH1TXLPEN BIT(8) #define RCC_MP_AHB6LPENSETR_ETH1RXLPEN BIT(9) #define RCC_MP_AHB6LPENSETR_ETH1MACLPEN BIT(10) #define RCC_MP_AHB6LPENSETR_ETH1STPEN BIT(11) #define RCC_MP_AHB6LPENSETR_FMCLPEN BIT(12) #define RCC_MP_AHB6LPENSETR_QSPILPEN BIT(14) #define RCC_MP_AHB6LPENSETR_SDMMC1LPEN BIT(16) #define RCC_MP_AHB6LPENSETR_SDMMC2LPEN BIT(17) #define RCC_MP_AHB6LPENSETR_CRC1LPEN BIT(20) #define RCC_MP_AHB6LPENSETR_USBHLPEN BIT(24) #define RCC_MP_AHB6LPENSETR_ETH2CKLPEN BIT(27) #define RCC_MP_AHB6LPENSETR_ETH2TXLPEN BIT(28) #define RCC_MP_AHB6LPENSETR_ETH2RXLPEN BIT(29) #define RCC_MP_AHB6LPENSETR_ETH2MACLPEN BIT(30) #define RCC_MP_AHB6LPENSETR_ETH2STPEN BIT(31) /* RCC_MP_AHB6LPENCLRR register fields */ #define RCC_MP_AHB6LPENCLRR_MCELPEN BIT(1) #define RCC_MP_AHB6LPENCLRR_ETH1CKLPEN BIT(7) #define RCC_MP_AHB6LPENCLRR_ETH1TXLPEN BIT(8) #define 
RCC_MP_AHB6LPENCLRR_ETH1RXLPEN BIT(9) #define RCC_MP_AHB6LPENCLRR_ETH1MACLPEN BIT(10) #define RCC_MP_AHB6LPENCLRR_ETH1STPEN BIT(11) #define RCC_MP_AHB6LPENCLRR_FMCLPEN BIT(12) #define RCC_MP_AHB6LPENCLRR_QSPILPEN BIT(14) #define RCC_MP_AHB6LPENCLRR_SDMMC1LPEN BIT(16) #define RCC_MP_AHB6LPENCLRR_SDMMC2LPEN BIT(17) #define RCC_MP_AHB6LPENCLRR_CRC1LPEN BIT(20) #define RCC_MP_AHB6LPENCLRR_USBHLPEN BIT(24) #define RCC_MP_AHB6LPENCLRR_ETH2CKLPEN BIT(27) #define RCC_MP_AHB6LPENCLRR_ETH2TXLPEN BIT(28) #define RCC_MP_AHB6LPENCLRR_ETH2RXLPEN BIT(29) #define RCC_MP_AHB6LPENCLRR_ETH2MACLPEN BIT(30) #define RCC_MP_AHB6LPENCLRR_ETH2STPEN BIT(31) /* RCC_MP_S_AHB6LPENSETR register fields */ #define RCC_MP_S_AHB6LPENSETR_MDMALPEN BIT(0) /* RCC_MP_S_AHB6LPENCLRR register fields */ #define RCC_MP_S_AHB6LPENCLRR_MDMALPEN BIT(0) /* RCC_MP_NS_AHB6LPENSETR register fields */ #define RCC_MP_NS_AHB6LPENSETR_MDMALPEN BIT(0) /* RCC_MP_NS_AHB6LPENCLRR register fields */ #define RCC_MP_NS_AHB6LPENCLRR_MDMALPEN BIT(0) /* RCC_MP_S_AXIMLPENSETR register fields */ #define RCC_MP_S_AXIMLPENSETR_SYSRAMLPEN BIT(0) /* RCC_MP_S_AXIMLPENCLRR register fields */ #define RCC_MP_S_AXIMLPENCLRR_SYSRAMLPEN BIT(0) /* RCC_MP_NS_AXIMLPENSETR register fields */ #define RCC_MP_NS_AXIMLPENSETR_SYSRAMLPEN BIT(0) /* RCC_MP_NS_AXIMLPENCLRR register fields */ #define RCC_MP_NS_AXIMLPENCLRR_SYSRAMLPEN BIT(0) /* RCC_MP_MLAHBLPENSETR register fields */ #define RCC_MP_MLAHBLPENSETR_SRAM1LPEN BIT(0) #define RCC_MP_MLAHBLPENSETR_SRAM2LPEN BIT(1) #define RCC_MP_MLAHBLPENSETR_SRAM3LPEN BIT(2) /* RCC_MP_MLAHBLPENCLRR register fields */ #define RCC_MP_MLAHBLPENCLRR_SRAM1LPEN BIT(0) #define RCC_MP_MLAHBLPENCLRR_SRAM2LPEN BIT(1) #define RCC_MP_MLAHBLPENCLRR_SRAM3LPEN BIT(2) /* RCC_APB3SECSR register fields */ #define RCC_APB3SECSR_LPTIM2SECF 0 #define RCC_APB3SECSR_LPTIM3SECF 1 #define RCC_APB3SECSR_VREFSECF 13 /* RCC_APB4SECSR register fields */ #define RCC_APB4SECSR_DCMIPPSECF 1 #define RCC_APB4SECSR_USBPHYSECF 16 /* RCC_APB5SECSR register fields */ #define RCC_APB5SECSR_RTCSECF 8 #define RCC_APB5SECSR_TZCSECF 11 #define RCC_APB5SECSR_ETZPCSECF 13 #define RCC_APB5SECSR_IWDG1SECF 15 #define RCC_APB5SECSR_BSECSECF 16 #define RCC_APB5SECSR_STGENCSECF_MASK GENMASK(21, 20) #define RCC_APB5SECSR_STGENCSECF 20 #define RCC_APB5SECSR_STGENROSECF 21 /* RCC_APB6SECSR register fields */ #define RCC_APB6SECSR_USART1SECF 0 #define RCC_APB6SECSR_USART2SECF 1 #define RCC_APB6SECSR_SPI4SECF 2 #define RCC_APB6SECSR_SPI5SECF 3 #define RCC_APB6SECSR_I2C3SECF 4 #define RCC_APB6SECSR_I2C4SECF 5 #define RCC_APB6SECSR_I2C5SECF 6 #define RCC_APB6SECSR_TIM12SECF 7 #define RCC_APB6SECSR_TIM13SECF 8 #define RCC_APB6SECSR_TIM14SECF 9 #define RCC_APB6SECSR_TIM15SECF 10 #define RCC_APB6SECSR_TIM16SECF 11 #define RCC_APB6SECSR_TIM17SECF 12 /* RCC_AHB2SECSR register fields */ #define RCC_AHB2SECSR_DMA3SECF 3 #define RCC_AHB2SECSR_DMAMUX2SECF 4 #define RCC_AHB2SECSR_ADC1SECF 5 #define RCC_AHB2SECSR_ADC2SECF 6 #define RCC_AHB2SECSR_USBOSECF 8 /* RCC_AHB4SECSR register fields */ #define RCC_AHB4SECSR_TSCSECF 15 /* RCC_AHB5SECSR register fields */ #define RCC_AHB5SECSR_PKASECF 2 #define RCC_AHB5SECSR_SAESSECF 3 #define RCC_AHB5SECSR_CRYP1SECF 4 #define RCC_AHB5SECSR_HASH1SECF 5 #define RCC_AHB5SECSR_RNG1SECF 6 #define RCC_AHB5SECSR_BKPSRAMSECF 8 /* RCC_AHB6SECSR register fields */ #define RCC_AHB6SECSR_MCESECF 1 #define RCC_AHB6SECSR_FMCSECF 12 #define RCC_AHB6SECSR_QSPISECF 14 #define RCC_AHB6SECSR_SDMMC1SECF 16 #define RCC_AHB6SECSR_SDMMC2SECF 17 #define RCC_AHB6SECSR_ETH1SECF_MASK 
GENMASK(11, 7) #define RCC_AHB6SECSR_ETH2SECF_MASK GENMASK(31, 27) #define RCC_AHB6SECSR_ETH1SECF_SHIFT 7 #define RCC_AHB6SECSR_ETH2SECF_SHIFT 27 #define RCC_AHB6SECSR_ETH1CKSECF 7 #define RCC_AHB6SECSR_ETH1TXSECF 8 #define RCC_AHB6SECSR_ETH1RXSECF 9 #define RCC_AHB6SECSR_ETH1MACSECF 10 #define RCC_AHB6SECSR_ETH1STPSECF 11 #define RCC_AHB6SECSR_ETH2CKSECF 27 #define RCC_AHB6SECSR_ETH2TXSECF 28 #define RCC_AHB6SECSR_ETH2RXSECF 29 #define RCC_AHB6SECSR_ETH2MACSECF 30 #define RCC_AHB6SECSR_ETH2STPSECF 31 /* RCC_VERR register fields */ #define RCC_VERR_MINREV_MASK GENMASK(3, 0) #define RCC_VERR_MAJREV_MASK GENMASK(7, 4) #define RCC_VERR_MINREV_SHIFT 0 #define RCC_VERR_MAJREV_SHIFT 4 /* RCC_IDR register fields */ #define RCC_IDR_ID_MASK GENMASK(31, 0) #define RCC_IDR_ID_SHIFT 0 /* RCC_SIDR register fields */ #define RCC_SIDR_SID_MASK GENMASK(31, 0) #define RCC_SIDR_SID_SHIFT 0 #endif /* STM32MP13_RCC_H */
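The header above pairs every multi-bit field with a *_MASK/*_SHIFT couple; the intended usage is "mask first, then shift the field down to bit 0". A minimal standalone sketch of that convention, using the RCC_PLL1CFGR2 field positions defined above: the register value is hypothetical, and GENMASK() is re-derived locally (a simplified 32-bit variant of the kernel's linux/bits.h helper) so the snippet builds outside the kernel tree.

/* field_demo.c - build with: cc field_demo.c */
#include <stdint.h>
#include <stdio.h>

/* Local 32-bit stand-in for the kernel's GENMASK(h, l) */
#define GENMASK(h, l) ((~0u << (l)) & (~0u >> (31 - (h))))

#define PLL1CFGR2_DIVP_MASK GENMASK(6, 0)   /* as RCC_PLL1CFGR2_DIVP_MASK */
#define PLL1CFGR2_DIVP_SHIFT 0
#define PLL1CFGR2_DIVQ_MASK GENMASK(14, 8)  /* as RCC_PLL1CFGR2_DIVQ_MASK */
#define PLL1CFGR2_DIVQ_SHIFT 8

int main(void)
{
	uint32_t cfgr2 = 0x00100201; /* hypothetical register readout */

	/* Isolate the field with the mask, then shift it down to bit 0 */
	uint32_t divp = (cfgr2 & PLL1CFGR2_DIVP_MASK) >> PLL1CFGR2_DIVP_SHIFT;
	uint32_t divq = (cfgr2 & PLL1CFGR2_DIVQ_MASK) >> PLL1CFGR2_DIVQ_SHIFT;

	printf("DIVP=%u DIVQ=%u\n", (unsigned)divp, (unsigned)divq); /* DIVP=1 DIVQ=2 */
	return 0;
}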
// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/drivers/rtc/rtc-pl030.c
 *
 * Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
 */
#include <linux/module.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/amba/bus.h>
#include <linux/io.h>
#include <linux/slab.h>

#define RTC_DR (0)
#define RTC_MR (4)
/* Offset 8 is shared: reads return the status, writes clear the interrupt */
#define RTC_STAT (8)
#define RTC_EOI (8)
#define RTC_LR (12)
#define RTC_CR (16)
#define RTC_CR_MIE (1 << 0)

struct pl030_rtc {
	struct rtc_device *rtc;
	void __iomem *base;
};

static irqreturn_t pl030_interrupt(int irq, void *dev_id)
{
	struct pl030_rtc *rtc = dev_id;
	writel(0, rtc->base + RTC_EOI);
	return IRQ_HANDLED;
}

static int pl030_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pl030_rtc *rtc = dev_get_drvdata(dev);

	rtc_time64_to_tm(readl(rtc->base + RTC_MR), &alrm->time);
	return 0;
}

static int pl030_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pl030_rtc *rtc = dev_get_drvdata(dev);

	writel(rtc_tm_to_time64(&alrm->time), rtc->base + RTC_MR);
	return 0;
}

static int pl030_read_time(struct device *dev, struct rtc_time *tm)
{
	struct pl030_rtc *rtc = dev_get_drvdata(dev);

	rtc_time64_to_tm(readl(rtc->base + RTC_DR), tm);
	return 0;
}

/*
 * Set the RTC time.  Unfortunately, we can't accurately set
 * the point at which the counter updates.
 *
 * Also, since the value written to RTC_LR is only transferred to the
 * counter (read back via RTC_DR) on the next rising edge of the 1Hz
 * clock, we must write the time one second in advance.
 */
static int pl030_set_time(struct device *dev, struct rtc_time *tm)
{
	struct pl030_rtc *rtc = dev_get_drvdata(dev);

	writel(rtc_tm_to_time64(tm) + 1, rtc->base + RTC_LR);
	return 0;
}

static const struct rtc_class_ops pl030_ops = {
	.read_time = pl030_read_time,
	.set_time = pl030_set_time,
	.read_alarm = pl030_read_alarm,
	.set_alarm = pl030_set_alarm,
};

static int pl030_probe(struct amba_device *dev, const struct amba_id *id)
{
	struct pl030_rtc *rtc;
	int ret;

	ret = amba_request_regions(dev, NULL);
	if (ret)
		goto err_req;

	rtc = devm_kzalloc(&dev->dev, sizeof(*rtc), GFP_KERNEL);
	if (!rtc) {
		ret = -ENOMEM;
		goto err_rtc;
	}

	rtc->rtc = devm_rtc_allocate_device(&dev->dev);
	if (IS_ERR(rtc->rtc)) {
		ret = PTR_ERR(rtc->rtc);
		goto err_rtc;
	}

	rtc->rtc->ops = &pl030_ops;
	rtc->rtc->range_max = U32_MAX;
	rtc->base = ioremap(dev->res.start, resource_size(&dev->res));
	if (!rtc->base) {
		ret = -ENOMEM;
		goto err_rtc;
	}

	__raw_writel(0, rtc->base + RTC_CR);
	__raw_writel(0, rtc->base + RTC_EOI);

	amba_set_drvdata(dev, rtc);

	ret = request_irq(dev->irq[0], pl030_interrupt, 0,
			  "rtc-pl030", rtc);
	if (ret)
		goto err_irq;

	ret = devm_rtc_register_device(rtc->rtc);
	if (ret)
		goto err_reg;

	return 0;

 err_reg:
	free_irq(dev->irq[0], rtc);
 err_irq:
	iounmap(rtc->base);
 err_rtc:
	amba_release_regions(dev);
 err_req:
	return ret;
}

static void pl030_remove(struct amba_device *dev)
{
	struct pl030_rtc *rtc = amba_get_drvdata(dev);

	writel(0, rtc->base + RTC_CR);

	free_irq(dev->irq[0], rtc);
	iounmap(rtc->base);
	amba_release_regions(dev);
}

static struct amba_id pl030_ids[] = {
	{
		.id = 0x00041030,
		.mask = 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl030_ids);

static struct amba_driver pl030_driver = {
	.drv = {
		.name = "rtc-pl030",
	},
	.probe = pl030_probe,
	.remove = pl030_remove,
	.id_table = pl030_ids,
};

module_amba_driver(pl030_driver);

MODULE_AUTHOR("Russell King <[email protected]>");
MODULE_DESCRIPTION("ARM AMBA PL030 RTC Driver");
MODULE_LICENSE("GPL");
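A side note on range_max = U32_MAX in pl030_probe() above: the PL030 keeps time as a plain 32-bit seconds counter, so the latest instant the driver can represent is 2^32 - 1 seconds past the Unix epoch. A small standalone sketch (assuming a host with 64-bit time_t) that prints the wrap date:

/* wrap_demo.c - build with: cc wrap_demo.c */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	time_t last = (time_t)UINT32_MAX; /* matches range_max = U32_MAX */
	struct tm tm;

	gmtime_r(&last, &tm);
	/* Prints 2106-02-07 06:28:15: the counter wraps after ~136 years */
	printf("%04d-%02d-%02d %02d:%02d:%02d UTC\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	return 0;
}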
/* SPDX-License-Identifier: GPL-2.0 */ /* * Copyright (C) 2015 Markus Reichl * * Device Tree binding constants clocks for the Samsung S2MPS11 PMIC. */ #ifndef _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H #define _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H /* Fixed rate clocks. */ #define S2MPS11_CLK_AP 0 #define S2MPS11_CLK_CP 1 #define S2MPS11_CLK_BT 2 /* Total number of clocks. */ #define S2MPS11_CLKS_NUM (S2MPS11_CLK_BT + 1) #endif /* _DT_BINDINGS_CLOCK_SAMSUNG_S2MPS11_CLOCK_H */
// SPDX-License-Identifier: GPL-2.0-only /* * drivers/watchdog/orion_wdt.c * * Watchdog driver for Orion/Kirkwood processors * * Author: Sylver Bruneau <[email protected]> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/watchdog.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_device.h> /* RSTOUT mask register physical address for Orion5x, Kirkwood and Dove */ #define ORION_RSTOUT_MASK_OFFSET 0x20108 /* Internal registers can be configured at any 1 MiB aligned address */ #define INTERNAL_REGS_MASK ~(SZ_1M - 1) /* * Watchdog timer block registers. */ #define TIMER_CTRL 0x0000 #define TIMER1_FIXED_ENABLE_BIT BIT(12) #define WDT_AXP_FIXED_ENABLE_BIT BIT(10) #define TIMER1_ENABLE_BIT BIT(2) #define TIMER_A370_STATUS 0x0004 #define WDT_A370_EXPIRED BIT(31) #define TIMER1_STATUS_BIT BIT(8) #define TIMER1_VAL_OFF 0x001c #define WDT_MAX_CYCLE_COUNT 0xffffffff #define WDT_A370_RATIO_MASK(v) ((v) << 16) #define WDT_A370_RATIO_SHIFT 5 #define WDT_A370_RATIO (1 << WDT_A370_RATIO_SHIFT) static bool nowayout = WATCHDOG_NOWAYOUT; static int heartbeat; /* module parameter (seconds) */ struct orion_watchdog; struct orion_watchdog_data { int wdt_counter_offset; int wdt_enable_bit; int rstout_enable_bit; int rstout_mask_bit; int (*clock_init)(struct platform_device *, struct orion_watchdog *); int (*enabled)(struct orion_watchdog *); int (*start)(struct watchdog_device *); int (*stop)(struct watchdog_device *); }; struct orion_watchdog { struct watchdog_device wdt; void __iomem *reg; void __iomem *rstout; void __iomem *rstout_mask; unsigned long clk_rate; struct clk *clk; const struct orion_watchdog_data *data; }; static int orion_wdt_clock_init(struct platform_device *pdev, struct orion_watchdog *dev) { int ret; dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); ret = clk_prepare_enable(dev->clk); if (ret) { clk_put(dev->clk); return ret; } dev->clk_rate = clk_get_rate(dev->clk); return 0; } static int armada370_wdt_clock_init(struct platform_device *pdev, struct orion_watchdog *dev) { int ret; dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); ret = clk_prepare_enable(dev->clk); if (ret) { clk_put(dev->clk); return ret; } /* Setup watchdog input clock */ atomic_io_modify(dev->reg + TIMER_CTRL, WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT), WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT)); dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO; return 0; } static int armada375_wdt_clock_init(struct platform_device *pdev, struct orion_watchdog *dev) { int ret; dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed"); if (!IS_ERR(dev->clk)) { ret = clk_prepare_enable(dev->clk); if (ret) { clk_put(dev->clk); return ret; } atomic_io_modify(dev->reg + TIMER_CTRL, WDT_AXP_FIXED_ENABLE_BIT, WDT_AXP_FIXED_ENABLE_BIT); dev->clk_rate = clk_get_rate(dev->clk); return 0; } /* Mandatory fallback for proper devicetree backward compatibility */ dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); ret = clk_prepare_enable(dev->clk); if (ret) { clk_put(dev->clk); return ret; } atomic_io_modify(dev->reg + TIMER_CTRL, WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT), WDT_A370_RATIO_MASK(WDT_A370_RATIO_SHIFT)); dev->clk_rate = clk_get_rate(dev->clk) / WDT_A370_RATIO; return 0; } static int 
armadaxp_wdt_clock_init(struct platform_device *pdev, struct orion_watchdog *dev) { int ret; u32 val; dev->clk = of_clk_get_by_name(pdev->dev.of_node, "fixed"); if (IS_ERR(dev->clk)) return PTR_ERR(dev->clk); ret = clk_prepare_enable(dev->clk); if (ret) { clk_put(dev->clk); return ret; } /* Fix the wdt and timer1 clock frequency to 25MHz */ val = WDT_AXP_FIXED_ENABLE_BIT | TIMER1_FIXED_ENABLE_BIT; atomic_io_modify(dev->reg + TIMER_CTRL, val, val); dev->clk_rate = clk_get_rate(dev->clk); return 0; } static int orion_wdt_ping(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); /* Reload watchdog duration */ writel(dev->clk_rate * wdt_dev->timeout, dev->reg + dev->data->wdt_counter_offset); if (dev->wdt.info->options & WDIOF_PRETIMEOUT) writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout), dev->reg + TIMER1_VAL_OFF); return 0; } static int armada375_start(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); u32 reg; /* Set watchdog duration */ writel(dev->clk_rate * wdt_dev->timeout, dev->reg + dev->data->wdt_counter_offset); if (dev->wdt.info->options & WDIOF_PRETIMEOUT) writel(dev->clk_rate * (wdt_dev->timeout - wdt_dev->pretimeout), dev->reg + TIMER1_VAL_OFF); /* Clear the watchdog expiration bit */ atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0); /* Enable watchdog timer */ reg = dev->data->wdt_enable_bit; if (dev->wdt.info->options & WDIOF_PRETIMEOUT) reg |= TIMER1_ENABLE_BIT; atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg); /* Enable reset on watchdog */ reg = readl(dev->rstout); reg |= dev->data->rstout_enable_bit; writel(reg, dev->rstout); atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit, 0); return 0; } static int armada370_start(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); u32 reg; /* Set watchdog duration */ writel(dev->clk_rate * wdt_dev->timeout, dev->reg + dev->data->wdt_counter_offset); /* Clear the watchdog expiration bit */ atomic_io_modify(dev->reg + TIMER_A370_STATUS, WDT_A370_EXPIRED, 0); /* Enable watchdog timer */ reg = dev->data->wdt_enable_bit; if (dev->wdt.info->options & WDIOF_PRETIMEOUT) reg |= TIMER1_ENABLE_BIT; atomic_io_modify(dev->reg + TIMER_CTRL, reg, reg); /* Enable reset on watchdog */ reg = readl(dev->rstout); reg |= dev->data->rstout_enable_bit; writel(reg, dev->rstout); return 0; } static int orion_start(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); /* Set watchdog duration */ writel(dev->clk_rate * wdt_dev->timeout, dev->reg + dev->data->wdt_counter_offset); /* Enable watchdog timer */ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, dev->data->wdt_enable_bit); /* Enable reset on watchdog */ atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, dev->data->rstout_enable_bit); return 0; } static int orion_wdt_start(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); /* There are some per-SoC quirks to handle */ return dev->data->start(wdt_dev); } static int orion_stop(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); /* Disable reset on watchdog */ atomic_io_modify(dev->rstout, dev->data->rstout_enable_bit, 0); /* Disable watchdog timer */ atomic_io_modify(dev->reg + TIMER_CTRL, dev->data->wdt_enable_bit, 0); return 0; } static int armada375_stop(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = 
watchdog_get_drvdata(wdt_dev); u32 reg, mask; /* Disable reset on watchdog */ atomic_io_modify(dev->rstout_mask, dev->data->rstout_mask_bit, dev->data->rstout_mask_bit); reg = readl(dev->rstout); reg &= ~dev->data->rstout_enable_bit; writel(reg, dev->rstout); /* Disable watchdog timer */ mask = dev->data->wdt_enable_bit; if (wdt_dev->info->options & WDIOF_PRETIMEOUT) mask |= TIMER1_ENABLE_BIT; atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0); return 0; } static int armada370_stop(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); u32 reg, mask; /* Disable reset on watchdog */ reg = readl(dev->rstout); reg &= ~dev->data->rstout_enable_bit; writel(reg, dev->rstout); /* Disable watchdog timer */ mask = dev->data->wdt_enable_bit; if (wdt_dev->info->options & WDIOF_PRETIMEOUT) mask |= TIMER1_ENABLE_BIT; atomic_io_modify(dev->reg + TIMER_CTRL, mask, 0); return 0; } static int orion_wdt_stop(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); return dev->data->stop(wdt_dev); } static int orion_enabled(struct orion_watchdog *dev) { bool enabled, running; enabled = readl(dev->rstout) & dev->data->rstout_enable_bit; running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit; return enabled && running; } static int armada375_enabled(struct orion_watchdog *dev) { bool masked, enabled, running; masked = readl(dev->rstout_mask) & dev->data->rstout_mask_bit; enabled = readl(dev->rstout) & dev->data->rstout_enable_bit; running = readl(dev->reg + TIMER_CTRL) & dev->data->wdt_enable_bit; return !masked && enabled && running; } static int orion_wdt_enabled(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); return dev->data->enabled(dev); } static unsigned int orion_wdt_get_timeleft(struct watchdog_device *wdt_dev) { struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); return readl(dev->reg + dev->data->wdt_counter_offset) / dev->clk_rate; } static struct watchdog_info orion_wdt_info = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .identity = "Orion Watchdog", }; static const struct watchdog_ops orion_wdt_ops = { .owner = THIS_MODULE, .start = orion_wdt_start, .stop = orion_wdt_stop, .ping = orion_wdt_ping, .get_timeleft = orion_wdt_get_timeleft, }; static irqreturn_t orion_wdt_irq(int irq, void *devid) { panic("Watchdog Timeout"); return IRQ_HANDLED; } static irqreturn_t orion_wdt_pre_irq(int irq, void *devid) { struct orion_watchdog *dev = devid; atomic_io_modify(dev->reg + TIMER_A370_STATUS, TIMER1_STATUS_BIT, 0); watchdog_notify_pretimeout(&dev->wdt); return IRQ_HANDLED; } /* * The original devicetree binding for this driver specified only * one memory resource, so in order to keep DT backwards compatibility * we try to fall back to a hardcoded register address if the resource * is missing from the devicetree.
*/ static void __iomem *orion_wdt_ioremap_rstout(struct platform_device *pdev, phys_addr_t internal_regs) { struct resource *res; phys_addr_t rstout; res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) return devm_ioremap(&pdev->dev, res->start, resource_size(res)); rstout = internal_regs + ORION_RSTOUT_MASK_OFFSET; WARN(1, FW_BUG "falling back to hardcoded RSTOUT reg %pa\n", &rstout); return devm_ioremap(&pdev->dev, rstout, 0x4); } static const struct orion_watchdog_data orion_data = { .rstout_enable_bit = BIT(1), .wdt_enable_bit = BIT(4), .wdt_counter_offset = 0x24, .clock_init = orion_wdt_clock_init, .enabled = orion_enabled, .start = orion_start, .stop = orion_stop, }; static const struct orion_watchdog_data armada370_data = { .rstout_enable_bit = BIT(8), .wdt_enable_bit = BIT(8), .wdt_counter_offset = 0x34, .clock_init = armada370_wdt_clock_init, .enabled = orion_enabled, .start = armada370_start, .stop = armada370_stop, }; static const struct orion_watchdog_data armadaxp_data = { .rstout_enable_bit = BIT(8), .wdt_enable_bit = BIT(8), .wdt_counter_offset = 0x34, .clock_init = armadaxp_wdt_clock_init, .enabled = orion_enabled, .start = armada370_start, .stop = armada370_stop, }; static const struct orion_watchdog_data armada375_data = { .rstout_enable_bit = BIT(8), .rstout_mask_bit = BIT(10), .wdt_enable_bit = BIT(8), .wdt_counter_offset = 0x34, .clock_init = armada375_wdt_clock_init, .enabled = armada375_enabled, .start = armada375_start, .stop = armada375_stop, }; static const struct orion_watchdog_data armada380_data = { .rstout_enable_bit = BIT(8), .rstout_mask_bit = BIT(10), .wdt_enable_bit = BIT(8), .wdt_counter_offset = 0x34, .clock_init = armadaxp_wdt_clock_init, .enabled = armada375_enabled, .start = armada375_start, .stop = armada375_stop, }; static const struct of_device_id orion_wdt_of_match_table[] = { { .compatible = "marvell,orion-wdt", .data = &orion_data, }, { .compatible = "marvell,armada-370-wdt", .data = &armada370_data, }, { .compatible = "marvell,armada-xp-wdt", .data = &armadaxp_data, }, { .compatible = "marvell,armada-375-wdt", .data = &armada375_data, }, { .compatible = "marvell,armada-380-wdt", .data = &armada380_data, }, {}, }; MODULE_DEVICE_TABLE(of, orion_wdt_of_match_table); static int orion_wdt_get_regs(struct platform_device *pdev, struct orion_watchdog *dev) { struct device_node *node = pdev->dev.of_node; struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; dev->reg = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!dev->reg) return -ENOMEM; /* Each supported compatible has some RSTOUT register quirk */ if (of_device_is_compatible(node, "marvell,orion-wdt")) { dev->rstout = orion_wdt_ioremap_rstout(pdev, res->start & INTERNAL_REGS_MASK); if (!dev->rstout) return -ENODEV; } else if (of_device_is_compatible(node, "marvell,armada-370-wdt") || of_device_is_compatible(node, "marvell,armada-xp-wdt")) { /* Dedicated RSTOUT register, can be requested. */ dev->rstout = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dev->rstout)) return PTR_ERR(dev->rstout); } else if (of_device_is_compatible(node, "marvell,armada-375-wdt") || of_device_is_compatible(node, "marvell,armada-380-wdt")) { /* Dedicated RSTOUT register, can be requested. 
*/ dev->rstout = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(dev->rstout)) return PTR_ERR(dev->rstout); res = platform_get_resource(pdev, IORESOURCE_MEM, 2); if (!res) return -ENODEV; dev->rstout_mask = devm_ioremap(&pdev->dev, res->start, resource_size(res)); if (!dev->rstout_mask) return -ENOMEM; } else { return -ENODEV; } return 0; } static int orion_wdt_probe(struct platform_device *pdev) { struct orion_watchdog *dev; const struct of_device_id *match; unsigned int wdt_max_duration; /* (seconds) */ int ret, irq; dev = devm_kzalloc(&pdev->dev, sizeof(struct orion_watchdog), GFP_KERNEL); if (!dev) return -ENOMEM; match = of_match_device(orion_wdt_of_match_table, &pdev->dev); if (!match) /* Default legacy match */ match = &orion_wdt_of_match_table[0]; dev->wdt.info = &orion_wdt_info; dev->wdt.ops = &orion_wdt_ops; dev->wdt.min_timeout = 1; dev->data = match->data; ret = orion_wdt_get_regs(pdev, dev); if (ret) return ret; ret = dev->data->clock_init(pdev, dev); if (ret) { dev_err(&pdev->dev, "cannot initialize clock\n"); return ret; } wdt_max_duration = WDT_MAX_CYCLE_COUNT / dev->clk_rate; dev->wdt.timeout = wdt_max_duration; dev->wdt.max_timeout = wdt_max_duration; dev->wdt.parent = &pdev->dev; watchdog_init_timeout(&dev->wdt, heartbeat, &pdev->dev); platform_set_drvdata(pdev, &dev->wdt); watchdog_set_drvdata(&dev->wdt, dev); /* * Let's make sure the watchdog is fully stopped, unless it's * explicitly enabled. This may be the case if the module was * removed and re-inserted, or if the bootloader explicitly * set a running watchdog before booting the kernel. */ if (!orion_wdt_enabled(&dev->wdt)) orion_wdt_stop(&dev->wdt); else set_bit(WDOG_HW_RUNNING, &dev->wdt.status); /* Request the IRQ only after the watchdog is disabled */ irq = platform_get_irq_optional(pdev, 0); if (irq > 0) { /* * Not all supported platforms specify an interrupt for the * watchdog, so let's make it optional. */ ret = devm_request_irq(&pdev->dev, irq, orion_wdt_irq, 0, pdev->name, dev); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ\n"); goto disable_clk; } } /* Optional 2nd interrupt for pretimeout */ irq = platform_get_irq_optional(pdev, 1); if (irq > 0) { orion_wdt_info.options |= WDIOF_PRETIMEOUT; ret = devm_request_irq(&pdev->dev, irq, orion_wdt_pre_irq, 0, pdev->name, dev); if (ret < 0) { dev_err(&pdev->dev, "failed to request IRQ\n"); goto disable_clk; } } watchdog_set_nowayout(&dev->wdt, nowayout); ret = watchdog_register_device(&dev->wdt); if (ret) goto disable_clk; pr_info("Initial timeout %d sec%s\n", dev->wdt.timeout, nowayout ? 
", nowayout" : ""); return 0; disable_clk: clk_disable_unprepare(dev->clk); clk_put(dev->clk); return ret; } static void orion_wdt_remove(struct platform_device *pdev) { struct watchdog_device *wdt_dev = platform_get_drvdata(pdev); struct orion_watchdog *dev = watchdog_get_drvdata(wdt_dev); watchdog_unregister_device(wdt_dev); clk_disable_unprepare(dev->clk); clk_put(dev->clk); } static void orion_wdt_shutdown(struct platform_device *pdev) { struct watchdog_device *wdt_dev = platform_get_drvdata(pdev); orion_wdt_stop(wdt_dev); } static struct platform_driver orion_wdt_driver = { .probe = orion_wdt_probe, .remove = orion_wdt_remove, .shutdown = orion_wdt_shutdown, .driver = { .name = "orion_wdt", .of_match_table = orion_wdt_of_match_table, }, }; module_platform_driver(orion_wdt_driver); MODULE_AUTHOR("Sylver Bruneau <[email protected]>"); MODULE_DESCRIPTION("Orion Processor Watchdog"); module_param(heartbeat, int, 0); MODULE_PARM_DESC(heartbeat, "Initial watchdog heartbeat in seconds"); module_param(nowayout, bool, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:orion_wdt");
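/*
 * Illustrative userspace sketch, not part of the driver above: exercising
 * this watchdog through the generic /dev/watchdog character device. The
 * ioctls are the standard ones from <linux/watchdog.h>; the trailing 'V'
 * write is the magic close honoured because WDIOF_MAGICCLOSE is set.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/watchdog.h>

int main(void)
{
	int timeout = 30; /* seconds, clamped by the driver to max_timeout */
	int fd = open("/dev/watchdog", O_WRONLY);

	if (fd < 0)
		return 1;
	ioctl(fd, WDIOC_SETTIMEOUT, &timeout);
	ioctl(fd, WDIOC_KEEPALIVE, 0);	/* reaches orion_wdt_ping() */
	write(fd, "V", 1);		/* allow a clean stop on close */
	close(fd);
	return 0;
}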
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2020 Mellanox Technologies Inc. All rights reserved. */ #include "mlx5_core.h" #include "eswitch.h" #include "helper.h" #include "ofld.h" static int acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport); static bool esw_acl_ingress_prio_tag_enabled(struct mlx5_eswitch *esw, const struct mlx5_vport *vport) { return (MLX5_CAP_GEN(esw->dev, prio_tag_required) && mlx5_eswitch_is_vf_vport(esw, vport->vport)); } static int esw_acl_ingress_prio_tag_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { struct mlx5_flow_act flow_act = {}; struct mlx5_flow_spec *spec; int err = 0; /* For prio tag mode, there is only one FTE: * 1) Untagged packets - push prio tag VLAN and modify metadata if * required, allow * Unmatched traffic is allowed by default */ spec = kvzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; /* Untagged packets - push prio tag VLAN, allow */ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag); MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 0); spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH | MLX5_FLOW_CONTEXT_ACTION_ALLOW; flow_act.vlan[0].ethtype = ETH_P_8021Q; flow_act.vlan[0].vid = 0; flow_act.vlan[0].prio = 0; if (vport->ingress.offloads.modify_metadata_rule) { flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; } vport->ingress.allow_rule = mlx5_add_flow_rules(vport->ingress.acl, spec, &flow_act, NULL, 0); if (IS_ERR(vport->ingress.allow_rule)) { err = PTR_ERR(vport->ingress.allow_rule); esw_warn(esw->dev, "vport[%d] configure ingress untagged allow rule, err(%d)\n", vport->vport, err); vport->ingress.allow_rule = NULL; } kvfree(spec); return err; } static int esw_acl_ingress_mod_metadata_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { u8 action[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_flow_act flow_act = {}; int err = 0; u32 key; key = mlx5_eswitch_get_vport_metadata_for_match(esw, vport->vport); key >>= ESW_SOURCE_PORT_METADATA_OFFSET; MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET); MLX5_SET(set_action_in, action, field, MLX5_ACTION_IN_FIELD_METADATA_REG_C_0); MLX5_SET(set_action_in, action, data, key); MLX5_SET(set_action_in, action, offset, ESW_SOURCE_PORT_METADATA_OFFSET); MLX5_SET(set_action_in, action, length, ESW_SOURCE_PORT_METADATA_BITS); vport->ingress.offloads.modify_metadata = mlx5_modify_header_alloc(esw->dev, MLX5_FLOW_NAMESPACE_ESW_INGRESS, 1, action); if (IS_ERR(vport->ingress.offloads.modify_metadata)) { err = PTR_ERR(vport->ingress.offloads.modify_metadata); esw_warn(esw->dev, "failed to alloc modify header for vport %d ingress acl (%d)\n", vport->vport, err); return err; } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_MOD_HDR | MLX5_FLOW_CONTEXT_ACTION_ALLOW; flow_act.modify_hdr = vport->ingress.offloads.modify_metadata; flow_act.fg = vport->ingress.offloads.metadata_allmatch_grp; vport->ingress.offloads.modify_metadata_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0); if (IS_ERR(vport->ingress.offloads.modify_metadata_rule)) { err = PTR_ERR(vport->ingress.offloads.modify_metadata_rule); esw_warn(esw->dev, "failed to add setting metadata rule for vport %d ingress acl, err(%d)\n", vport->vport, err); mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata);
vport->ingress.offloads.modify_metadata_rule = NULL; } return err; } static void esw_acl_ingress_mod_metadata_destroy(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (!vport->ingress.offloads.modify_metadata_rule) return; mlx5_del_flow_rules(vport->ingress.offloads.modify_metadata_rule); mlx5_modify_header_dealloc(esw->dev, vport->ingress.offloads.modify_metadata); vport->ingress.offloads.modify_metadata_rule = NULL; } static int esw_acl_ingress_src_port_drop_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { struct mlx5_flow_act flow_act = {}; struct mlx5_flow_handle *flow_rule; bool created = false; int err = 0; if (!vport->ingress.acl) { err = acl_ingress_ofld_setup(esw, vport); if (err) return err; created = true; } flow_act.action = MLX5_FLOW_CONTEXT_ACTION_DROP; flow_act.fg = vport->ingress.offloads.drop_grp; flow_rule = mlx5_add_flow_rules(vport->ingress.acl, NULL, &flow_act, NULL, 0); if (IS_ERR(flow_rule)) { err = PTR_ERR(flow_rule); goto err_out; } vport->ingress.offloads.drop_rule = flow_rule; return 0; err_out: /* Only destroy the ingress acl created in this function. */ if (created) esw_acl_ingress_ofld_cleanup(esw, vport); return err; } static void esw_acl_ingress_src_port_drop_destroy(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (!vport->ingress.offloads.drop_rule) return; mlx5_del_flow_rules(vport->ingress.offloads.drop_rule); vport->ingress.offloads.drop_rule = NULL; } static int esw_acl_ingress_ofld_rules_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { int err; if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { err = esw_acl_ingress_mod_metadata_create(esw, vport); if (err) { esw_warn(esw->dev, "vport(%d) create ingress modify metadata, err(%d)\n", vport->vport, err); return err; } } if (esw_acl_ingress_prio_tag_enabled(esw, vport)) { err = esw_acl_ingress_prio_tag_create(esw, vport); if (err) { esw_warn(esw->dev, "vport(%d) create ingress prio tag rule, err(%d)\n", vport->vport, err); goto prio_tag_err; } } return 0; prio_tag_err: esw_acl_ingress_mod_metadata_destroy(esw, vport); return err; } static void esw_acl_ingress_ofld_rules_destroy(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { esw_acl_ingress_allow_rule_destroy(vport); esw_acl_ingress_mod_metadata_destroy(esw, vport); esw_acl_ingress_src_port_drop_destroy(esw, vport); } static int esw_acl_ingress_ofld_groups_create(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in); struct mlx5_flow_group *g; void *match_criteria; u32 *flow_group_in; u32 flow_index = 0; int ret = 0; flow_group_in = kvzalloc(inlen, GFP_KERNEL); if (!flow_group_in) return -ENOMEM; if (vport->vport == MLX5_VPORT_UPLINK) { /* This group can hold an FTE to drop all traffic. * Needed in case LAG is enabled. */ MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { ret = PTR_ERR(g); esw_warn(esw->dev, "vport[%d] ingress create drop flow group, err(%d)\n", vport->vport, ret); goto drop_err; } vport->ingress.offloads.drop_grp = g; flow_index++; } if (esw_acl_ingress_prio_tag_enabled(esw, vport)) { /* This group holds an FTE to match untagged packets when prio_tag * is enabled.
*/ memset(flow_group_in, 0, inlen); match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria); MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS); MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { ret = PTR_ERR(g); esw_warn(esw->dev, "vport[%d] ingress create untagged flow group, err(%d)\n", vport->vport, ret); goto prio_tag_err; } vport->ingress.offloads.metadata_prio_tag_grp = g; flow_index++; } if (mlx5_eswitch_vport_match_metadata_enabled(esw)) { /* This group holds an FTE with no match to add metadata for * tagged packets if prio-tag is enabled, or for all untagged * traffic in case prio-tag is disabled. */ memset(flow_group_in, 0, inlen); MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, flow_index); MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, flow_index); g = mlx5_create_flow_group(vport->ingress.acl, flow_group_in); if (IS_ERR(g)) { ret = PTR_ERR(g); esw_warn(esw->dev, "vport[%d] ingress create metadata flow group, err(%d)\n", vport->vport, ret); goto metadata_err; } vport->ingress.offloads.metadata_allmatch_grp = g; } kvfree(flow_group_in); return 0; metadata_err: if (!IS_ERR_OR_NULL(vport->ingress.offloads.metadata_prio_tag_grp)) { mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); vport->ingress.offloads.metadata_prio_tag_grp = NULL; } prio_tag_err: if (!IS_ERR_OR_NULL(vport->ingress.offloads.drop_grp)) { mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp); vport->ingress.offloads.drop_grp = NULL; } drop_err: kvfree(flow_group_in); return ret; } static void esw_acl_ingress_ofld_groups_destroy(struct mlx5_vport *vport) { if (vport->ingress.offloads.metadata_allmatch_grp) { mlx5_destroy_flow_group(vport->ingress.offloads.metadata_allmatch_grp); vport->ingress.offloads.metadata_allmatch_grp = NULL; } if (vport->ingress.offloads.metadata_prio_tag_grp) { mlx5_destroy_flow_group(vport->ingress.offloads.metadata_prio_tag_grp); vport->ingress.offloads.metadata_prio_tag_grp = NULL; } if (vport->ingress.offloads.drop_grp) { mlx5_destroy_flow_group(vport->ingress.offloads.drop_grp); vport->ingress.offloads.drop_grp = NULL; } } static int acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { int num_ftes = 0; int err; esw_acl_ingress_allow_rule_destroy(vport); if (mlx5_eswitch_vport_match_metadata_enabled(esw)) num_ftes++; if (vport->vport == MLX5_VPORT_UPLINK) num_ftes++; if (esw_acl_ingress_prio_tag_enabled(esw, vport)) num_ftes++; vport->ingress.acl = esw_acl_table_create(esw, vport, MLX5_FLOW_NAMESPACE_ESW_INGRESS, num_ftes); if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; return err; } err = esw_acl_ingress_ofld_groups_create(esw, vport); if (err) goto group_err; esw_debug(esw->dev, "vport[%d] configure ingress rules\n", vport->vport); err = esw_acl_ingress_ofld_rules_create(esw, vport); if (err) goto rules_err; return 0; rules_err: esw_acl_ingress_ofld_groups_destroy(vport); group_err: esw_acl_ingress_table_destroy(vport); return err; } int esw_acl_ingress_ofld_setup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (!mlx5_eswitch_vport_match_metadata_enabled(esw) && !esw_acl_ingress_prio_tag_enabled(esw, vport)) return 0; return
acl_ingress_ofld_setup(esw, vport); } void esw_acl_ingress_ofld_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { esw_acl_ingress_ofld_rules_destroy(esw, vport); esw_acl_ingress_ofld_groups_destroy(vport); esw_acl_ingress_table_destroy(vport); } /* Caller must hold rtnl_lock */ int mlx5_esw_acl_ingress_vport_metadata_update(struct mlx5_eswitch *esw, u16 vport_num, u32 metadata) { struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); int err; if (WARN_ON_ONCE(IS_ERR(vport))) { esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); return PTR_ERR(vport); } esw_acl_ingress_ofld_rules_destroy(esw, vport); vport->metadata = metadata ? metadata : vport->default_metadata; /* Recreate ingress acl rules with vport->metadata */ err = esw_acl_ingress_ofld_rules_create(esw, vport); if (err) goto out; return 0; out: vport->metadata = vport->default_metadata; return err; } int mlx5_esw_acl_ingress_vport_drop_rule_create(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); if (IS_ERR(vport)) { esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); return PTR_ERR(vport); } return esw_acl_ingress_src_port_drop_create(esw, vport); } void mlx5_esw_acl_ingress_vport_drop_rule_destroy(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_vport *vport = mlx5_eswitch_get_vport(esw, vport_num); if (WARN_ON_ONCE(IS_ERR(vport))) { esw_warn(esw->dev, "vport(%d) invalid!\n", vport_num); return; } esw_acl_ingress_src_port_drop_destroy(esw, vport); }
/* SPDX-License-Identifier: GPL-2.0 */ /* * USBSS device controller driver. * Trace support header file. * * Copyright (C) 2018-2019 Cadence. * * Author: Pawel Laszczak <[email protected]> */ #undef TRACE_SYSTEM #define TRACE_SYSTEM cdns3 #if !defined(__LINUX_CDNS3_TRACE) || defined(TRACE_HEADER_MULTI_READ) #define __LINUX_CDNS3_TRACE #include <linux/types.h> #include <linux/tracepoint.h> #include <asm/byteorder.h> #include <linux/usb/ch9.h> #include "core.h" #include "cdns3-gadget.h" #include "cdns3-debug.h" #define CDNS3_MSG_MAX 500 TRACE_EVENT(cdns3_halt, TP_PROTO(struct cdns3_endpoint *ep_priv, u8 halt, u8 flush), TP_ARGS(ep_priv, halt, flush), TP_STRUCT__entry( __string(name, ep_priv->name) __field(u8, halt) __field(u8, flush) ), TP_fast_assign( __assign_str(name); __entry->halt = halt; __entry->flush = flush; ), TP_printk("Halt %s for %s: %s", __entry->flush ? " and flush" : "", __get_str(name), __entry->halt ? "set" : "cleared") ); TRACE_EVENT(cdns3_wa1, TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg), TP_ARGS(ep_priv, msg), TP_STRUCT__entry( __string(ep_name, ep_priv->name) __string(msg, msg) ), TP_fast_assign( __assign_str(ep_name); __assign_str(msg); ), TP_printk("WA1: %s %s", __get_str(ep_name), __get_str(msg)) ); TRACE_EVENT(cdns3_wa2, TP_PROTO(struct cdns3_endpoint *ep_priv, char *msg), TP_ARGS(ep_priv, msg), TP_STRUCT__entry( __string(ep_name, ep_priv->name) __string(msg, msg) ), TP_fast_assign( __assign_str(ep_name); __assign_str(msg); ), TP_printk("WA2: %s %s", __get_str(ep_name), __get_str(msg)) ); DECLARE_EVENT_CLASS(cdns3_log_doorbell, TP_PROTO(const char *ep_name, u32 ep_trbaddr), TP_ARGS(ep_name, ep_trbaddr), TP_STRUCT__entry( __string(name, ep_name) __field(u32, ep_trbaddr) ), TP_fast_assign( __assign_str(name); __entry->ep_trbaddr = ep_trbaddr; ), TP_printk("%s, ep_trbaddr %08x", __get_str(name), __entry->ep_trbaddr) ); DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_ep0, TP_PROTO(const char *ep_name, u32 ep_trbaddr), TP_ARGS(ep_name, ep_trbaddr) ); DEFINE_EVENT(cdns3_log_doorbell, cdns3_doorbell_epx, TP_PROTO(const char *ep_name, u32 ep_trbaddr), TP_ARGS(ep_name, ep_trbaddr) ); DECLARE_EVENT_CLASS(cdns3_log_usb_irq, TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists), TP_ARGS(priv_dev, usb_ists), TP_STRUCT__entry( __field(enum usb_device_speed, speed) __field(u32, usb_ists) ), TP_fast_assign( __entry->speed = cdns3_get_speed(priv_dev); __entry->usb_ists = usb_ists; ), TP_printk("%s", cdns3_decode_usb_irq(__get_buf(CDNS3_MSG_MAX), __entry->speed, __entry->usb_ists)) ); DEFINE_EVENT(cdns3_log_usb_irq, cdns3_usb_irq, TP_PROTO(struct cdns3_device *priv_dev, u32 usb_ists), TP_ARGS(priv_dev, usb_ists) ); DECLARE_EVENT_CLASS(cdns3_log_epx_irq, TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep), TP_ARGS(priv_dev, priv_ep), TP_STRUCT__entry( __string(ep_name, priv_ep->name) __field(u32, ep_sts) __field(u32, ep_traddr) __field(u32, ep_last_sid) __field(u32, use_streams) ), TP_fast_assign( __assign_str(ep_name); __entry->ep_sts = readl(&priv_dev->regs->ep_sts); __entry->ep_traddr = readl(&priv_dev->regs->ep_traddr); __entry->ep_last_sid = priv_ep->last_stream_id; __entry->use_streams = priv_ep->use_streams; ), TP_printk("%s, ep_traddr: %08x ep_last_sid: %08x use_streams: %d", cdns3_decode_epx_irq(__get_buf(CDNS3_MSG_MAX), __get_str(ep_name), __entry->ep_sts), __entry->ep_traddr, __entry->ep_last_sid, __entry->use_streams) ); DEFINE_EVENT(cdns3_log_epx_irq, cdns3_epx_irq, TP_PROTO(struct cdns3_device *priv_dev, struct cdns3_endpoint *priv_ep), 
TP_ARGS(priv_dev, priv_ep) ); DECLARE_EVENT_CLASS(cdns3_log_ep0_irq, TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts), TP_ARGS(priv_dev, ep_sts), TP_STRUCT__entry( __field(int, ep_dir) __field(u32, ep_sts) ), TP_fast_assign( __entry->ep_dir = priv_dev->selected_ep; __entry->ep_sts = ep_sts; ), TP_printk("%s", cdns3_decode_ep0_irq(__get_buf(CDNS3_MSG_MAX), __entry->ep_dir, __entry->ep_sts)) ); DEFINE_EVENT(cdns3_log_ep0_irq, cdns3_ep0_irq, TP_PROTO(struct cdns3_device *priv_dev, u32 ep_sts), TP_ARGS(priv_dev, ep_sts) ); DECLARE_EVENT_CLASS(cdns3_log_ctrl, TP_PROTO(struct usb_ctrlrequest *ctrl), TP_ARGS(ctrl), TP_STRUCT__entry( __field(u8, bRequestType) __field(u8, bRequest) __field(u16, wValue) __field(u16, wIndex) __field(u16, wLength) ), TP_fast_assign( __entry->bRequestType = ctrl->bRequestType; __entry->bRequest = ctrl->bRequest; __entry->wValue = le16_to_cpu(ctrl->wValue); __entry->wIndex = le16_to_cpu(ctrl->wIndex); __entry->wLength = le16_to_cpu(ctrl->wLength); ), TP_printk("%s", usb_decode_ctrl(__get_buf(CDNS3_MSG_MAX), CDNS3_MSG_MAX, __entry->bRequestType, __entry->bRequest, __entry->wValue, __entry->wIndex, __entry->wLength) ) ); DEFINE_EVENT(cdns3_log_ctrl, cdns3_ctrl_req, TP_PROTO(struct usb_ctrlrequest *ctrl), TP_ARGS(ctrl) ); DECLARE_EVENT_CLASS(cdns3_log_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req), TP_STRUCT__entry( __string(name, req->priv_ep->name) __field(struct cdns3_request *, req) __field(void *, buf) __field(unsigned int, actual) __field(unsigned int, length) __field(int, status) __field(int, zero) __field(int, short_not_ok) __field(int, no_interrupt) __field(int, start_trb) __field(int, end_trb) __field(int, flags) __field(unsigned int, stream_id) ), TP_fast_assign( __assign_str(name); __entry->req = req; __entry->buf = req->request.buf; __entry->actual = req->request.actual; __entry->length = req->request.length; __entry->status = req->request.status; __entry->zero = req->request.zero; __entry->short_not_ok = req->request.short_not_ok; __entry->no_interrupt = req->request.no_interrupt; __entry->start_trb = req->start_trb; __entry->end_trb = req->end_trb; __entry->flags = req->flags; __entry->stream_id = req->request.stream_id; ), TP_printk("%s: req: %p, req buff %p, length: %u/%u %s%s%s, status: %d," " trb: [start:%d, end:%d], flags:%x SID: %u", __get_str(name), __entry->req, __entry->buf, __entry->actual, __entry->length, __entry->zero ? "Z" : "z", __entry->short_not_ok ? "S" : "s", __entry->no_interrupt ? "I" : "i", __entry->status, __entry->start_trb, __entry->end_trb, __entry->flags, __entry->stream_id ) ); DEFINE_EVENT(cdns3_log_request, cdns3_alloc_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_log_request, cdns3_free_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_log_request, cdns3_ep_queue, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_log_request, cdns3_ep_dequeue, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_log_request, cdns3_gadget_giveback, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); TRACE_EVENT(cdns3_ep0_queue, TP_PROTO(struct cdns3_device *dev_priv, struct usb_request *request), TP_ARGS(dev_priv, request), TP_STRUCT__entry( __field(int, dir) __field(int, length) ), TP_fast_assign( __entry->dir = dev_priv->ep0_data_dir; __entry->length = request->length; ), TP_printk("Queue to ep0%s length: %u", __entry->dir ? 
"in" : "out", __entry->length) ); DECLARE_EVENT_CLASS(cdns3_stream_split_transfer_len, TP_PROTO(struct cdns3_request *req), TP_ARGS(req), TP_STRUCT__entry( __string(name, req->priv_ep->name) __field(struct cdns3_request *, req) __field(unsigned int, length) __field(unsigned int, actual) __field(unsigned int, stream_id) ), TP_fast_assign( __assign_str(name); __entry->req = req; __entry->actual = req->request.length; __entry->length = req->request.actual; __entry->stream_id = req->request.stream_id; ), TP_printk("%s: req: %p,request length: %u actual length: %u SID: %u", __get_str(name), __entry->req, __entry->length, __entry->actual, __entry->stream_id) ); DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_stream_split_transfer_len, cdns3_stream_transfer_split_next_part, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DECLARE_EVENT_CLASS(cdns3_log_aligned_request, TP_PROTO(struct cdns3_request *priv_req), TP_ARGS(priv_req), TP_STRUCT__entry( __string(name, priv_req->priv_ep->name) __field(struct usb_request *, req) __field(void *, buf) __field(dma_addr_t, dma) __field(void *, aligned_buf) __field(dma_addr_t, aligned_dma) __field(u32, aligned_buf_size) ), TP_fast_assign( __assign_str(name); __entry->req = &priv_req->request; __entry->buf = priv_req->request.buf; __entry->dma = priv_req->request.dma; __entry->aligned_buf = priv_req->aligned_buf->buf; __entry->aligned_dma = priv_req->aligned_buf->dma; __entry->aligned_buf_size = priv_req->aligned_buf->size; ), TP_printk("%s: req: %p, req buf %p, dma %pad a_buf %p a_dma %pad, size %d", __get_str(name), __entry->req, __entry->buf, &__entry->dma, __entry->aligned_buf, &__entry->aligned_dma, __entry->aligned_buf_size ) ); DEFINE_EVENT(cdns3_log_aligned_request, cdns3_free_aligned_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_log_aligned_request, cdns3_prepare_aligned_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DECLARE_EVENT_CLASS(cdns3_log_map_request, TP_PROTO(struct cdns3_request *priv_req), TP_ARGS(priv_req), TP_STRUCT__entry( __string(name, priv_req->priv_ep->name) __field(struct usb_request *, req) __field(void *, buf) __field(dma_addr_t, dma) ), TP_fast_assign( __assign_str(name); __entry->req = &priv_req->request; __entry->buf = priv_req->request.buf; __entry->dma = priv_req->request.dma; ), TP_printk("%s: req: %p, req buf %p, dma %p", __get_str(name), __entry->req, __entry->buf, &__entry->dma ) ); DEFINE_EVENT(cdns3_log_map_request, cdns3_map_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DEFINE_EVENT(cdns3_log_map_request, cdns3_mapped_request, TP_PROTO(struct cdns3_request *req), TP_ARGS(req) ); DECLARE_EVENT_CLASS(cdns3_log_trb, TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), TP_ARGS(priv_ep, trb), TP_STRUCT__entry( __string(name, priv_ep->name) __field(struct cdns3_trb *, trb) __field(u32, buffer) __field(u32, length) __field(u32, control) __field(u32, type) __field(unsigned int, last_stream_id) ), TP_fast_assign( __assign_str(name); __entry->trb = trb; __entry->buffer = le32_to_cpu(trb->buffer); __entry->length = le32_to_cpu(trb->length); __entry->control = le32_to_cpu(trb->control); __entry->type = usb_endpoint_type(priv_ep->endpoint.desc); __entry->last_stream_id = priv_ep->last_stream_id; ), TP_printk("%s: trb %p, dma buf: 0x%08x, size: %ld, burst: %d ctrl: 0x%08x (%s%s%s%s%s%s%s) SID:%lu LAST_SID:%u", __get_str(name), __entry->trb, __entry->buffer, 
TRB_LEN(__entry->length), (u8)TRB_BURST_LEN_GET(__entry->length), __entry->control, __entry->control & TRB_CYCLE ? "C=1, " : "C=0, ", __entry->control & TRB_TOGGLE ? "T=1, " : "T=0, ", __entry->control & TRB_ISP ? "ISP, " : "", __entry->control & TRB_FIFO_MODE ? "FIFO, " : "", __entry->control & TRB_CHAIN ? "CHAIN, " : "", __entry->control & TRB_IOC ? "IOC, " : "", TRB_FIELD_TO_TYPE(__entry->control) == TRB_NORMAL ? "Normal" : "LINK", TRB_FIELD_TO_STREAMID(__entry->control), __entry->last_stream_id ) ); DEFINE_EVENT(cdns3_log_trb, cdns3_prepare_trb, TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), TP_ARGS(priv_ep, trb) ); DEFINE_EVENT(cdns3_log_trb, cdns3_complete_trb, TP_PROTO(struct cdns3_endpoint *priv_ep, struct cdns3_trb *trb), TP_ARGS(priv_ep, trb) ); DECLARE_EVENT_CLASS(cdns3_log_ring, TP_PROTO(struct cdns3_endpoint *priv_ep), TP_ARGS(priv_ep), TP_STRUCT__entry( __dynamic_array(char, buffer, GET_TRBS_PER_SEGMENT(priv_ep->type) > TRBS_PER_SEGMENT ? CDNS3_MSG_MAX : (GET_TRBS_PER_SEGMENT(priv_ep->type) * 65) + CDNS3_MSG_MAX) ), TP_fast_assign( cdns3_dbg_ring(priv_ep, __get_str(buffer)); ), TP_printk("%s", __get_str(buffer)) ); DEFINE_EVENT(cdns3_log_ring, cdns3_ring, TP_PROTO(struct cdns3_endpoint *priv_ep), TP_ARGS(priv_ep) ); DECLARE_EVENT_CLASS(cdns3_log_ep, TP_PROTO(struct cdns3_endpoint *priv_ep), TP_ARGS(priv_ep), TP_STRUCT__entry( __string(name, priv_ep->name) __field(unsigned int, maxpacket) __field(unsigned int, maxpacket_limit) __field(unsigned int, max_streams) __field(unsigned int, use_streams) __field(unsigned int, maxburst) __field(unsigned int, flags) __field(unsigned int, dir) __field(u8, enqueue) __field(u8, dequeue) ), TP_fast_assign( __assign_str(name); __entry->maxpacket = priv_ep->endpoint.maxpacket; __entry->maxpacket_limit = priv_ep->endpoint.maxpacket_limit; __entry->max_streams = priv_ep->endpoint.max_streams; __entry->use_streams = priv_ep->use_streams; __entry->maxburst = priv_ep->endpoint.maxburst; __entry->flags = priv_ep->flags; __entry->dir = priv_ep->dir; __entry->enqueue = priv_ep->enqueue; __entry->dequeue = priv_ep->dequeue; ), TP_printk("%s: mps: %d/%d. streams: %d, stream enable: %d, burst: %d, " "enq idx: %d, deq idx: %d, flags %s%s%s%s%s%s%s%s, dir: %s", __get_str(name), __entry->maxpacket, __entry->maxpacket_limit, __entry->max_streams, __entry->use_streams, __entry->maxburst, __entry->enqueue, __entry->dequeue, __entry->flags & EP_ENABLED ? "EN | " : "", __entry->flags & EP_STALLED ? "STALLED | " : "", __entry->flags & EP_WEDGE ? "WEDGE | " : "", __entry->flags & EP_TRANSFER_STARTED ? "STARTED | " : "", __entry->flags & EP_UPDATE_EP_TRBADDR ? "UPD TRB | " : "", __entry->flags & EP_PENDING_REQUEST ? "REQ PEN | " : "", __entry->flags & EP_RING_FULL ? "RING FULL |" : "", __entry->flags & EP_CLAIMED ? "CLAIMED " : "", __entry->dir ? 
"IN" : "OUT" ) ); DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_enable, TP_PROTO(struct cdns3_endpoint *priv_ep), TP_ARGS(priv_ep) ); DEFINE_EVENT(cdns3_log_ep, cdns3_gadget_ep_disable, TP_PROTO(struct cdns3_endpoint *priv_ep), TP_ARGS(priv_ep) ); DECLARE_EVENT_CLASS(cdns3_log_request_handled, TP_PROTO(struct cdns3_request *priv_req, int current_index, int handled), TP_ARGS(priv_req, current_index, handled), TP_STRUCT__entry( __field(struct cdns3_request *, priv_req) __field(unsigned int, dma_position) __field(unsigned int, handled) __field(unsigned int, dequeue_idx) __field(unsigned int, enqueue_idx) __field(unsigned int, start_trb) __field(unsigned int, end_trb) ), TP_fast_assign( __entry->priv_req = priv_req; __entry->dma_position = current_index; __entry->handled = handled; __entry->dequeue_idx = priv_req->priv_ep->dequeue; __entry->enqueue_idx = priv_req->priv_ep->enqueue; __entry->start_trb = priv_req->start_trb; __entry->end_trb = priv_req->end_trb; ), TP_printk("Req: %p %s, DMA pos: %d, ep deq: %d, ep enq: %d," " start trb: %d, end trb: %d", __entry->priv_req, __entry->handled ? "handled" : "not handled", __entry->dma_position, __entry->dequeue_idx, __entry->enqueue_idx, __entry->start_trb, __entry->end_trb ) ); DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled, TP_PROTO(struct cdns3_request *priv_req, int current_index, int handled), TP_ARGS(priv_req, current_index, handled) ); #endif /* __LINUX_CDNS3_TRACE */ /* this part must be outside header guard */ #undef TRACE_INCLUDE_PATH #define TRACE_INCLUDE_PATH . #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_FILE cdns3-trace #include <trace/define_trace.h>
/* * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef RDMA_USER_IOCTL_CMDS_H #define RDMA_USER_IOCTL_CMDS_H #include <linux/types.h> #include <linux/ioctl.h> /* Documentation/userspace-api/ioctl/ioctl-number.rst */ #define RDMA_IOCTL_MAGIC 0x1b #define RDMA_VERBS_IOCTL \ _IOWR(RDMA_IOCTL_MAGIC, 1, struct ib_uverbs_ioctl_hdr) enum { /* User input */ UVERBS_ATTR_F_MANDATORY = 1U << 0, /* * Valid output bit should be ignored and considered set in * mandatory fields. This bit is kernel output. */ UVERBS_ATTR_F_VALID_OUTPUT = 1U << 1, }; struct ib_uverbs_attr { __u16 attr_id; /* command specific type attribute */ __u16 len; /* only for pointers and IDRs array */ __u16 flags; /* combination of UVERBS_ATTR_F_XXXX */ union { struct { __u8 elem_id; __u8 reserved; } enum_data; __u16 reserved; } attr_data; union { /* * ptr to command, inline data, idr/fd or * ptr to __u32 array of IDRs */ __aligned_u64 data; /* Used by FD_IN and FD_OUT */ __s64 data_s64; }; }; struct ib_uverbs_ioctl_hdr { __u16 length; __u16 object_id; __u16 method_id; __u16 num_attrs; __aligned_u64 reserved1; __u32 driver_id; __u32 reserved2; struct ib_uverbs_attr attrs[]; }; #endif
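/*
 * Illustrative userspace sketch, not part of the header above: laying out
 * a single-attribute RDMA_VERBS_IOCTL request on an opened uverbs command
 * fd. The object, method and attribute ids below are hypothetical
 * placeholders; real values are defined per verb by the rdma_core uAPI.
 */
#include <stdlib.h>
#include <sys/ioctl.h>
#include <rdma/rdma_user_ioctl_cmds.h>

static int issue_verbs_ioctl(int cmd_fd)
{
	const __u16 num_attrs = 1;
	size_t len = sizeof(struct ib_uverbs_ioctl_hdr) +
		     num_attrs * sizeof(struct ib_uverbs_attr);
	struct ib_uverbs_ioctl_hdr *hdr = calloc(1, len);
	int ret;

	if (!hdr)
		return -1;
	hdr->length = len;
	hdr->object_id = 1;		/* hypothetical object id */
	hdr->method_id = 1;		/* hypothetical method id */
	hdr->num_attrs = num_attrs;
	hdr->attrs[0].attr_id = 1;	/* hypothetical attribute id */
	hdr->attrs[0].flags = UVERBS_ATTR_F_MANDATORY;
	ret = ioctl(cmd_fd, RDMA_VERBS_IOCTL, hdr);
	free(hdr);
	return ret;
}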
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DAL_DSC_H__ #define __DAL_DSC_H__ #include "dc_dsc.h" #include "dc_hw_types.h" #include "dc_types.h" /* do not include any other headers * or else it might break Edid Utility functionality. */ /* Input parameters for configuring DSC from the outside of DSC */ struct dsc_config { uint32_t pic_width; uint32_t pic_height; enum dc_pixel_encoding pixel_encoding; enum dc_color_depth color_depth; /* Bits per component */ bool is_odm; struct dc_dsc_config dc_dsc_cfg; }; /* Output parameters for configuring DSC-related part of OPTC */ struct dsc_optc_config { uint32_t slice_width; /* Slice width in pixels */ uint32_t bytes_per_pixel; /* Bytes per pixel in u3.28 format */ bool is_pixel_format_444; /* 'true' if pixel format is 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4)' */ }; struct dcn_dsc_state { uint32_t dsc_clock_en; uint32_t dsc_slice_width; uint32_t dsc_bits_per_pixel; uint32_t dsc_slice_height; uint32_t dsc_pic_width; uint32_t dsc_pic_height; uint32_t dsc_slice_bpg_offset; uint32_t dsc_chunk_size; uint32_t dsc_fw_en; uint32_t dsc_opp_source; }; /* DSC encoder capabilities * They differ from the DPCD DSC caps because they are based on AMD DSC encoder caps. */ union dsc_enc_slice_caps { struct { uint8_t NUM_SLICES_1 : 1; uint8_t NUM_SLICES_2 : 1; uint8_t NUM_SLICES_3 : 1; /* This one is not per DSC spec, but our encoder supports it */ uint8_t NUM_SLICES_4 : 1; uint8_t NUM_SLICES_8 : 1; uint8_t NUM_SLICES_12 : 1; uint8_t NUM_SLICES_16 : 1; } bits; uint8_t raw; }; struct dsc_enc_caps { uint8_t dsc_version; union dsc_enc_slice_caps slice_caps; int32_t lb_bit_depth; bool is_block_pred_supported; union dsc_color_formats color_formats; union dsc_color_depth color_depth; int32_t max_total_throughput_mps; /* Maximum total throughput with all the slices combined */ int32_t max_slice_width; uint32_t bpp_increment_div; /* bpp increment divisor, e.g. 
if 16, it's 1/16th of a bit */ uint32_t edp_sink_max_bits_per_pixel; bool is_dp; }; struct dsc_funcs { void (*dsc_get_enc_caps)(struct dsc_enc_caps *dsc_enc_caps, int pixel_clock_100Hz); void (*dsc_read_state)(struct display_stream_compressor *dsc, struct dcn_dsc_state *s); bool (*dsc_validate_stream)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg); void (*dsc_set_config)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, struct dsc_optc_config *dsc_optc_cfg); bool (*dsc_get_packed_pps)(struct display_stream_compressor *dsc, const struct dsc_config *dsc_cfg, uint8_t *dsc_packed_pps); void (*dsc_enable)(struct display_stream_compressor *dsc, int opp_pipe); void (*dsc_disable)(struct display_stream_compressor *dsc); void (*dsc_disconnect)(struct display_stream_compressor *dsc); void (*dsc_wait_disconnect_pending_clear)(struct display_stream_compressor *dsc); }; #endif
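/*
 * Illustrative sketch, not part of the header above: interpreting the
 * u3.28 fixed-point bytes_per_pixel field of struct dsc_optc_config. The
 * top 3 bits carry the integer part and the low 28 bits the fraction, so
 * the value in bytes is bytes_per_pixel / 2^28.
 */
#include <stdint.h>

static inline unsigned int dsc_bytes_per_pixel_x1000(uint32_t bpp_u3_28)
{
	/* widen first so the multiply by 1000 cannot overflow 32 bits */
	return (unsigned int)(((uint64_t)bpp_u3_28 * 1000) >> 28);
}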
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2015 Pablo Neira Ayuso <[email protected]> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_offload.h> #include <net/netfilter/nf_dup_netdev.h> struct nft_dup_netdev { u8 sreg_dev; }; static void nft_dup_netdev_eval(const struct nft_expr *expr, struct nft_regs *regs, const struct nft_pktinfo *pkt) { struct nft_dup_netdev *priv = nft_expr_priv(expr); int oif = regs->data[priv->sreg_dev]; nf_dup_netdev_egress(pkt, oif); } static const struct nla_policy nft_dup_netdev_policy[NFTA_DUP_MAX + 1] = { [NFTA_DUP_SREG_DEV] = { .type = NLA_U32 }, }; static int nft_dup_netdev_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_dup_netdev *priv = nft_expr_priv(expr); if (tb[NFTA_DUP_SREG_DEV] == NULL) return -EINVAL; return nft_parse_register_load(ctx, tb[NFTA_DUP_SREG_DEV], &priv->sreg_dev, sizeof(int)); } static int nft_dup_netdev_dump(struct sk_buff *skb, const struct nft_expr *expr, bool reset) { struct nft_dup_netdev *priv = nft_expr_priv(expr); if (nft_dump_register(skb, NFTA_DUP_SREG_DEV, priv->sreg_dev)) goto nla_put_failure; return 0; nla_put_failure: return -1; } static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx, struct nft_flow_rule *flow, const struct nft_expr *expr) { const struct nft_dup_netdev *priv = nft_expr_priv(expr); int oif = ctx->regs[priv->sreg_dev].data.data[0]; return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif); } static bool nft_dup_netdev_offload_action(const struct nft_expr *expr) { return true; } static struct nft_expr_type nft_dup_netdev_type; static const struct nft_expr_ops nft_dup_netdev_ops = { .type = &nft_dup_netdev_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_dup_netdev)), .eval = nft_dup_netdev_eval, .init = nft_dup_netdev_init, .dump = nft_dup_netdev_dump, .reduce = NFT_REDUCE_READONLY, .offload = nft_dup_netdev_offload, .offload_action = nft_dup_netdev_offload_action, }; static struct nft_expr_type nft_dup_netdev_type __read_mostly = { .family = NFPROTO_NETDEV, .name = "dup", .ops = &nft_dup_netdev_ops, .policy = nft_dup_netdev_policy, .maxattr = NFTA_DUP_MAX, .owner = THIS_MODULE, }; static int __init nft_dup_netdev_module_init(void) { return nft_register_expr(&nft_dup_netdev_type); } static void __exit nft_dup_netdev_module_exit(void) { nft_unregister_expr(&nft_dup_netdev_type); } module_init(nft_dup_netdev_module_init); module_exit(nft_dup_netdev_module_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Pablo Neira Ayuso <[email protected]>"); MODULE_ALIAS_NFT_AF_EXPR(5, "dup"); MODULE_DESCRIPTION("nftables netdev packet duplication support");
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _ASM_X86_EMULATE_PREFIX_H #define _ASM_X86_EMULATE_PREFIX_H /* * Virt escape sequences to trigger instruction emulation; * ideally these would decode to 'whole' instruction and not destroy * the instruction stream; sadly this is not true for the 'kvm' one :/ */ #define __XEN_EMULATE_PREFIX 0x0f,0x0b,0x78,0x65,0x6e /* ud2 ; .ascii "xen" */ #define __KVM_EMULATE_PREFIX 0x0f,0x0b,0x6b,0x76,0x6d /* ud2 ; .ascii "kvm" */ #endif
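/*
 * Illustrative sketch, not part of the header above: forcing an emulated
 * CPUID in a Xen PV guest by emitting the escape bytes in front of the
 * instruction, in the spirit of the kernel's xen_cpuid(). Only meaningful
 * when running under a hypervisor that recognises the prefix.
 */
#include <linux/stringify.h>

static inline void xen_emulated_cpuid(unsigned int *eax, unsigned int *ebx,
				      unsigned int *ecx, unsigned int *edx)
{
	asm volatile(".byte " __stringify(__XEN_EMULATE_PREFIX) "\n\t"
		     "cpuid"
		     : "=a" (*eax), "=b" (*ebx), "=c" (*ecx), "=d" (*edx)
		     : "0" (*eax), "2" (*ecx));
}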
// SPDX-License-Identifier: GPL-2.0-or-later /* * smssdio.c - Siano 1xxx SDIO interface driver * * Copyright 2008 Pierre Ossman * * Based on code by Siano Mobile Silicon, Inc., * Copyright (C) 2006-2008, Uri Shkolnik * * This hardware is a bit odd in that all transfers should be done * to/from the SMSSDIO_DATA register, yet the "increase address" bit * always needs to be set. * * Also, buffers from the card are always aligned to 128 byte * boundaries. */ /* * General cleanup notes: * * - only typedefs should be named *_t * * - use ERR_PTR and friends for smscore_register_device() * * - smscore_getbuffer should zero fields * * - Fix stop command */ #include "smscoreapi.h" #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/module.h> #include "sms-cards.h" #include "smsendian.h" /* Registers */ #define SMSSDIO_DATA 0x00 #define SMSSDIO_INT 0x04 #define SMSSDIO_BLOCK_SIZE 128 static const struct sdio_device_id smssdio_ids[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_STELLAR), .driver_data = SMS1XXX_BOARD_SIANO_STELLAR}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_A0), .driver_data = SMS1XXX_BOARD_SIANO_NOVA_A}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_NOVA_B0), .driver_data = SMS1XXX_BOARD_SIANO_NOVA_B}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VEGA_A0), .driver_data = SMS1XXX_BOARD_SIANO_VEGA}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_VENICE), .driver_data = SMS1XXX_BOARD_SIANO_VEGA}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_MING), .driver_data = SMS1XXX_BOARD_SIANO_MING}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_PELE), .driver_data = SMS1XXX_BOARD_SIANO_PELE}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_RIO), .driver_data = SMS1XXX_BOARD_SIANO_RIO}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_DENVER_2160), .driver_data = SMS1XXX_BOARD_SIANO_DENVER_2160}, {SDIO_DEVICE(SDIO_VENDOR_ID_SIANO, SDIO_DEVICE_ID_SIANO_DENVER_1530), .driver_data = SMS1XXX_BOARD_SIANO_DENVER_1530}, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, smssdio_ids); struct smssdio_device { struct sdio_func *func; struct smscore_device_t *coredev; struct smscore_buffer_t *split_cb; }; /*******************************************************************/ /* Siano core callbacks */ /*******************************************************************/ static int smssdio_sendrequest(void *context, void *buffer, size_t size) { int ret = 0; struct smssdio_device *smsdev; smsdev = context; sdio_claim_host(smsdev->func); smsendian_handle_tx_message((struct sms_msg_data *) buffer); while (size >= smsdev->func->cur_blksize) { ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, buffer, smsdev->func->cur_blksize); if (ret) goto out; buffer += smsdev->func->cur_blksize; size -= smsdev->func->cur_blksize; } if (size) { ret = sdio_memcpy_toio(smsdev->func, SMSSDIO_DATA, buffer, size); } out: sdio_release_host(smsdev->func); return ret; } /*******************************************************************/ /* SDIO callbacks */ /*******************************************************************/ static void smssdio_interrupt(struct sdio_func *func) { int ret; struct smssdio_device *smsdev; struct smscore_buffer_t *cb; struct sms_msg_hdr *hdr; size_t size; smsdev = sdio_get_drvdata(func); /* * The interrupt register has no defined meaning.
It is just * a way of turning of the level triggered interrupt. */ (void)sdio_readb(func, SMSSDIO_INT, &ret); if (ret) { pr_err("Unable to read interrupt register!\n"); return; } if (smsdev->split_cb == NULL) { cb = smscore_getbuffer(smsdev->coredev); if (!cb) { pr_err("Unable to allocate data buffer!\n"); return; } ret = sdio_memcpy_fromio(smsdev->func, cb->p, SMSSDIO_DATA, SMSSDIO_BLOCK_SIZE); if (ret) { pr_err("Error %d reading initial block!\n", ret); return; } hdr = cb->p; if (hdr->msg_flags & MSG_HDR_FLAG_SPLIT_MSG) { smsdev->split_cb = cb; return; } if (hdr->msg_length > smsdev->func->cur_blksize) size = hdr->msg_length - smsdev->func->cur_blksize; else size = 0; } else { cb = smsdev->split_cb; hdr = cb->p; size = hdr->msg_length - sizeof(struct sms_msg_hdr); smsdev->split_cb = NULL; } if (size) { void *buffer; buffer = cb->p + (hdr->msg_length - size); size = ALIGN(size, SMSSDIO_BLOCK_SIZE); BUG_ON(smsdev->func->cur_blksize != SMSSDIO_BLOCK_SIZE); /* * First attempt to transfer all of it in one go... */ ret = sdio_memcpy_fromio(smsdev->func, buffer, SMSSDIO_DATA, size); if (ret && ret != -EINVAL) { smscore_putbuffer(smsdev->coredev, cb); pr_err("Error %d reading data from card!\n", ret); return; } /* * ..then fall back to one block at a time if that is * not possible... * * (we have to do this manually because of the * problem with the "increase address" bit) */ if (ret == -EINVAL) { while (size) { ret = sdio_memcpy_fromio(smsdev->func, buffer, SMSSDIO_DATA, smsdev->func->cur_blksize); if (ret) { smscore_putbuffer(smsdev->coredev, cb); pr_err("Error %d reading data from card!\n", ret); return; } buffer += smsdev->func->cur_blksize; if (size > smsdev->func->cur_blksize) size -= smsdev->func->cur_blksize; else size = 0; } } } cb->size = hdr->msg_length; cb->offset = 0; smsendian_handle_rx_message((struct sms_msg_data *) cb->p); smscore_onresponse(smsdev->coredev, cb); } static int smssdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret; int board_id; struct smssdio_device *smsdev; struct smsdevice_params_t params; board_id = id->driver_data; smsdev = kzalloc(sizeof(struct smssdio_device), GFP_KERNEL); if (!smsdev) return -ENOMEM; smsdev->func = func; memset(&params, 0, sizeof(struct smsdevice_params_t)); params.device = &func->dev; params.buffer_size = 0x5000; /* ?? */ params.num_buffers = 22; /* ?? */ params.context = smsdev; snprintf(params.devpath, sizeof(params.devpath), "sdio\\%s", sdio_func_id(func)); params.sendrequest_handler = smssdio_sendrequest; params.device_type = sms_get_board(board_id)->type; if (params.device_type != SMS_STELLAR) params.flags |= SMS_DEVICE_FAMILY2; else { /* * FIXME: Stellar needs special handling... 
*/ ret = -ENODEV; goto free; } ret = smscore_register_device(&params, &smsdev->coredev, GFP_DMA, NULL); if (ret < 0) goto free; smscore_set_board_id(smsdev->coredev, board_id); sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) goto release; ret = sdio_set_block_size(func, SMSSDIO_BLOCK_SIZE); if (ret) goto disable; ret = sdio_claim_irq(func, smssdio_interrupt); if (ret) goto disable; sdio_set_drvdata(func, smsdev); sdio_release_host(func); ret = smscore_start_device(smsdev->coredev); if (ret < 0) goto reclaim; return 0; reclaim: sdio_claim_host(func); sdio_release_irq(func); disable: sdio_disable_func(func); release: sdio_release_host(func); smscore_unregister_device(smsdev->coredev); free: kfree(smsdev); return ret; } static void smssdio_remove(struct sdio_func *func) { struct smssdio_device *smsdev; smsdev = sdio_get_drvdata(func); /* FIXME: racy! */ if (smsdev->split_cb) smscore_putbuffer(smsdev->coredev, smsdev->split_cb); smscore_unregister_device(smsdev->coredev); sdio_claim_host(func); sdio_release_irq(func); sdio_disable_func(func); sdio_release_host(func); kfree(smsdev); } static struct sdio_driver smssdio_driver = { .name = "smssdio", .id_table = smssdio_ids, .probe = smssdio_probe, .remove = smssdio_remove, }; module_sdio_driver(smssdio_driver); MODULE_DESCRIPTION("Siano SMS1xxx SDIO driver"); MODULE_AUTHOR("Pierre Ossman"); MODULE_LICENSE("GPL");
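/*
 * Illustrative sketch, not part of the original driver: the size math the
 * interrupt handler performs when a message spills past the first block.
 * The helper name is hypothetical; SMSSDIO_BLOCK_SIZE and ALIGN() are the
 * real symbols used above.
 */
static size_t smssdio_remaining_read_size(size_t msg_length)
{
	size_t remainder;

	/* The first SMSSDIO_BLOCK_SIZE bytes were read along with the header. */
	if (msg_length <= SMSSDIO_BLOCK_SIZE)
		return 0;

	remainder = msg_length - SMSSDIO_BLOCK_SIZE;

	/* The card pads every transfer to full 128-byte blocks. */
	return ALIGN(remainder, SMSSDIO_BLOCK_SIZE);
}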
// SPDX-License-Identifier: GPL-2.0
/*
 * Power off through MediaTek PMIC
 *
 * Copyright (C) 2018 MediaTek Inc.
 *
 * Author: Sean Wang <[email protected]>
 *
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mfd/mt6397/core.h>
#include <linux/mfd/mt6397/rtc.h>
#include <linux/reboot.h>

struct mt6323_pwrc {
	struct device *dev;
	struct regmap *regmap;
	u32 base;
};

static int mt6323_do_pwroff(struct sys_off_data *data)
{
	struct mt6323_pwrc *pwrc = data->cb_data;
	unsigned int val;
	int ret;

	regmap_write(pwrc->regmap, pwrc->base + RTC_BBPU, RTC_BBPU_KEY);
	regmap_write(pwrc->regmap, pwrc->base + RTC_WRTGR_MT6323, 1);

	ret = regmap_read_poll_timeout(pwrc->regmap,
				       pwrc->base + RTC_BBPU, val,
				       !(val & RTC_BBPU_CBUSY),
				       MTK_RTC_POLL_DELAY_US,
				       MTK_RTC_POLL_TIMEOUT);
	if (ret)
		dev_err(pwrc->dev, "failed to write BBPU: %d\n", ret);

	/* Wait some time for the system to go down; if it does not, warn. */
	mdelay(1000);

	WARN_ONCE(1, "Unable to power off system\n");

	return NOTIFY_DONE;
}

static int mt6323_pwrc_probe(struct platform_device *pdev)
{
	struct mt6397_chip *mt6397_chip = dev_get_drvdata(pdev->dev.parent);
	struct mt6323_pwrc *pwrc;
	struct resource *res;
	int ret;

	pwrc = devm_kzalloc(&pdev->dev, sizeof(*pwrc), GFP_KERNEL);
	if (!pwrc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	pwrc->base = res->start;
	pwrc->regmap = mt6397_chip->regmap;
	pwrc->dev = &pdev->dev;

	ret = devm_register_sys_off_handler(pwrc->dev,
					    SYS_OFF_MODE_POWER_OFF,
					    SYS_OFF_PRIO_DEFAULT,
					    mt6323_do_pwroff,
					    pwrc);
	if (ret)
		return dev_err_probe(pwrc->dev, ret,
				     "failed to register power-off handler\n");

	return 0;
}

static const struct of_device_id mt6323_pwrc_dt_match[] = {
	{ .compatible = "mediatek,mt6323-pwrc" },
	{},
};
MODULE_DEVICE_TABLE(of, mt6323_pwrc_dt_match);

static struct platform_driver mt6323_pwrc_driver = {
	.probe = mt6323_pwrc_probe,
	.driver = {
		.name = "mt6323-pwrc",
		.of_match_table = mt6323_pwrc_dt_match,
	},
};
module_platform_driver(mt6323_pwrc_driver);

MODULE_DESCRIPTION("Poweroff driver for MT6323 PMIC");
MODULE_AUTHOR("Sean Wang <[email protected]>");
MODULE_LICENSE("GPL");
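/*
 * Illustrative sketch, not part of the original driver: the power-off path
 * above follows the MT6323 RTC write handshake: keyed write, commit via
 * WRTGR, then poll CBUSY until the PMIC latches the request. The helper
 * below is hypothetical; the register names and poll constants are the
 * real ones used in mt6323_do_pwroff().
 */
static int mt6323_rtc_keyed_write(struct regmap *regmap, u32 base,
				  u32 reg, u32 val)
{
	unsigned int status;

	regmap_write(regmap, base + reg, val);
	/* Every RTC register write must be committed through WRTGR. */
	regmap_write(regmap, base + RTC_WRTGR_MT6323, 1);
	/* Wait for the bus-busy bit to clear before the next access. */
	return regmap_read_poll_timeout(regmap, base + RTC_BBPU, status,
					!(status & RTC_BBPU_CBUSY),
					MTK_RTC_POLL_DELAY_US,
					MTK_RTC_POLL_TIMEOUT);
}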
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (C) 2019 PHYTEC Messtechnik GmbH
 * Author: Stefan Riedmueller <[email protected]>
 */

#include "imx6ul-phytec-segin.dtsi"

/ {
	model = "PHYTEC phyBOARD-Segin i.MX6 ULL";
	compatible = "phytec,imx6ull-pbacd-10", "phytec,imx6ull-pcl063",
		     "fsl,imx6ull";
};

&iomuxc {
	/delete-node/ flexcan1engrp;
	/delete-node/ rtcintgrp;
};

&iomuxc_snvs {
	pinctrl_flexcan1_en: flexcan1engrp {
		fsl,pins = <
			MX6ULL_PAD_SNVS_TAMPER2__GPIO5_IO02	0x17059
		>;
	};

	pinctrl_rtc_int: rtcintgrp {
		fsl,pins = <
			MX6ULL_PAD_SNVS_TAMPER1__GPIO5_IO01	0x17059
		>;
	};
};
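/*
 * Illustrative sketch, not from the original tree: a hypothetical consumer
 * of the pinctrl_rtc_int group defined above. The I2C bus, device address,
 * compatible string and GPIO line are made up for illustration, and
 * IRQ_TYPE_LEVEL_LOW assumes dt-bindings/interrupt-controller/irq.h is
 * included.
 */
&i2c1 {
	rtc@68 {
		compatible = "microcrystal,rv4162";
		reg = <0x68>;
		pinctrl-names = "default";
		pinctrl-0 = <&pinctrl_rtc_int>;
		interrupt-parent = <&gpio5>;
		interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
	};
};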
// SPDX-License-Identifier: GPL-2.0
/*
 * arch/sh/boards/renesas/r7780rp/psw.c
 *
 * push switch support for RDBRP-1/RDBREVRP-1 debug boards.
 *
 * Copyright (C) 2006 Paul Mundt
 */
#include <linux/io.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <mach/highlander.h>
#include <asm/push-switch.h>

static irqreturn_t psw_irq_handler(int irq, void *arg)
{
	struct platform_device *pdev = arg;
	struct push_switch *psw = platform_get_drvdata(pdev);
	struct push_switch_platform_info *psw_info = pdev->dev.platform_data;
	unsigned int l, mask;
	int ret = 0;

	l = __raw_readw(PA_DBSW);

	/* Nothing to do if there's no state change */
	if (psw->state) {
		ret = 1;
		goto out;
	}

	mask = l & 0x70;
	/* Figure out who raised it */
	if (mask & (1 << psw_info->bit)) {
		psw->state = !!(mask & (1 << psw_info->bit));
		if (psw->state)	/* debounce */
			mod_timer(&psw->debounce, jiffies + 50);

		ret = 1;
	}

out:
	/* Clear the switch IRQs */
	l |= (0x7 << 12);
	__raw_writew(l, PA_DBSW);

	return IRQ_RETVAL(ret);
}

static struct resource psw_resources[] = {
	[0] = {
		.start	= IRQ_PSW,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct push_switch_platform_info s2_platform_data = {
	.name		= "s2",
	.bit		= 6,
	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			  IRQF_SHARED,
	.irq_handler	= psw_irq_handler,
};

static struct platform_device s2_switch_device = {
	.name		= "push-switch",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(psw_resources),
	.resource	= psw_resources,
	.dev		= {
		.platform_data = &s2_platform_data,
	},
};

static struct push_switch_platform_info s3_platform_data = {
	.name		= "s3",
	.bit		= 5,
	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			  IRQF_SHARED,
	.irq_handler	= psw_irq_handler,
};

static struct platform_device s3_switch_device = {
	.name		= "push-switch",
	.id		= 1,
	.num_resources	= ARRAY_SIZE(psw_resources),
	.resource	= psw_resources,
	.dev		= {
		.platform_data = &s3_platform_data,
	},
};

static struct push_switch_platform_info s4_platform_data = {
	.name		= "s4",
	.bit		= 4,
	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			  IRQF_SHARED,
	.irq_handler	= psw_irq_handler,
};

static struct platform_device s4_switch_device = {
	.name		= "push-switch",
	.id		= 2,
	.num_resources	= ARRAY_SIZE(psw_resources),
	.resource	= psw_resources,
	.dev		= {
		.platform_data = &s4_platform_data,
	},
};

static struct platform_device *psw_devices[] = {
	&s2_switch_device, &s3_switch_device, &s4_switch_device,
};

static int __init psw_init(void)
{
	return platform_add_devices(psw_devices, ARRAY_SIZE(psw_devices));
}
module_init(psw_init);
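/*
 * Illustrative sketch, not part of the original board file: a fourth
 * switch would follow the same pattern; the name, bit position and id
 * below are hypothetical, and the new device would simply be appended
 * to psw_devices[] above.
 */
static struct push_switch_platform_info s5_platform_data = {
	.name		= "s5",
	.bit		= 3,
	.irq_flags	= IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
			  IRQF_SHARED,
	.irq_handler	= psw_irq_handler,
};

static struct platform_device s5_switch_device = {
	.name		= "push-switch",
	.id		= 3,
	.num_resources	= ARRAY_SIZE(psw_resources),
	.resource	= psw_resources,
	.dev		= {
		.platform_data = &s5_platform_data,
	},
};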
// SPDX-License-Identifier: GPL-2.0-only
#include <stddef.h>

/*
 * Override the "basic" built-in string helpers so that they can be used in
 * guest code. KVM selftests don't support dynamic loading in guest code and
 * will jump into the weeds if the compiler decides to insert an out-of-line
 * call via the PLT.
 */
int memcmp(const void *cs, const void *ct, size_t count)
{
	const unsigned char *su1, *su2;
	int res = 0;

	for (su1 = cs, su2 = ct; 0 < count; ++su1, ++su2, count--) {
		if ((res = *su1 - *su2) != 0)
			break;
	}
	return res;
}

void *memcpy(void *dest, const void *src, size_t count)
{
	char *tmp = dest;
	const char *s = src;

	while (count--)
		*tmp++ = *s++;
	return dest;
}

void *memset(void *s, int c, size_t count)
{
	char *xs = s;

	while (count--)
		*xs++ = c;
	return s;
}

size_t strnlen(const char *s, size_t count)
{
	const char *sc;

	for (sc = s; count-- && *sc != '\0'; ++sc)
		/* nothing */;
	return sc - s;
}
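/*
 * Illustrative sketch, not part of the original file: a minimal
 * freestanding self-check of the helpers above (no libc, same style as
 * guest code). The function name is hypothetical.
 */
static int string_override_selftest(void)
{
	char buf[8];

	memset(buf, 'x', sizeof(buf));
	memcpy(buf, "ab", 2);

	if (memcmp(buf, "ab", 2) != 0)
		return 1;
	if (strnlen("abc", sizeof(buf)) != 3)
		return 1;

	return 0;
}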
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2015 - 2016 Thomas Körper, esd electronic system design gmbh * Copyright (C) 2017 - 2023 Stefan Mätje, esd electronics gmbh */ #include <linux/can/dev.h> #include <linux/can.h> #include <linux/can/netlink.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include "esdacc.h" #define ESD_PCI_DEVICE_ID_PCIE402 0x0402 #define PCI402_FPGA_VER_MIN 0x003d #define PCI402_MAX_CORES 6 #define PCI402_BAR 0 #define PCI402_IO_OV_OFFS 0 #define PCI402_IO_PCIEP_OFFS 0x10000 #define PCI402_IO_LEN_TOTAL 0x20000 #define PCI402_IO_LEN_CORE 0x2000 #define PCI402_PCICFG_MSICAP 0x50 #define PCI402_DMA_MASK DMA_BIT_MASK(32) #define PCI402_DMA_SIZE ALIGN(0x10000, PAGE_SIZE) #define PCI402_PCIEP_OF_INT_ENABLE 0x0050 #define PCI402_PCIEP_OF_BM_ADDR_LO 0x1000 #define PCI402_PCIEP_OF_BM_ADDR_HI 0x1004 #define PCI402_PCIEP_OF_MSI_ADDR_LO 0x1008 #define PCI402_PCIEP_OF_MSI_ADDR_HI 0x100c struct pci402_card { /* Actually mapped io space, all other iomem derived from this */ void __iomem *addr; void __iomem *addr_pciep; void *dma_buf; dma_addr_t dma_hnd; struct acc_ov ov; struct acc_core *cores; bool msi_enabled; }; /* The BTR register capabilities described by the can_bittiming_const structures * below are valid since esdACC version 0x0032. */ /* Used if the esdACC FPGA is built as CAN-Classic version. */ static const struct can_bittiming_const pci402_bittiming_const = { .name = "esd_402", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 512, .brp_inc = 1, }; /* Used if the esdACC FPGA is built as CAN-FD version. 
*/ static const struct can_bittiming_const pci402_bittiming_const_canfd = { .name = "esd_402fd", .tseg1_min = 1, .tseg1_max = 256, .tseg2_min = 1, .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; static const struct net_device_ops pci402_acc_netdev_ops = { .ndo_open = acc_open, .ndo_stop = acc_close, .ndo_start_xmit = acc_start_xmit, .ndo_change_mtu = can_change_mtu, .ndo_eth_ioctl = can_eth_ioctl_hwts, }; static const struct ethtool_ops pci402_acc_ethtool_ops = { .get_ts_info = can_ethtool_op_get_ts_info_hwts, }; static irqreturn_t pci402_interrupt(int irq, void *dev_id) { struct pci_dev *pdev = dev_id; struct pci402_card *card = pci_get_drvdata(pdev); irqreturn_t irq_status; irq_status = acc_card_interrupt(&card->ov, card->cores); return irq_status; } static int pci402_set_msiconfig(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); u32 addr_lo_offs = 0; u32 addr_lo = 0; u32 addr_hi = 0; u32 data = 0; u16 csr = 0; int err; /* The FPGA hard IP PCIe core implements a 64-bit MSI Capability * Register Format */ err = pci_read_config_word(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_FLAGS, &csr); if (err) goto failed; err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO, &addr_lo); if (err) goto failed; err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI, &addr_hi); if (err) goto failed; err = pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_DATA_64, &data); if (err) goto failed; addr_lo_offs = addr_lo & 0x0000ffff; addr_lo &= 0xffff0000; if (addr_hi) addr_lo |= 1; /* To enable 64-Bit addressing in PCIe endpoint */ if (!(csr & PCI_MSI_FLAGS_ENABLE)) { err = -EINVAL; goto failed; } iowrite32(addr_lo, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_LO); iowrite32(addr_hi, card->addr_pciep + PCI402_PCIEP_OF_MSI_ADDR_HI); acc_ov_write32(&card->ov, ACC_OV_OF_MSI_ADDRESSOFFSET, addr_lo_offs); acc_ov_write32(&card->ov, ACC_OV_OF_MSI_DATA, data); return 0; failed: pci_warn(pdev, "Error while setting MSI configuration:\n" "CSR: 0x%.4x, addr: 0x%.8x%.8x, offs: 0x%.4x, data: 0x%.8x\n", csr, addr_hi, addr_lo, addr_lo_offs, data); return err; } static int pci402_init_card(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); card->ov.addr = card->addr + PCI402_IO_OV_OFFS; card->addr_pciep = card->addr + PCI402_IO_PCIEP_OFFS; acc_reset_fpga(&card->ov); acc_init_ov(&card->ov, &pdev->dev); if (card->ov.version < PCI402_FPGA_VER_MIN) { pci_err(pdev, "esdACC version (0x%.4x) outdated, please update\n", card->ov.version); return -EINVAL; } if (card->ov.timestamp_frequency != ACC_TS_FREQ_80MHZ) { pci_err(pdev, "esdACC timestamp frequency of %uHz not supported by driver. Aborted.\n", card->ov.timestamp_frequency); return -EINVAL; } if (card->ov.active_cores > PCI402_MAX_CORES) { pci_err(pdev, "Card with %u active cores not supported by driver. Aborted.\n", card->ov.active_cores); return -EINVAL; } card->cores = devm_kcalloc(&pdev->dev, card->ov.active_cores, sizeof(struct acc_core), GFP_KERNEL); if (!card->cores) return -ENOMEM; if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) { pci_warn(pdev, "esdACC with CAN-FD feature detected. 
This driver doesn't support CAN-FD yet.\n"); } #ifdef __LITTLE_ENDIAN /* So card converts all busmastered data to LE for us: */ acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_ENDIAN_LITTLE); #endif return 0; } static int pci402_init_interrupt(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); int err; err = pci_enable_msi(pdev); if (!err) { err = pci402_set_msiconfig(pdev); if (!err) { card->msi_enabled = true; acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_MSI_ENABLE); pci_dbg(pdev, "MSI preparation done\n"); } } err = devm_request_irq(&pdev->dev, pdev->irq, pci402_interrupt, IRQF_SHARED, dev_name(&pdev->dev), pdev); if (err) goto failure_msidis; iowrite32(1, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE); return 0; failure_msidis: if (card->msi_enabled) { acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_MSI_ENABLE); pci_disable_msi(pdev); card->msi_enabled = false; } return err; } static void pci402_finish_interrupt(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_INT_ENABLE); devm_free_irq(&pdev->dev, pdev->irq, pdev); if (card->msi_enabled) { acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_MSI_ENABLE); pci_disable_msi(pdev); card->msi_enabled = false; } } static int pci402_init_dma(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); int err; err = dma_set_coherent_mask(&pdev->dev, PCI402_DMA_MASK); if (err) { pci_err(pdev, "DMA set mask failed!\n"); return err; } /* The esdACC DMA engine needs the DMA buffer aligned to a 64k * boundary. The DMA API guarantees to align the returned buffer to the * smallest PAGE_SIZE order which is greater than or equal to the * requested size. With PCI402_DMA_SIZE == 64kB this suffices here. 
*/ card->dma_buf = dma_alloc_coherent(&pdev->dev, PCI402_DMA_SIZE, &card->dma_hnd, GFP_KERNEL); if (!card->dma_buf) return -ENOMEM; acc_init_bm_ptr(&card->ov, card->cores, card->dma_buf); iowrite32(card->dma_hnd, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO); iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI); pci_set_master(pdev); acc_ov_set_bits(&card->ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_BM_ENABLE); return 0; } static void pci402_finish_dma(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); int i; acc_ov_clear_bits(&card->ov, ACC_OV_OF_MODE, ACC_OV_REG_MODE_MASK_BM_ENABLE); pci_clear_master(pdev); iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_LO); iowrite32(0, card->addr_pciep + PCI402_PCIEP_OF_BM_ADDR_HI); card->ov.bmfifo.messages = NULL; card->ov.bmfifo.irq_cnt = NULL; for (i = 0; i < card->ov.active_cores; i++) { struct acc_core *core = &card->cores[i]; core->bmfifo.messages = NULL; core->bmfifo.irq_cnt = NULL; } dma_free_coherent(&pdev->dev, PCI402_DMA_SIZE, card->dma_buf, card->dma_hnd); card->dma_buf = NULL; } static void pci402_unregister_core(struct acc_core *core) { netdev_info(core->netdev, "unregister\n"); unregister_candev(core->netdev); free_candev(core->netdev); core->netdev = NULL; } static int pci402_init_cores(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); int err; int i; for (i = 0; i < card->ov.active_cores; i++) { struct acc_core *core = &card->cores[i]; struct acc_net_priv *priv; struct net_device *netdev; u32 fifo_config; core->addr = card->ov.addr + (i + 1) * PCI402_IO_LEN_CORE; fifo_config = acc_read32(core, ACC_CORE_OF_TXFIFO_CONFIG); core->tx_fifo_size = (fifo_config >> 24); if (core->tx_fifo_size <= 1) { pci_err(pdev, "Invalid tx_fifo_size!\n"); err = -EINVAL; goto failure; } netdev = alloc_candev(sizeof(*priv), core->tx_fifo_size); if (!netdev) { err = -ENOMEM; goto failure; } core->netdev = netdev; netdev->flags |= IFF_ECHO; netdev->dev_port = i; netdev->netdev_ops = &pci402_acc_netdev_ops; netdev->ethtool_ops = &pci402_acc_ethtool_ops; SET_NETDEV_DEV(netdev, &pdev->dev); priv = netdev_priv(netdev); priv->can.clock.freq = card->ov.core_frequency; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_CC_LEN8_DLC; if (card->ov.features & ACC_OV_REG_FEAT_MASK_DAR) priv->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; if (card->ov.features & ACC_OV_REG_FEAT_MASK_CANFD) priv->can.bittiming_const = &pci402_bittiming_const_canfd; else priv->can.bittiming_const = &pci402_bittiming_const; priv->can.do_set_bittiming = acc_set_bittiming; priv->can.do_set_mode = acc_set_mode; priv->can.do_get_berr_counter = acc_get_berr_counter; priv->core = core; priv->ov = &card->ov; err = register_candev(netdev); if (err) { free_candev(core->netdev); core->netdev = NULL; goto failure; } netdev_info(netdev, "registered\n"); } return 0; failure: for (i--; i >= 0; i--) pci402_unregister_core(&card->cores[i]); return err; } static void pci402_finish_cores(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); int i; for (i = 0; i < card->ov.active_cores; i++) pci402_unregister_core(&card->cores[i]); } static int pci402_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct pci402_card *card = NULL; int err; err = pci_enable_device(pdev); if (err) return err; card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); if (!card) { err = -ENOMEM; goto failure_disable_pci; } pci_set_drvdata(pdev, card); err = 
pci_request_regions(pdev, pci_name(pdev)); if (err) goto failure_disable_pci; card->addr = pci_iomap(pdev, PCI402_BAR, PCI402_IO_LEN_TOTAL); if (!card->addr) { err = -ENOMEM; goto failure_release_regions; } err = pci402_init_card(pdev); if (err) goto failure_unmap; err = pci402_init_dma(pdev); if (err) goto failure_unmap; err = pci402_init_interrupt(pdev); if (err) goto failure_finish_dma; err = pci402_init_cores(pdev); if (err) goto failure_finish_interrupt; return 0; failure_finish_interrupt: pci402_finish_interrupt(pdev); failure_finish_dma: pci402_finish_dma(pdev); failure_unmap: pci_iounmap(pdev, card->addr); failure_release_regions: pci_release_regions(pdev); failure_disable_pci: pci_disable_device(pdev); return err; } static void pci402_remove(struct pci_dev *pdev) { struct pci402_card *card = pci_get_drvdata(pdev); pci402_finish_interrupt(pdev); pci402_finish_cores(pdev); pci402_finish_dma(pdev); pci_iounmap(pdev, card->addr); pci_release_regions(pdev); pci_disable_device(pdev); } static const struct pci_device_id pci402_tbl[] = { { .vendor = PCI_VENDOR_ID_ESDGMBH, .device = ESD_PCI_DEVICE_ID_PCIE402, .subvendor = PCI_VENDOR_ID_ESDGMBH, .subdevice = PCI_ANY_ID, }, { 0, } }; MODULE_DEVICE_TABLE(pci, pci402_tbl); static struct pci_driver pci402_driver = { .name = KBUILD_MODNAME, .id_table = pci402_tbl, .probe = pci402_probe, .remove = pci402_remove, }; module_pci_driver(pci402_driver); MODULE_DESCRIPTION("Socket-CAN driver for esd CAN 402 card family with esdACC core on PCIe"); MODULE_AUTHOR("Thomas Körper <[email protected]>"); MODULE_AUTHOR("Stefan Mätje <[email protected]>"); MODULE_LICENSE("GPL");
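/*
 * Illustrative sketch, not part of the original driver: the MSI mirroring
 * in pci402_set_msiconfig() boils down to reading back what the PCI core
 * programmed into the standard MSI capability and handing it to the FPGA
 * endpoint. This is a condensed, hypothetical version with error handling
 * elided; the real function also forwards the MSI data word and the
 * low-address offset through the esdACC overview block.
 */
static void example_mirror_msi(struct pci_dev *pdev, void __iomem *pciep)
{
	u32 addr_lo = 0, addr_hi = 0;

	pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_LO,
			      &addr_lo);
	pci_read_config_dword(pdev, PCI402_PCICFG_MSICAP + PCI_MSI_ADDRESS_HI,
			      &addr_hi);

	addr_lo &= 0xffff0000;
	/* Bit 0 of the low address enables 64-bit addressing in the endpoint. */
	if (addr_hi)
		addr_lo |= 1;

	iowrite32(addr_lo, pciep + PCI402_PCIEP_OF_MSI_ADDR_LO);
	iowrite32(addr_hi, pciep + PCI402_PCIEP_OF_MSI_ADDR_HI);
}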
// SPDX-License-Identifier: GPL-2.0 /* $Id: sungem.c,v 1.44.2.22 2002/03/13 01:18:12 davem Exp $ * sungem.c: Sun GEM ethernet driver. * * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller ([email protected]) * * Support for Apple GMAC and assorted PHYs, WOL, Power Management * (C) 2001,2002,2003 Benjamin Herrenscmidt ([email protected]) * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp. * * NAPI and NETPOLL support * (C) 2004 by Eric Lemoine ([email protected]) * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/if_vlan.h> #include <linux/bitops.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/of.h> #include <asm/io.h> #include <asm/byteorder.h> #include <linux/uaccess.h> #include <asm/irq.h> #ifdef CONFIG_SPARC #include <asm/idprom.h> #include <asm/prom.h> #endif #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/pmac_feature.h> #endif #include <linux/sungem_phy.h> #include "sungem.h" #define STRIP_FCS #define DEFAULT_MSG (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK) #define ADVERTISE_MASK (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | \ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | \ SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | \ SUPPORTED_Pause | SUPPORTED_Autoneg) #define DRV_NAME "sungem" #define DRV_VERSION "1.0" #define DRV_AUTHOR "David S. Miller <[email protected]>" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_AUTHOR "\n"; MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver"); MODULE_LICENSE("GPL"); #define GEM_MODULE_NAME "gem" static const struct pci_device_id gem_pci_tbl[] = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, /* These models only differ from the original GEM in * that their tx/rx fifos are of a different size and * they only support 10/100 speeds. -DaveM * * Apple's GMAC does support gigabit on machines with * the BCM54xx PHYs. 
-BenH */ { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_RIO_GEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMACP, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_GMAC2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_K2_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_SH_SUNGEM, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_IPID2_GMAC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, {0, } }; MODULE_DEVICE_TABLE(pci, gem_pci_tbl); static u16 __sungem_phy_read(struct gem *gp, int phy_addr, int reg) { u32 cmd; int limit = 10000; cmd = (1 << 30); cmd |= (2 << 28); cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; cmd |= (reg << 18) & MIF_FRAME_REGAD; cmd |= (MIF_FRAME_TAMSB); writel(cmd, gp->regs + MIF_FRAME); while (--limit) { cmd = readl(gp->regs + MIF_FRAME); if (cmd & MIF_FRAME_TALSB) break; udelay(10); } if (!limit) cmd = 0xffff; return cmd & MIF_FRAME_DATA; } static inline int _sungem_phy_read(struct net_device *dev, int mii_id, int reg) { struct gem *gp = netdev_priv(dev); return __sungem_phy_read(gp, mii_id, reg); } static inline u16 sungem_phy_read(struct gem *gp, int reg) { return __sungem_phy_read(gp, gp->mii_phy_addr, reg); } static void __sungem_phy_write(struct gem *gp, int phy_addr, int reg, u16 val) { u32 cmd; int limit = 10000; cmd = (1 << 30); cmd |= (1 << 28); cmd |= (phy_addr << 23) & MIF_FRAME_PHYAD; cmd |= (reg << 18) & MIF_FRAME_REGAD; cmd |= (MIF_FRAME_TAMSB); cmd |= (val & MIF_FRAME_DATA); writel(cmd, gp->regs + MIF_FRAME); while (limit--) { cmd = readl(gp->regs + MIF_FRAME); if (cmd & MIF_FRAME_TALSB) break; udelay(10); } } static inline void _sungem_phy_write(struct net_device *dev, int mii_id, int reg, int val) { struct gem *gp = netdev_priv(dev); __sungem_phy_write(gp, mii_id, reg, val & 0xffff); } static inline void sungem_phy_write(struct gem *gp, int reg, u16 val) { __sungem_phy_write(gp, gp->mii_phy_addr, reg, val); } static inline void gem_enable_ints(struct gem *gp) { /* Enable all interrupts but TXDONE */ writel(GREG_STAT_TXDONE, gp->regs + GREG_IMASK); } static inline void gem_disable_ints(struct gem *gp) { /* Disable all interrupts, including TXDONE */ writel(GREG_STAT_NAPI | GREG_STAT_TXDONE, gp->regs + GREG_IMASK); (void)readl(gp->regs + GREG_IMASK); /* write posting */ } static void gem_get_cell(struct gem *gp) { BUG_ON(gp->cell_enabled < 0); gp->cell_enabled++; #ifdef CONFIG_PPC_PMAC if (gp->cell_enabled == 1) { mb(); pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 1); udelay(10); } #endif /* CONFIG_PPC_PMAC */ } /* Turn off the chip's clock */ static void gem_put_cell(struct gem *gp) { BUG_ON(gp->cell_enabled <= 0); gp->cell_enabled--; #ifdef CONFIG_PPC_PMAC if (gp->cell_enabled == 0) { mb(); pmac_call_feature(PMAC_FTR_GMAC_ENABLE, gp->of_node, 0, 0); udelay(10); } #endif /* CONFIG_PPC_PMAC */ } static inline void gem_netif_stop(struct gem *gp) { netif_trans_update(gp->dev); /* prevent tx timeout */ napi_disable(&gp->napi); netif_tx_disable(gp->dev); } static inline void gem_netif_start(struct gem *gp) { /* NOTE: unconditional netif_wake_queue is only * appropriate so long as all callers are assured to * have free tx slots. 
*/ netif_wake_queue(gp->dev); napi_enable(&gp->napi); } static void gem_schedule_reset(struct gem *gp) { gp->reset_task_pending = 1; schedule_work(&gp->reset_task); } static void gem_handle_mif_event(struct gem *gp, u32 reg_val, u32 changed_bits) { if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: mif interrupt\n", gp->dev->name); } static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 pcs_istat = readl(gp->regs + PCS_ISTAT); u32 pcs_miistat; if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: pcs interrupt, pcs_istat: 0x%x\n", gp->dev->name, pcs_istat); if (!(pcs_istat & PCS_ISTAT_LSC)) { netdev_err(dev, "PCS irq but no link status change???\n"); return 0; } /* The link status bit latches on zero, so you must * read it twice in such a case to see a transition * to the link being up. */ pcs_miistat = readl(gp->regs + PCS_MIISTAT); if (!(pcs_miistat & PCS_MIISTAT_LS)) pcs_miistat |= (readl(gp->regs + PCS_MIISTAT) & PCS_MIISTAT_LS); if (pcs_miistat & PCS_MIISTAT_ANC) { /* The remote-fault indication is only valid * when autoneg has completed. */ if (pcs_miistat & PCS_MIISTAT_RF) netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n"); else netdev_info(dev, "PCS AutoNEG complete\n"); } if (pcs_miistat & PCS_MIISTAT_LS) { netdev_info(dev, "PCS link is now up\n"); netif_carrier_on(gp->dev); } else { netdev_info(dev, "PCS link is now down\n"); netif_carrier_off(gp->dev); /* If this happens and the link timer is not running, * reset so we re-negotiate. */ if (!timer_pending(&gp->link_timer)) return 1; } return 0; } static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 txmac_stat = readl(gp->regs + MAC_TXSTAT); if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n", gp->dev->name, txmac_stat); /* Defer timer expiration is quite normal, * don't even log the event. */ if ((txmac_stat & MAC_TXSTAT_DTE) && !(txmac_stat & ~MAC_TXSTAT_DTE)) return 0; if (txmac_stat & MAC_TXSTAT_URUN) { netdev_err(dev, "TX MAC xmit underrun\n"); dev->stats.tx_fifo_errors++; } if (txmac_stat & MAC_TXSTAT_MPE) { netdev_err(dev, "TX MAC max packet size error\n"); dev->stats.tx_errors++; } /* The rest are all cases of one of the 16-bit TX * counters expiring. */ if (txmac_stat & MAC_TXSTAT_NCE) dev->stats.collisions += 0x10000; if (txmac_stat & MAC_TXSTAT_ECE) { dev->stats.tx_aborted_errors += 0x10000; dev->stats.collisions += 0x10000; } if (txmac_stat & MAC_TXSTAT_LCE) { dev->stats.tx_aborted_errors += 0x10000; dev->stats.collisions += 0x10000; } /* We do not keep track of MAC_TXSTAT_FCE and * MAC_TXSTAT_PCE events. */ return 0; } /* When we get a RX fifo overflow, the RX unit in GEM is probably hung * so we do the following. * * If any part of the reset goes wrong, we return 1 and that causes the * whole chip to be reset. */ static int gem_rxmac_reset(struct gem *gp) { struct net_device *dev = gp->dev; int limit, i; u64 desc_dma; u32 val; /* First, reset & disable MAC RX. 
*/ writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + MAC_RXRST) & MAC_RXRST_CMD)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX MAC will not reset, resetting whole chip\n"); return 1; } writel(gp->mac_rx_cfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX MAC will not disable, resetting whole chip\n"); return 1; } /* Second, disable RX DMA. */ writel(0, gp->regs + RXDMA_CFG); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + RXDMA_CFG) & RXDMA_CFG_ENABLE)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX DMA will not disable, resetting whole chip\n"); return 1; } mdelay(5); /* Execute RX reset command. */ writel(gp->swrst_base | GREG_SWRST_RXRST, gp->regs + GREG_SWRST); for (limit = 0; limit < 5000; limit++) { if (!(readl(gp->regs + GREG_SWRST) & GREG_SWRST_RXRST)) break; udelay(10); } if (limit == 5000) { netdev_err(dev, "RX reset command will not execute, resetting whole chip\n"); return 1; } /* Refresh the RX ring. */ for (i = 0; i < RX_RING_SIZE; i++) { struct gem_rxd *rxd = &gp->init_block->rxd[i]; if (gp->rx_skbs[i] == NULL) { netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n"); return 1; } rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); } gp->rx_new = gp->rx_old = 0; /* Now we must reprogram the rest of RX unit. */ desc_dma = (u64) gp->gblock_dvma; desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | ((8 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); else writel(((5 & RXDMA_BLANK_IPKTS) | ((4 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); writel(val, gp->regs + RXDMA_PTHRESH); val = readl(gp->regs + RXDMA_CFG); writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); return 0; } static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 rxmac_stat = readl(gp->regs + MAC_RXSTAT); int ret = 0; if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: rxmac interrupt, rxmac_stat: 0x%x\n", gp->dev->name, rxmac_stat); if (rxmac_stat & MAC_RXSTAT_OFLW) { u32 smac = readl(gp->regs + MAC_SMACHINE); netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac); dev->stats.rx_over_errors++; dev->stats.rx_fifo_errors++; ret = gem_rxmac_reset(gp); } if (rxmac_stat & MAC_RXSTAT_ACE) dev->stats.rx_frame_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_CCE) dev->stats.rx_crc_errors += 0x10000; if (rxmac_stat & MAC_RXSTAT_LCE) dev->stats.rx_length_errors += 0x10000; /* We do not track MAC_RXSTAT_FCE and MAC_RXSTAT_VCE * events. 
*/ return ret; } static int gem_mac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 mac_cstat = readl(gp->regs + MAC_CSTAT); if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: mac interrupt, mac_cstat: 0x%x\n", gp->dev->name, mac_cstat); /* This interrupt is just for pause frame and pause * tracking. It is useful for diagnostics and debug * but probably by default we will mask these events. */ if (mac_cstat & MAC_CSTAT_PS) gp->pause_entered++; if (mac_cstat & MAC_CSTAT_PRCV) gp->pause_last_time_recvd = (mac_cstat >> 16); return 0; } static int gem_mif_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 mif_status = readl(gp->regs + MIF_STATUS); u32 reg_val, changed_bits; reg_val = (mif_status & MIF_STATUS_DATA) >> 16; changed_bits = (mif_status & MIF_STATUS_STAT); gem_handle_mif_event(gp, reg_val, changed_bits); return 0; } static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_status) { u32 pci_estat = readl(gp->regs + GREG_PCIESTAT); if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { netdev_err(dev, "PCI error [%04x]", pci_estat); if (pci_estat & GREG_PCIESTAT_BADACK) pr_cont(" <No ACK64# during ABS64 cycle>"); if (pci_estat & GREG_PCIESTAT_DTRTO) pr_cont(" <Delayed transaction timeout>"); if (pci_estat & GREG_PCIESTAT_OTHER) pr_cont(" <other>"); pr_cont("\n"); } else { pci_estat |= GREG_PCIESTAT_OTHER; netdev_err(dev, "PCI error\n"); } if (pci_estat & GREG_PCIESTAT_OTHER) { int pci_errs; /* Interrogate PCI config space for the * true cause. */ pci_errs = pci_status_get_and_clear_errors(gp->pdev); netdev_err(dev, "PCI status errors[%04x]\n", pci_errs); if (pci_errs & PCI_STATUS_PARITY) netdev_err(dev, "PCI parity error detected\n"); if (pci_errs & PCI_STATUS_SIG_TARGET_ABORT) netdev_err(dev, "PCI target abort\n"); if (pci_errs & PCI_STATUS_REC_TARGET_ABORT) netdev_err(dev, "PCI master acks target abort\n"); if (pci_errs & PCI_STATUS_REC_MASTER_ABORT) netdev_err(dev, "PCI master abort\n"); if (pci_errs & PCI_STATUS_SIG_SYSTEM_ERROR) netdev_err(dev, "PCI system error SERR#\n"); if (pci_errs & PCI_STATUS_DETECTED_PARITY) netdev_err(dev, "PCI parity error\n"); } /* For all PCI errors, we should reset the chip. */ return 1; } /* All non-normal interrupt conditions get serviced here. * Returns non-zero if we should just exit the interrupt * handler right now (ie. if we reset the card which invalidates * all of the other original irq status bits). */ static int gem_abnormal_irq(struct net_device *dev, struct gem *gp, u32 gem_status) { if (gem_status & GREG_STAT_RXNOBUF) { /* Frame arrived, no free RX buffers available. 
*/ if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: no buffer for rx frame\n", gp->dev->name); dev->stats.rx_dropped++; } if (gem_status & GREG_STAT_RXTAGERR) { /* corrupt RX tag framing */ if (netif_msg_rx_err(gp)) printk(KERN_DEBUG "%s: corrupt rx tag framing\n", gp->dev->name); dev->stats.rx_errors++; return 1; } if (gem_status & GREG_STAT_PCS) { if (gem_pcs_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_TXMAC) { if (gem_txmac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_RXMAC) { if (gem_rxmac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_MAC) { if (gem_mac_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_MIF) { if (gem_mif_interrupt(dev, gp, gem_status)) return 1; } if (gem_status & GREG_STAT_PCIERR) { if (gem_pci_interrupt(dev, gp, gem_status)) return 1; } return 0; } static __inline__ void gem_tx(struct net_device *dev, struct gem *gp, u32 gem_status) { int entry, limit; entry = gp->tx_old; limit = ((gem_status & GREG_STAT_TXNR) >> GREG_STAT_TXNR_SHIFT); while (entry != limit) { struct sk_buff *skb; struct gem_txd *txd; dma_addr_t dma_addr; u32 dma_len; int frag; if (netif_msg_tx_done(gp)) printk(KERN_DEBUG "%s: tx done, slot %d\n", gp->dev->name, entry); skb = gp->tx_skbs[entry]; if (skb_shinfo(skb)->nr_frags) { int last = entry + skb_shinfo(skb)->nr_frags; int walk = entry; int incomplete = 0; last &= (TX_RING_SIZE - 1); for (;;) { walk = NEXT_TX(walk); if (walk == limit) incomplete = 1; if (walk == last) break; } if (incomplete) break; } gp->tx_skbs[entry] = NULL; dev->stats.tx_bytes += skb->len; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { txd = &gp->init_block->txd[entry]; dma_addr = le64_to_cpu(txd->buffer); dma_len = le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ; dma_unmap_page(&gp->pdev->dev, dma_addr, dma_len, DMA_TO_DEVICE); entry = NEXT_TX(entry); } dev->stats.tx_packets++; dev_consume_skb_any(skb); } gp->tx_old = entry; /* Need to make the tx_old update visible to gem_start_xmit() * before checking for netif_queue_stopped(). Without the * memory barrier, there is a small possibility that gem_start_xmit() * will miss it and cause the queue to be stopped forever. 
*/ smp_mb(); if (unlikely(netif_queue_stopped(dev) && TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1))) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); __netif_tx_lock(txq, smp_processor_id()); if (netif_queue_stopped(dev) && TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); __netif_tx_unlock(txq); } } static __inline__ void gem_post_rxds(struct gem *gp, int limit) { int cluster_start, curr, count, kick; cluster_start = curr = (gp->rx_new & ~(4 - 1)); count = 0; kick = -1; dma_wmb(); while (curr != limit) { curr = NEXT_RX(curr); if (++count == 4) { struct gem_rxd *rxd = &gp->init_block->rxd[cluster_start]; for (;;) { rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); rxd++; cluster_start = NEXT_RX(cluster_start); if (cluster_start == curr) break; } kick = curr; count = 0; } } if (kick >= 0) { mb(); writel(kick, gp->regs + RXDMA_KICK); } } #define ALIGNED_RX_SKB_ADDR(addr) \ ((((unsigned long)(addr) + (64UL - 1UL)) & ~(64UL - 1UL)) - (unsigned long)(addr)) static __inline__ struct sk_buff *gem_alloc_skb(struct net_device *dev, int size, gfp_t gfp_flags) { struct sk_buff *skb = alloc_skb(size + 64, gfp_flags); if (likely(skb)) { unsigned long offset = ALIGNED_RX_SKB_ADDR(skb->data); skb_reserve(skb, offset); } return skb; } static int gem_rx(struct gem *gp, int work_to_do) { struct net_device *dev = gp->dev; int entry, drops, work_done = 0; u32 done; if (netif_msg_rx_status(gp)) printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n", gp->dev->name, readl(gp->regs + RXDMA_DONE), gp->rx_new); entry = gp->rx_new; drops = 0; done = readl(gp->regs + RXDMA_DONE); for (;;) { struct gem_rxd *rxd = &gp->init_block->rxd[entry]; struct sk_buff *skb; u64 status = le64_to_cpu(rxd->status_word); dma_addr_t dma_addr; int len; if ((status & RXDCTRL_OWN) != 0) break; if (work_done >= RX_RING_SIZE || work_done >= work_to_do) break; /* When writing back RX descriptor, GEM writes status * then buffer address, possibly in separate transactions. * If we don't wait for the chip to write both, we could * post a new buffer to this descriptor then have GEM spam * on the buffer address. We sync on the RX completion * register to prevent this from happening. */ if (entry == done) { done = readl(gp->regs + RXDMA_DONE); if (entry == done) break; } /* We can now account for the work we're about to do */ work_done++; skb = gp->rx_skbs[entry]; len = (status & RXDCTRL_BUFSZ) >> 16; if ((len < ETH_ZLEN) || (status & RXDCTRL_BAD)) { dev->stats.rx_errors++; if (len < ETH_ZLEN) dev->stats.rx_length_errors++; if (len & RXDCTRL_BAD) dev->stats.rx_crc_errors++; /* We'll just return it to GEM. */ drop_it: dev->stats.rx_dropped++; goto next; } dma_addr = le64_to_cpu(rxd->buffer); if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; new_skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } dma_unmap_page(&gp->pdev->dev, dma_addr, RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); gp->rx_skbs[entry] = new_skb; skb_put(new_skb, (gp->rx_buf_sz + RX_OFFSET)); rxd->buffer = cpu_to_le64(dma_map_page(&gp->pdev->dev, virt_to_page(new_skb->data), offset_in_page(new_skb->data), RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE)); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. 
*/ skb_trim(skb, len); } else { struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); dma_sync_single_for_cpu(&gp->pdev->dev, dma_addr, len, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); dma_sync_single_for_device(&gp->pdev->dev, dma_addr, len, DMA_FROM_DEVICE); /* We'll reuse the original ring buffer. */ skb = copy_skb; } if (likely(dev->features & NETIF_F_RXCSUM)) { __sum16 csum; csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff); skb->csum = csum_unfold(csum); skb->ip_summed = CHECKSUM_COMPLETE; } skb->protocol = eth_type_trans(skb, gp->dev); napi_gro_receive(&gp->napi, skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; next: entry = NEXT_RX(entry); } gem_post_rxds(gp, entry); gp->rx_new = entry; if (drops) netdev_info(gp->dev, "Memory squeeze, deferring packet\n"); return work_done; } static int gem_poll(struct napi_struct *napi, int budget) { struct gem *gp = container_of(napi, struct gem, napi); struct net_device *dev = gp->dev; int work_done; work_done = 0; do { /* Handle anomalies */ if (unlikely(gp->status & GREG_STAT_ABNORMAL)) { struct netdev_queue *txq = netdev_get_tx_queue(dev, 0); int reset; /* We run the abnormal interrupt handling code with * the Tx lock. It only resets the Rx portion of the * chip, but we need to guard it against DMA being * restarted by the link poll timer */ __netif_tx_lock(txq, smp_processor_id()); reset = gem_abnormal_irq(dev, gp, gp->status); __netif_tx_unlock(txq); if (reset) { gem_schedule_reset(gp); napi_complete(napi); return work_done; } } /* Run TX completion thread */ gem_tx(dev, gp, gp->status); /* Run RX thread. We don't use any locking here, * code willing to do bad things - like cleaning the * rx ring - must call napi_disable(), which * schedule_timeout()'s if polling is already disabled. */ work_done += gem_rx(gp, budget - work_done); if (work_done >= budget) return work_done; gp->status = readl(gp->regs + GREG_STAT); } while (gp->status & GREG_STAT_NAPI); napi_complete_done(napi, work_done); gem_enable_ints(gp); return work_done; } static irqreturn_t gem_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct gem *gp = netdev_priv(dev); if (napi_schedule_prep(&gp->napi)) { u32 gem_status = readl(gp->regs + GREG_STAT); if (unlikely(gem_status == 0)) { napi_enable(&gp->napi); return IRQ_NONE; } if (netif_msg_intr(gp)) printk(KERN_DEBUG "%s: gem_interrupt() gem_status: 0x%x\n", gp->dev->name, gem_status); gp->status = gem_status; gem_disable_ints(gp); __napi_schedule(&gp->napi); } /* If polling was disabled at the time we received that * interrupt, we may return IRQ_HANDLED here while we * should return IRQ_NONE. No big deal... */ return IRQ_HANDLED; } static void gem_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct gem *gp = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n", readl(gp->regs + TXDMA_CFG), readl(gp->regs + MAC_TXSTAT), readl(gp->regs + MAC_TXCFG)); netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", readl(gp->regs + RXDMA_CFG), readl(gp->regs + MAC_RXSTAT), readl(gp->regs + MAC_RXCFG)); gem_schedule_reset(gp); } static __inline__ int gem_intme(int entry) { /* Algorithm: IRQ every 1/2 of descriptors. 
*/ if (!(entry & ((TX_RING_SIZE>>1)-1))) return 1; return 0; } static netdev_tx_t gem_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct gem *gp = netdev_priv(dev); int entry; u64 ctrl; ctrl = 0; if (skb->ip_summed == CHECKSUM_PARTIAL) { const u64 csum_start_off = skb_checksum_start_offset(skb); const u64 csum_stuff_off = csum_start_off + skb->csum_offset; ctrl = (TXDCTRL_CENAB | (csum_start_off << 15) | (csum_stuff_off << 21)); } if (unlikely(TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1))) { /* This is a hard error, log it. */ if (!netif_queue_stopped(dev)) { netif_stop_queue(dev); netdev_err(dev, "BUG! Tx Ring full when queue awake!\n"); } return NETDEV_TX_BUSY; } entry = gp->tx_new; gp->tx_skbs[entry] = skb; if (skb_shinfo(skb)->nr_frags == 0) { struct gem_txd *txd = &gp->init_block->txd[entry]; dma_addr_t mapping; u32 len; len = skb->len; mapping = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), len, DMA_TO_DEVICE); ctrl |= TXDCTRL_SOF | TXDCTRL_EOF | len; if (gem_intme(entry)) ctrl |= TXDCTRL_INTME; txd->buffer = cpu_to_le64(mapping); dma_wmb(); txd->control_word = cpu_to_le64(ctrl); entry = NEXT_TX(entry); } else { struct gem_txd *txd; u32 first_len; u64 intme; dma_addr_t first_mapping; int frag, first_entry = entry; intme = 0; if (gem_intme(entry)) intme |= TXDCTRL_INTME; /* We must give this initial chunk to the device last. * Otherwise we could race with the device. */ first_len = skb_headlen(skb); first_mapping = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), first_len, DMA_TO_DEVICE); entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len; dma_addr_t mapping; u64 this_ctrl; len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(&gp->pdev->dev, this_frag, 0, len, DMA_TO_DEVICE); this_ctrl = ctrl; if (frag == skb_shinfo(skb)->nr_frags - 1) this_ctrl |= TXDCTRL_EOF; txd = &gp->init_block->txd[entry]; txd->buffer = cpu_to_le64(mapping); dma_wmb(); txd->control_word = cpu_to_le64(this_ctrl | len); if (gem_intme(entry)) intme |= TXDCTRL_INTME; entry = NEXT_TX(entry); } txd = &gp->init_block->txd[first_entry]; txd->buffer = cpu_to_le64(first_mapping); dma_wmb(); txd->control_word = cpu_to_le64(ctrl | TXDCTRL_SOF | intme | first_len); } gp->tx_new = entry; if (unlikely(TX_BUFFS_AVAIL(gp) <= (MAX_SKB_FRAGS + 1))) { netif_stop_queue(dev); /* netif_stop_queue() must be done before checking * tx index in TX_BUFFS_AVAIL() below, because * in gem_tx(), we update tx_old before checking for * netif_queue_stopped(). */ smp_mb(); if (TX_BUFFS_AVAIL(gp) > (MAX_SKB_FRAGS + 1)) netif_wake_queue(dev); } if (netif_msg_tx_queued(gp)) printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n", dev->name, entry, skb->len); mb(); writel(gp->tx_new, gp->regs + TXDMA_KICK); return NETDEV_TX_OK; } static void gem_pcs_reset(struct gem *gp) { int limit; u32 val; /* Reset PCS unit. */ val = readl(gp->regs + PCS_MIICTRL); val |= PCS_MIICTRL_RST; writel(val, gp->regs + PCS_MIICTRL); limit = 32; while (readl(gp->regs + PCS_MIICTRL) & PCS_MIICTRL_RST) { udelay(100); if (limit-- <= 0) break; } if (limit < 0) netdev_warn(gp->dev, "PCS reset bit would not clear\n"); } static void gem_pcs_reinit_adv(struct gem *gp) { u32 val; /* Make sure PCS is disabled while changing advertisement * configuration. 
*/ val = readl(gp->regs + PCS_CFG); val &= ~(PCS_CFG_ENABLE | PCS_CFG_TO); writel(val, gp->regs + PCS_CFG); /* Advertise all capabilities except asymmetric * pause. */ val = readl(gp->regs + PCS_MIIADV); val |= (PCS_MIIADV_FD | PCS_MIIADV_HD | PCS_MIIADV_SP | PCS_MIIADV_AP); writel(val, gp->regs + PCS_MIIADV); /* Enable and restart auto-negotiation, disable wrapback/loopback, * and re-enable PCS. */ val = readl(gp->regs + PCS_MIICTRL); val |= (PCS_MIICTRL_RAN | PCS_MIICTRL_ANE); val &= ~PCS_MIICTRL_WB; writel(val, gp->regs + PCS_MIICTRL); val = readl(gp->regs + PCS_CFG); val |= PCS_CFG_ENABLE; writel(val, gp->regs + PCS_CFG); /* Make sure serialink loopback is off. The meaning * of this bit is logically inverted based upon whether * you are in Serialink or SERDES mode. */ val = readl(gp->regs + PCS_SCTRL); if (gp->phy_type == phy_serialink) val &= ~PCS_SCTRL_LOOP; else val |= PCS_SCTRL_LOOP; writel(val, gp->regs + PCS_SCTRL); } #define STOP_TRIES 32 static void gem_reset(struct gem *gp) { int limit; u32 val; /* Make sure we won't get any more interrupts */ writel(0xffffffff, gp->regs + GREG_IMASK); /* Reset the chip */ writel(gp->swrst_base | GREG_SWRST_TXRST | GREG_SWRST_RXRST, gp->regs + GREG_SWRST); limit = STOP_TRIES; do { udelay(20); val = readl(gp->regs + GREG_SWRST); if (limit-- <= 0) break; } while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST)); if (limit < 0) netdev_err(gp->dev, "SW reset is ghetto\n"); if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) gem_pcs_reinit_adv(gp); } static void gem_start_dma(struct gem *gp) { u32 val; /* We are ready to rock, turn everything on. */ val = readl(gp->regs + TXDMA_CFG); writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); val = readl(gp->regs + RXDMA_CFG); writel(val | RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); val = readl(gp->regs + MAC_TXCFG); writel(val | MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); (void) readl(gp->regs + MAC_RXCFG); udelay(100); gem_enable_ints(gp); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); } /* DMA won't be actually stopped before about 4ms tho ... */ static void gem_stop_dma(struct gem *gp) { u32 val; /* We are done rocking, turn everything off. */ val = readl(gp->regs + TXDMA_CFG); writel(val & ~TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG); val = readl(gp->regs + RXDMA_CFG); writel(val & ~RXDMA_CFG_ENABLE, gp->regs + RXDMA_CFG); val = readl(gp->regs + MAC_TXCFG); writel(val & ~MAC_TXCFG_ENAB, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); (void) readl(gp->regs + MAC_RXCFG); /* Need to wait a bit ... 
done by the caller */ } // XXX dbl check what that function should do when called on PCS PHY static void gem_begin_auto_negotiation(struct gem *gp, const struct ethtool_link_ksettings *ep) { u32 advertise, features; int autoneg; int speed; int duplex; u32 advertising; if (ep) ethtool_convert_link_mode_to_legacy_u32( &advertising, ep->link_modes.advertising); if (gp->phy_type != phy_mii_mdio0 && gp->phy_type != phy_mii_mdio1) goto non_mii; /* Setup advertise */ if (found_mii_phy(gp)) features = gp->phy_mii.def->features; else features = 0; advertise = features & ADVERTISE_MASK; if (gp->phy_mii.advertising != 0) advertise &= gp->phy_mii.advertising; autoneg = gp->want_autoneg; speed = gp->phy_mii.speed; duplex = gp->phy_mii.duplex; /* Setup link parameters */ if (!ep) goto start_aneg; if (ep->base.autoneg == AUTONEG_ENABLE) { advertise = advertising; autoneg = 1; } else { autoneg = 0; speed = ep->base.speed; duplex = ep->base.duplex; } start_aneg: /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) autoneg = 0; if (speed == SPEED_1000 && !(features & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full))) speed = SPEED_100; if (speed == SPEED_100 && !(features & (SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full))) speed = SPEED_10; if (duplex == DUPLEX_FULL && !(features & (SUPPORTED_1000baseT_Full | SUPPORTED_100baseT_Full | SUPPORTED_10baseT_Full))) duplex = DUPLEX_HALF; if (speed == 0) speed = SPEED_10; /* If we are asleep, we don't try to actually setup the PHY, we * just store the settings */ if (!netif_device_present(gp->dev)) { gp->phy_mii.autoneg = gp->want_autoneg = autoneg; gp->phy_mii.speed = speed; gp->phy_mii.duplex = duplex; return; } /* Configure PHY & start aneg */ gp->want_autoneg = autoneg; if (autoneg) { if (found_mii_phy(gp)) gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, advertise); gp->lstate = link_aneg; } else { if (found_mii_phy(gp)) gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, speed, duplex); gp->lstate = link_force_ok; } non_mii: gp->timer_ticks = 0; mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); } /* A link-up condition has occurred, initialize and enable the * rest of the chip. */ static int gem_set_link_modes(struct gem *gp) { struct netdev_queue *txq = netdev_get_tx_queue(gp->dev, 0); int full_duplex, speed, pause; u32 val; full_duplex = 0; speed = SPEED_10; pause = 0; if (found_mii_phy(gp)) { if (gp->phy_mii.def->ops->read_link(&gp->phy_mii)) return 1; full_duplex = (gp->phy_mii.duplex == DUPLEX_FULL); speed = gp->phy_mii.speed; pause = gp->phy_mii.pause; } else if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 pcs_lpa = readl(gp->regs + PCS_MIILP); if ((pcs_lpa & PCS_MIIADV_FD) || gp->phy_type == phy_serdes) full_duplex = 1; speed = SPEED_1000; } netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n", speed, (full_duplex ? "full" : "half")); /* We take the tx queue lock to avoid collisions between * this code, the tx path and the NAPI-driven error path */ __netif_tx_lock(txq, smp_processor_id()); val = (MAC_TXCFG_EIPG0 | MAC_TXCFG_NGU); if (full_duplex) { val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL); } else { /* MAC_TXCFG_NBO must be zero. 
*/ } writel(val, gp->regs + MAC_TXCFG); val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED); if (!full_duplex && (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1)) { val |= MAC_XIFCFG_DISE; } else if (full_duplex) { val |= MAC_XIFCFG_FLED; } if (speed == SPEED_1000) val |= (MAC_XIFCFG_GMII); writel(val, gp->regs + MAC_XIFCFG); /* If gigabit and half-duplex, enable carrier extension * mode. Else, disable it. */ if (speed == SPEED_1000 && !full_duplex) { val = readl(gp->regs + MAC_TXCFG); writel(val | MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val | MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); } else { val = readl(gp->regs + MAC_TXCFG); writel(val & ~MAC_TXCFG_TCE, gp->regs + MAC_TXCFG); val = readl(gp->regs + MAC_RXCFG); writel(val & ~MAC_RXCFG_RCE, gp->regs + MAC_RXCFG); } if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 pcs_lpa = readl(gp->regs + PCS_MIILP); if (pcs_lpa & (PCS_MIIADV_SP | PCS_MIIADV_AP)) pause = 1; } if (!full_duplex) writel(512, gp->regs + MAC_STIME); else writel(64, gp->regs + MAC_STIME); val = readl(gp->regs + MAC_MCCFG); if (pause) val |= (MAC_MCCFG_SPE | MAC_MCCFG_RPE); else val &= ~(MAC_MCCFG_SPE | MAC_MCCFG_RPE); writel(val, gp->regs + MAC_MCCFG); gem_start_dma(gp); __netif_tx_unlock(txq); if (netif_msg_link(gp)) { if (pause) { netdev_info(gp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n", gp->rx_fifo_sz, gp->rx_pause_off, gp->rx_pause_on); } else { netdev_info(gp->dev, "Pause is disabled\n"); } } return 0; } static int gem_mdio_link_not_up(struct gem *gp) { switch (gp->lstate) { case link_force_ret: netif_info(gp, link, gp->dev, "Autoneg failed again, keeping forced mode\n"); gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, gp->last_forced_speed, DUPLEX_HALF); gp->timer_ticks = 5; gp->lstate = link_force_ok; return 0; case link_aneg: /* We try forced modes after a failed aneg only on PHYs that don't * have "magic_aneg" bit set, which means they internally do the * while forced-mode thingy. On these, we just restart aneg */ if (gp->phy_mii.def->magic_aneg) return 1; netif_info(gp, link, gp->dev, "switching to forced 100bt\n"); /* Try forced modes. */ gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100, DUPLEX_HALF); gp->timer_ticks = 5; gp->lstate = link_force_try; return 0; case link_force_try: /* Downgrade from 100 to 10 Mbps if necessary. * If already at 10Mbps, warn user about the * situation every 10 ticks. */ if (gp->phy_mii.speed == SPEED_100) { gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10, DUPLEX_HALF); gp->timer_ticks = 5; netif_info(gp, link, gp->dev, "switching to forced 10bt\n"); return 0; } else return 1; default: return 0; } } static void gem_link_timer(struct timer_list *t) { struct gem *gp = from_timer(gp, t, link_timer); struct net_device *dev = gp->dev; int restart_aneg = 0; /* There's no point doing anything if we're going to be reset */ if (gp->reset_task_pending) return; if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes) { u32 val = readl(gp->regs + PCS_MIISTAT); if (!(val & PCS_MIISTAT_LS)) val = readl(gp->regs + PCS_MIISTAT); if ((val & PCS_MIISTAT_LS) != 0) { if (gp->lstate == link_up) goto restart; gp->lstate = link_up; netif_carrier_on(dev); (void)gem_set_link_modes(gp); } goto restart; } if (found_mii_phy(gp) && gp->phy_mii.def->ops->poll_link(&gp->phy_mii)) { /* Ok, here we got a link. If we had it due to a forced * fallback, and we were configured for autoneg, we do * retry a short autoneg pass. 
If you know your hub is * broken, use ethtool ;) */ if (gp->lstate == link_force_try && gp->want_autoneg) { gp->lstate = link_force_ret; gp->last_forced_speed = gp->phy_mii.speed; gp->timer_ticks = 5; if (netif_msg_link(gp)) netdev_info(dev, "Got link after fallback, retrying autoneg once...\n"); gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising); } else if (gp->lstate != link_up) { gp->lstate = link_up; netif_carrier_on(dev); if (gem_set_link_modes(gp)) restart_aneg = 1; } } else { /* If the link was previously up, we restart the * whole process */ if (gp->lstate == link_up) { gp->lstate = link_down; netif_info(gp, link, dev, "Link down\n"); netif_carrier_off(dev); gem_schedule_reset(gp); /* The reset task will restart the timer */ return; } else if (++gp->timer_ticks > 10) { if (found_mii_phy(gp)) restart_aneg = gem_mdio_link_not_up(gp); else restart_aneg = 1; } } if (restart_aneg) { gem_begin_auto_negotiation(gp, NULL); return; } restart: mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); } static void gem_clean_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; struct sk_buff *skb; int i; dma_addr_t dma_addr; for (i = 0; i < RX_RING_SIZE; i++) { struct gem_rxd *rxd; rxd = &gb->rxd[i]; if (gp->rx_skbs[i] != NULL) { skb = gp->rx_skbs[i]; dma_addr = le64_to_cpu(rxd->buffer); dma_unmap_page(&gp->pdev->dev, dma_addr, RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); dev_kfree_skb_any(skb); gp->rx_skbs[i] = NULL; } rxd->status_word = 0; dma_wmb(); rxd->buffer = 0; } for (i = 0; i < TX_RING_SIZE; i++) { if (gp->tx_skbs[i] != NULL) { struct gem_txd *txd; int frag; skb = gp->tx_skbs[i]; gp->tx_skbs[i] = NULL; for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) { int ent = i & (TX_RING_SIZE - 1); txd = &gb->txd[ent]; dma_addr = le64_to_cpu(txd->buffer); dma_unmap_page(&gp->pdev->dev, dma_addr, le64_to_cpu(txd->control_word) & TXDCTRL_BUFSZ, DMA_TO_DEVICE); if (frag != skb_shinfo(skb)->nr_frags) i++; } dev_kfree_skb_any(skb); } } } static void gem_init_rings(struct gem *gp) { struct gem_init_block *gb = gp->init_block; struct net_device *dev = gp->dev; int i; dma_addr_t dma_addr; gp->rx_new = gp->rx_old = gp->tx_new = gp->tx_old = 0; gem_clean_rings(gp); gp->rx_buf_sz = max(dev->mtu + ETH_HLEN + VLAN_HLEN, (unsigned)VLAN_ETH_FRAME_LEN); for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb; struct gem_rxd *rxd = &gb->rxd[i]; skb = gem_alloc_skb(dev, RX_BUF_ALLOC_SIZE(gp), GFP_KERNEL); if (!skb) { rxd->buffer = 0; rxd->status_word = 0; continue; } gp->rx_skbs[i] = skb; skb_put(skb, (gp->rx_buf_sz + RX_OFFSET)); dma_addr = dma_map_page(&gp->pdev->dev, virt_to_page(skb->data), offset_in_page(skb->data), RX_BUF_ALLOC_SIZE(gp), DMA_FROM_DEVICE); rxd->buffer = cpu_to_le64(dma_addr); dma_wmb(); rxd->status_word = cpu_to_le64(RXDCTRL_FRESH(gp)); skb_reserve(skb, RX_OFFSET); } for (i = 0; i < TX_RING_SIZE; i++) { struct gem_txd *txd = &gb->txd[i]; txd->control_word = 0; dma_wmb(); txd->buffer = 0; } wmb(); } /* Init PHY interface and start link poll state machine */ static void gem_init_phy(struct gem *gp) { u32 mifcfg; /* Revert MIF CFG setting done on stop_phy */ mifcfg = readl(gp->regs + MIF_CFG); mifcfg &= ~MIF_CFG_BBMODE; writel(mifcfg, gp->regs + MIF_CFG); if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) { int i; /* Those delays sucks, the HW seems to love them though, I'll * seriously consider breaking some locks here to be able * to schedule instead */ for (i = 0; i < 3; i++) { #ifdef CONFIG_PPC_PMAC pmac_call_feature(PMAC_FTR_GMAC_PHY_RESET, gp->of_node, 0, 
0); msleep(20); #endif /* Some PHYs used by apple have problem getting back to us, * we do an additional reset here */ sungem_phy_write(gp, MII_BMCR, BMCR_RESET); msleep(20); if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; if (i == 2) netdev_warn(gp->dev, "GMAC PHY not responding !\n"); } } if (gp->pdev->vendor == PCI_VENDOR_ID_SUN && gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) { u32 val; /* Init datapath mode register. */ if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { val = PCS_DMODE_MGM; } else if (gp->phy_type == phy_serialink) { val = PCS_DMODE_SM | PCS_DMODE_GMOE; } else { val = PCS_DMODE_ESM; } writel(val, gp->regs + PCS_DMODE); } if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { /* Reset and detect MII PHY */ sungem_phy_probe(&gp->phy_mii, gp->mii_phy_addr); /* Init PHY */ if (gp->phy_mii.def && gp->phy_mii.def->ops->init) gp->phy_mii.def->ops->init(&gp->phy_mii); } else { gem_pcs_reset(gp); gem_pcs_reinit_adv(gp); } /* Default aneg parameters */ gp->timer_ticks = 0; gp->lstate = link_down; netif_carrier_off(gp->dev); /* Print things out */ if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) netdev_info(gp->dev, "Found %s PHY\n", gp->phy_mii.def ? gp->phy_mii.def->name : "no"); gem_begin_auto_negotiation(gp, NULL); } static void gem_init_dma(struct gem *gp) { u64 desc_dma = (u64) gp->gblock_dvma; u32 val; val = (TXDMA_CFG_BASE | (0x7ff << 10) | TXDMA_CFG_PMODE); writel(val, gp->regs + TXDMA_CFG); writel(desc_dma >> 32, gp->regs + TXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + TXDMA_DBLOW); desc_dma += (INIT_BLOCK_TX_RING_SIZE * sizeof(struct gem_txd)); writel(0, gp->regs + TXDMA_KICK); val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) | (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128); writel(val, gp->regs + RXDMA_CFG); writel(desc_dma >> 32, gp->regs + RXDMA_DBHI); writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW); writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK); val = (((gp->rx_pause_off / 64) << 0) & RXDMA_PTHRESH_OFF); val |= (((gp->rx_pause_on / 64) << 12) & RXDMA_PTHRESH_ON); writel(val, gp->regs + RXDMA_PTHRESH); if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN) writel(((5 & RXDMA_BLANK_IPKTS) | ((8 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); else writel(((5 & RXDMA_BLANK_IPKTS) | ((4 << 12) & RXDMA_BLANK_ITIME)), gp->regs + RXDMA_BLANK); } static u32 gem_setup_multicast(struct gem *gp) { u32 rxcfg = 0; int i; if ((gp->dev->flags & IFF_ALLMULTI) || (netdev_mc_count(gp->dev) > 256)) { for (i=0; i<16; i++) writel(0xffff, gp->regs + MAC_HASH0 + (i << 2)); rxcfg |= MAC_RXCFG_HFE; } else if (gp->dev->flags & IFF_PROMISC) { rxcfg |= MAC_RXCFG_PROM; } else { u16 hash_table[16]; u32 crc; struct netdev_hw_addr *ha; int i; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, gp->dev) { crc = ether_crc_le(6, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } for (i=0; i<16; i++) writel(hash_table[i], gp->regs + MAC_HASH0 + (i << 2)); rxcfg |= MAC_RXCFG_HFE; } return rxcfg; } static void gem_init_mac(struct gem *gp) { const unsigned char *e = &gp->dev->dev_addr[0]; writel(0x1bf0, gp->regs + MAC_SNDPAUSE); writel(0x00, gp->regs + MAC_IPG0); writel(0x08, gp->regs + MAC_IPG1); writel(0x04, gp->regs + MAC_IPG2); writel(0x40, gp->regs + MAC_STIME); writel(0x40, gp->regs + MAC_MINFSZ); /* Ethernet payload + header + FCS + optional VLAN tag. 
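 * rx_buf_sz already covers the MTU, the Ethernet header and a VLAN
 * tag, so only the 4 FCS bytes are added below.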
*/ writel(0x20000000 | (gp->rx_buf_sz + 4), gp->regs + MAC_MAXFSZ); writel(0x07, gp->regs + MAC_PASIZE); writel(0x04, gp->regs + MAC_JAMSIZE); writel(0x10, gp->regs + MAC_ATTLIM); writel(0x8808, gp->regs + MAC_MCTYPE); writel((e[5] | (e[4] << 8)) & 0x3ff, gp->regs + MAC_RANDSEED); writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); writel(0, gp->regs + MAC_ADDR3); writel(0, gp->regs + MAC_ADDR4); writel(0, gp->regs + MAC_ADDR5); writel(0x0001, gp->regs + MAC_ADDR6); writel(0xc200, gp->regs + MAC_ADDR7); writel(0x0180, gp->regs + MAC_ADDR8); writel(0, gp->regs + MAC_AFILT0); writel(0, gp->regs + MAC_AFILT1); writel(0, gp->regs + MAC_AFILT2); writel(0, gp->regs + MAC_AF21MSK); writel(0, gp->regs + MAC_AF0MSK); gp->mac_rx_cfg = gem_setup_multicast(gp); #ifdef STRIP_FCS gp->mac_rx_cfg |= MAC_RXCFG_SFCS; #endif writel(0, gp->regs + MAC_NCOLL); writel(0, gp->regs + MAC_FASUCC); writel(0, gp->regs + MAC_ECOLL); writel(0, gp->regs + MAC_LCOLL); writel(0, gp->regs + MAC_DTIMER); writel(0, gp->regs + MAC_PATMPS); writel(0, gp->regs + MAC_RFCTR); writel(0, gp->regs + MAC_LERR); writel(0, gp->regs + MAC_AERR); writel(0, gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_RXCVERR); /* Clear RX/TX/MAC/XIF config, we will set these up and enable * them once a link is established. */ writel(0, gp->regs + MAC_TXCFG); writel(gp->mac_rx_cfg, gp->regs + MAC_RXCFG); writel(0, gp->regs + MAC_MCCFG); writel(0, gp->regs + MAC_XIFCFG); /* Setup MAC interrupts. We want to get all of the interesting * counter expiration events, but we do not want to hear about * normal rx/tx as the DMA engine tells us that. */ writel(MAC_TXSTAT_XMIT, gp->regs + MAC_TXMASK); writel(MAC_RXSTAT_RCV, gp->regs + MAC_RXMASK); /* Don't enable even the PAUSE interrupts for now, we * make no use of those events other than to record them. */ writel(0xffffffff, gp->regs + MAC_MCMASK); /* Don't enable GEM's WOL in normal operations */ if (gp->has_wol) writel(0, gp->regs + WOL_WAKECSR); } static void gem_init_pause_thresholds(struct gem *gp) { u32 cfg; /* Calculate pause thresholds. Setting the OFF threshold to the * full RX fifo size effectively disables PAUSE generation which * is what we do for 10/100 only GEMs which have FIFOs too small * to make real gains from PAUSE. */ if (gp->rx_fifo_sz <= (2 * 1024)) { gp->rx_pause_off = gp->rx_pause_on = gp->rx_fifo_sz; } else { int max_frame = (gp->rx_buf_sz + 4 + 64) & ~63; int off = (gp->rx_fifo_sz - (max_frame * 2)); int on = off - max_frame; gp->rx_pause_off = off; gp->rx_pause_on = on; } /* Configure the chip "burst" DMA mode & enable some * HW bug fixes on Apple version */ cfg = 0; if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) cfg |= GREG_CFG_RONPAULBIT | GREG_CFG_ENBUG2FIX; #if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA) cfg |= GREG_CFG_IBURST; #endif cfg |= ((31 << 1) & GREG_CFG_TXDMALIM); cfg |= ((31 << 6) & GREG_CFG_RXDMALIM); writel(cfg, gp->regs + GREG_CFG); /* If Infinite Burst didn't stick, then use different * thresholds (and Apple bug fixes don't exist) */ if (!(readl(gp->regs + GREG_CFG) & GREG_CFG_IBURST)) { cfg = ((2 << 1) & GREG_CFG_TXDMALIM); cfg |= ((8 << 6) & GREG_CFG_RXDMALIM); writel(cfg, gp->regs + GREG_CFG); } } static int gem_check_invariants(struct gem *gp) { struct pci_dev *pdev = gp->pdev; u32 mif_cfg; /* On Apple's sungem, we can't rely on registers as the chip * was been powered down by the firmware. The PHY is looked * up later on. 
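 * (The actual PHY reset and probe happen in gem_init_phy() once the
 * interface is brought up.)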
*/ if (pdev->vendor == PCI_VENDOR_ID_APPLE) { gp->phy_type = phy_mii_mdio0; gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; gp->swrst_base = 0; mif_cfg = readl(gp->regs + MIF_CFG); mif_cfg &= ~(MIF_CFG_PSELECT|MIF_CFG_POLL|MIF_CFG_BBMODE|MIF_CFG_MDI1); mif_cfg |= MIF_CFG_MDI0; writel(mif_cfg, gp->regs + MIF_CFG); writel(PCS_DMODE_MGM, gp->regs + PCS_DMODE); writel(MAC_XIFCFG_OE, gp->regs + MAC_XIFCFG); /* We hard-code the PHY address so we can properly bring it out of * reset later on, we can't really probe it at this point, though * that isn't an issue. */ if (gp->pdev->device == PCI_DEVICE_ID_APPLE_K2_GMAC) gp->mii_phy_addr = 1; else gp->mii_phy_addr = 0; return 0; } mif_cfg = readl(gp->regs + MIF_CFG); if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_RIO_GEM) { /* One of the MII PHYs _must_ be present * as this chip has no gigabit PHY. */ if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) { pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n", mif_cfg); return -1; } } /* Determine initial PHY interface type guess. MDIO1 is the * external PHY and thus takes precedence over MDIO0. */ if (mif_cfg & MIF_CFG_MDI1) { gp->phy_type = phy_mii_mdio1; mif_cfg |= MIF_CFG_PSELECT; writel(mif_cfg, gp->regs + MIF_CFG); } else if (mif_cfg & MIF_CFG_MDI0) { gp->phy_type = phy_mii_mdio0; mif_cfg &= ~MIF_CFG_PSELECT; writel(mif_cfg, gp->regs + MIF_CFG); } else { #ifdef CONFIG_SPARC const char *p; p = of_get_property(gp->of_node, "shared-pins", NULL); if (p && !strcmp(p, "serdes")) gp->phy_type = phy_serdes; else #endif gp->phy_type = phy_serialink; } if (gp->phy_type == phy_mii_mdio1 || gp->phy_type == phy_mii_mdio0) { int i; for (i = 0; i < 32; i++) { gp->mii_phy_addr = i; if (sungem_phy_read(gp, MII_BMCR) != 0xffff) break; } if (i == 32) { if (pdev->device != PCI_DEVICE_ID_SUN_GEM) { pr_err("RIO MII phy will not respond\n"); return -1; } gp->phy_type = phy_serdes; } } /* Fetch the FIFO configurations now too. */ gp->tx_fifo_sz = readl(gp->regs + TXDMA_FSZ) * 64; gp->rx_fifo_sz = readl(gp->regs + RXDMA_FSZ) * 64; if (pdev->vendor == PCI_VENDOR_ID_SUN) { if (pdev->device == PCI_DEVICE_ID_SUN_GEM) { if (gp->tx_fifo_sz != (9 * 1024) || gp->rx_fifo_sz != (20 * 1024)) { pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n", gp->tx_fifo_sz, gp->rx_fifo_sz); return -1; } gp->swrst_base = 0; } else { if (gp->tx_fifo_sz != (2 * 1024) || gp->rx_fifo_sz != (2 * 1024)) { pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n", gp->tx_fifo_sz, gp->rx_fifo_sz); return -1; } gp->swrst_base = (64 / 4) << GREG_SWRST_CACHE_SHIFT; } } return 0; } static void gem_reinit_chip(struct gem *gp) { /* Reset the chip */ gem_reset(gp); /* Make sure ints are disabled */ gem_disable_ints(gp); /* Allocate & setup ring buffers */ gem_init_rings(gp); /* Configure pause thresholds */ gem_init_pause_thresholds(gp); /* Init DMA & MAC engines */ gem_init_dma(gp); gem_init_mac(gp); } static void gem_stop_phy(struct gem *gp, int wol) { u32 mifcfg; /* Let the chip settle down a bit, it seems that helps * for sleep mode on some models */ msleep(10); /* Make sure we aren't polling PHY status change. 
We * don't currently use that feature though */ mifcfg = readl(gp->regs + MIF_CFG); mifcfg &= ~MIF_CFG_POLL; writel(mifcfg, gp->regs + MIF_CFG); if (wol && gp->has_wol) { const unsigned char *e = &gp->dev->dev_addr[0]; u32 csr; /* Setup wake-on-lan for MAGIC packet */ writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0); writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1); writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2); writel(WOL_MCOUNT_N | WOL_MCOUNT_M, gp->regs + WOL_MCOUNT); csr = WOL_WAKECSR_ENABLE; if ((readl(gp->regs + MAC_XIFCFG) & MAC_XIFCFG_GMII) == 0) csr |= WOL_WAKECSR_MII; writel(csr, gp->regs + WOL_WAKECSR); } else { writel(0, gp->regs + MAC_RXCFG); (void)readl(gp->regs + MAC_RXCFG); /* Machine sleep will die in strange ways if we * dont wait a bit here, looks like the chip takes * some time to really shut down */ msleep(10); } writel(0, gp->regs + MAC_TXCFG); writel(0, gp->regs + MAC_XIFCFG); writel(0, gp->regs + TXDMA_CFG); writel(0, gp->regs + RXDMA_CFG); if (!wol) { gem_reset(gp); writel(MAC_TXRST_CMD, gp->regs + MAC_TXRST); writel(MAC_RXRST_CMD, gp->regs + MAC_RXRST); if (found_mii_phy(gp) && gp->phy_mii.def->ops->suspend) gp->phy_mii.def->ops->suspend(&gp->phy_mii); /* According to Apple, we must set the MDIO pins to this begnign * state or we may 1) eat more current, 2) damage some PHYs */ writel(mifcfg | MIF_CFG_BBMODE, gp->regs + MIF_CFG); writel(0, gp->regs + MIF_BBCLK); writel(0, gp->regs + MIF_BBDATA); writel(0, gp->regs + MIF_BBOENAB); writel(MAC_XIFCFG_GMII | MAC_XIFCFG_LBCK, gp->regs + MAC_XIFCFG); (void) readl(gp->regs + MAC_XIFCFG); } } static int gem_do_start(struct net_device *dev) { struct gem *gp = netdev_priv(dev); int rc; pci_set_master(gp->pdev); /* Init & setup chip hardware */ gem_reinit_chip(gp); /* An interrupt might come in handy */ rc = request_irq(gp->pdev->irq, gem_interrupt, IRQF_SHARED, dev->name, (void *)dev); if (rc) { netdev_err(dev, "failed to request irq !\n"); gem_reset(gp); gem_clean_rings(gp); gem_put_cell(gp); return rc; } /* Mark us as attached again if we come from resume(), this has * no effect if we weren't detached and needs to be done now. */ netif_device_attach(dev); /* Restart NAPI & queues */ gem_netif_start(gp); /* Detect & init PHY, start autoneg etc... this will * eventually result in starting DMA operations when * the link is up */ gem_init_phy(gp); return 0; } static void gem_do_stop(struct net_device *dev, int wol) { struct gem *gp = netdev_priv(dev); /* Stop NAPI and stop tx queue */ gem_netif_stop(gp); /* Make sure ints are disabled. We don't care about * synchronizing as NAPI is disabled, thus a stray * interrupt will do nothing bad (our irq handler * just schedules NAPI) */ gem_disable_ints(gp); /* Stop the link timer */ del_timer_sync(&gp->link_timer); /* We cannot cancel the reset task while holding the * rtnl lock, we'd get an A->B / B->A deadlock stituation * if we did. This is not an issue however as the reset * task is synchronized vs. us (rtnl_lock) and will do * nothing if the device is down or suspended. We do * still clear reset_task_pending to avoid a spurrious * reset later on in case we do resume before it gets * scheduled. 
*/ gp->reset_task_pending = 0; /* If we are going to sleep with WOL */ gem_stop_dma(gp); msleep(10); if (!wol) gem_reset(gp); msleep(10); /* Get rid of rings */ gem_clean_rings(gp); /* No irq needed anymore */ free_irq(gp->pdev->irq, (void *) dev); /* Shut the PHY down eventually and setup WOL */ gem_stop_phy(gp, wol); } static void gem_reset_task(struct work_struct *work) { struct gem *gp = container_of(work, struct gem, reset_task); /* Lock out the network stack (essentially shield ourselves * against a racing open, close, control call, or suspend */ rtnl_lock(); /* Skip the reset task if suspended or closed, or if it's * been cancelled by gem_do_stop (see comment there) */ if (!netif_device_present(gp->dev) || !netif_running(gp->dev) || !gp->reset_task_pending) { rtnl_unlock(); return; } /* Stop the link timer */ del_timer_sync(&gp->link_timer); /* Stop NAPI and tx */ gem_netif_stop(gp); /* Reset the chip & rings */ gem_reinit_chip(gp); if (gp->lstate == link_up) gem_set_link_modes(gp); /* Restart NAPI and Tx */ gem_netif_start(gp); /* We are back ! */ gp->reset_task_pending = 0; /* If the link is not up, restart autoneg, else restart the * polling timer */ if (gp->lstate != link_up) gem_begin_auto_negotiation(gp, NULL); else mod_timer(&gp->link_timer, jiffies + ((12 * HZ) / 10)); rtnl_unlock(); } static int gem_open(struct net_device *dev) { struct gem *gp = netdev_priv(dev); int rc; /* We allow open while suspended, we just do nothing, * the chip will be initialized in resume() */ if (netif_device_present(dev)) { /* Enable the cell */ gem_get_cell(gp); /* Make sure PCI access and bus master are enabled */ rc = pci_enable_device(gp->pdev); if (rc) { netdev_err(dev, "Failed to enable chip on PCI bus !\n"); /* Put cell and forget it for now, it will be considered *as still asleep, a new sleep cycle may bring it back */ gem_put_cell(gp); return -ENXIO; } return gem_do_start(dev); } return 0; } static int gem_close(struct net_device *dev) { struct gem *gp = netdev_priv(dev); if (netif_device_present(dev)) { gem_do_stop(dev, 0); /* Make sure bus master is disabled */ pci_disable_device(gp->pdev); /* Cell not needed neither if no WOL */ if (!gp->asleep_wol) gem_put_cell(gp); } return 0; } static int __maybe_unused gem_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct gem *gp = netdev_priv(dev); /* Lock the network stack first to avoid racing with open/close, * reset task and setting calls */ rtnl_lock(); /* Not running, mark ourselves non-present, no need for * a lock here */ if (!netif_running(dev)) { netif_device_detach(dev); rtnl_unlock(); return 0; } netdev_info(dev, "suspending, WakeOnLan %s\n", (gp->wake_on_lan && netif_running(dev)) ? "enabled" : "disabled"); /* Tell the network stack we're gone. gem_do_stop() below will * synchronize with TX, stop NAPI etc... */ netif_device_detach(dev); /* Switch off chip, remember WOL setting */ gp->asleep_wol = !!gp->wake_on_lan; gem_do_stop(dev, gp->asleep_wol); /* Cell not needed neither if no WOL */ if (!gp->asleep_wol) gem_put_cell(gp); /* Unlock the network stack */ rtnl_unlock(); return 0; } static int __maybe_unused gem_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); struct gem *gp = netdev_priv(dev); /* See locking comment in gem_suspend */ rtnl_lock(); /* Not running, mark ourselves present, no need for * a lock here */ if (!netif_running(dev)) { netif_device_attach(dev); rtnl_unlock(); return 0; } /* Enable the cell */ gem_get_cell(gp); /* Restart chip. 
If that fails there isn't much we can do, we * leave things stopped. */ gem_do_start(dev); /* If we had WOL enabled, the cell clock was never turned off during * sleep, so we end up beeing unbalanced. Fix that here */ if (gp->asleep_wol) gem_put_cell(gp); /* Unlock the network stack */ rtnl_unlock(); return 0; } static struct net_device_stats *gem_get_stats(struct net_device *dev) { struct gem *gp = netdev_priv(dev); /* I have seen this being called while the PM was in progress, * so we shield against this. Let's also not poke at registers * while the reset task is going on. * * TODO: Move stats collection elsewhere (link timer ?) and * make this a nop to avoid all those synchro issues */ if (!netif_device_present(dev) || !netif_running(dev)) goto bail; /* Better safe than sorry... */ if (WARN_ON(!gp->cell_enabled)) goto bail; dev->stats.rx_crc_errors += readl(gp->regs + MAC_FCSERR); writel(0, gp->regs + MAC_FCSERR); dev->stats.rx_frame_errors += readl(gp->regs + MAC_AERR); writel(0, gp->regs + MAC_AERR); dev->stats.rx_length_errors += readl(gp->regs + MAC_LERR); writel(0, gp->regs + MAC_LERR); dev->stats.tx_aborted_errors += readl(gp->regs + MAC_ECOLL); dev->stats.collisions += (readl(gp->regs + MAC_ECOLL) + readl(gp->regs + MAC_LCOLL)); writel(0, gp->regs + MAC_ECOLL); writel(0, gp->regs + MAC_LCOLL); bail: return &dev->stats; } static int gem_set_mac_address(struct net_device *dev, void *addr) { struct sockaddr *macaddr = (struct sockaddr *) addr; const unsigned char *e = &dev->dev_addr[0]; struct gem *gp = netdev_priv(dev); if (!is_valid_ether_addr(macaddr->sa_data)) return -EADDRNOTAVAIL; eth_hw_addr_set(dev, macaddr->sa_data); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* Better safe than sorry... */ if (WARN_ON(!gp->cell_enabled)) return 0; writel((e[4] << 8) | e[5], gp->regs + MAC_ADDR0); writel((e[2] << 8) | e[3], gp->regs + MAC_ADDR1); writel((e[0] << 8) | e[1], gp->regs + MAC_ADDR2); return 0; } static void gem_set_multicast(struct net_device *dev) { struct gem *gp = netdev_priv(dev); u32 rxcfg, rxcfg_new; int limit = 10000; if (!netif_running(dev) || !netif_device_present(dev)) return; /* Better safe than sorry... */ if (gp->reset_task_pending || WARN_ON(!gp->cell_enabled)) return; rxcfg = readl(gp->regs + MAC_RXCFG); rxcfg_new = gem_setup_multicast(gp); #ifdef STRIP_FCS rxcfg_new |= MAC_RXCFG_SFCS; #endif gp->mac_rx_cfg = rxcfg_new; writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG); while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) { if (!limit--) break; udelay(10); } rxcfg &= ~(MAC_RXCFG_PROM | MAC_RXCFG_HFE); rxcfg |= rxcfg_new; writel(rxcfg, gp->regs + MAC_RXCFG); } /* Jumbo-grams don't seem to work :-( */ #define GEM_MIN_MTU ETH_MIN_MTU #if 1 #define GEM_MAX_MTU ETH_DATA_LEN #else #define GEM_MAX_MTU 9000 #endif static int gem_change_mtu(struct net_device *dev, int new_mtu) { struct gem *gp = netdev_priv(dev); WRITE_ONCE(dev->mtu, new_mtu); /* We'll just catch it later when the device is up'd or resumed */ if (!netif_running(dev) || !netif_device_present(dev)) return 0; /* Better safe than sorry... 
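 * reinitializing the chip means touching its registers, which is only
 * safe while the cell clock is enabled.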
*/ if (WARN_ON(!gp->cell_enabled)) return 0; gem_netif_stop(gp); gem_reinit_chip(gp); if (gp->lstate == link_up) gem_set_link_modes(gp); gem_netif_start(gp); return 0; } static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct gem *gp = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(gp->pdev), sizeof(info->bus_info)); } static int gem_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct gem *gp = netdev_priv(dev); u32 supported, advertising; if (gp->phy_type == phy_mii_mdio0 || gp->phy_type == phy_mii_mdio1) { if (gp->phy_mii.def) supported = gp->phy_mii.def->features; else supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full); /* XXX hardcoded stuff for now */ cmd->base.port = PORT_MII; cmd->base.phy_address = 0; /* XXX fixed PHYAD */ /* Return current PHY settings */ cmd->base.autoneg = gp->want_autoneg; cmd->base.speed = gp->phy_mii.speed; cmd->base.duplex = gp->phy_mii.duplex; advertising = gp->phy_mii.advertising; /* If we started with a forced mode, we don't have a default * advertise set, we need to return something sensible so * userland can re-enable autoneg properly. */ if (advertising == 0) advertising = supported; } else { // XXX PCS ? supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg); advertising = supported; cmd->base.speed = 0; cmd->base.duplex = 0; cmd->base.port = 0; cmd->base.phy_address = 0; cmd->base.autoneg = 0; /* serdes means usually a Fibre connector, with most fixed */ if (gp->phy_type == phy_serdes) { cmd->base.port = PORT_FIBRE; supported = (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full | SUPPORTED_FIBRE | SUPPORTED_Autoneg | SUPPORTED_Pause | SUPPORTED_Asym_Pause); advertising = supported; if (gp->lstate == link_up) cmd->base.speed = SPEED_1000; cmd->base.duplex = DUPLEX_FULL; cmd->base.autoneg = 1; } } ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, supported); ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, advertising); return 0; } static int gem_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct gem *gp = netdev_priv(dev); u32 speed = cmd->base.speed; u32 advertising; ethtool_convert_link_mode_to_legacy_u32(&advertising, cmd->link_modes.advertising); /* Verify the settings we care about. */ if (cmd->base.autoneg != AUTONEG_ENABLE && cmd->base.autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->base.autoneg == AUTONEG_ENABLE && advertising == 0) return -EINVAL; if (cmd->base.autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || (cmd->base.duplex != DUPLEX_HALF && cmd->base.duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. 
*/ if (netif_device_present(gp->dev)) { del_timer_sync(&gp->link_timer); gem_begin_auto_negotiation(gp, cmd); } return 0; } static int gem_nway_reset(struct net_device *dev) { struct gem *gp = netdev_priv(dev); if (!gp->want_autoneg) return -EINVAL; /* Restart link process */ if (netif_device_present(gp->dev)) { del_timer_sync(&gp->link_timer); gem_begin_auto_negotiation(gp, NULL); } return 0; } static u32 gem_get_msglevel(struct net_device *dev) { struct gem *gp = netdev_priv(dev); return gp->msg_enable; } static void gem_set_msglevel(struct net_device *dev, u32 value) { struct gem *gp = netdev_priv(dev); gp->msg_enable = value; } /* Add more when I understand how to program the chip */ /* like WAKE_UCAST | WAKE_MCAST | WAKE_BCAST */ #define WOL_SUPPORTED_MASK (WAKE_MAGIC) static void gem_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gem *gp = netdev_priv(dev); /* Add more when I understand how to program the chip */ if (gp->has_wol) { wol->supported = WOL_SUPPORTED_MASK; wol->wolopts = gp->wake_on_lan; } else { wol->supported = 0; wol->wolopts = 0; } } static int gem_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { struct gem *gp = netdev_priv(dev); if (!gp->has_wol) return -EOPNOTSUPP; gp->wake_on_lan = wol->wolopts & WOL_SUPPORTED_MASK; return 0; } static const struct ethtool_ops gem_ethtool_ops = { .get_drvinfo = gem_get_drvinfo, .get_link = ethtool_op_get_link, .nway_reset = gem_nway_reset, .get_msglevel = gem_get_msglevel, .set_msglevel = gem_set_msglevel, .get_wol = gem_get_wol, .set_wol = gem_set_wol, .get_link_ksettings = gem_get_link_ksettings, .set_link_ksettings = gem_set_link_ksettings, }; static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct gem *gp = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); int rc = -EOPNOTSUPP; /* For SIOCGMIIREG and SIOCSMIIREG the core checks for us that * netif_device_present() is true and holds rtnl_lock for us * so we have nothing to worry about */ switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = gp->mii_phy_addr; fallthrough; case SIOCGMIIREG: /* Read MII PHY register. */ data->val_out = __sungem_phy_read(gp, data->phy_id & 0x1f, data->reg_num & 0x1f); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. */ __sungem_phy_write(gp, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); rc = 0; break; } return rc; } #if (!defined(CONFIG_SPARC) && !defined(CONFIG_PPC_PMAC)) /* Fetch MAC address from vital product data of PCI ROM. */ static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, unsigned char *dev_addr) { int this_offset; for (this_offset = 0x20; this_offset < len; this_offset++) { void __iomem *p = rom_base + this_offset; int i; if (readb(p + 0) != 0x90 || readb(p + 1) != 0x00 || readb(p + 2) != 0x09 || readb(p + 3) != 0x4e || readb(p + 4) != 0x41 || readb(p + 5) != 0x06) continue; this_offset += 6; p += 6; for (i = 0; i < 6; i++) dev_addr[i] = readb(p + i); return 1; } return 0; } static void get_gem_mac_nonobp(struct pci_dev *pdev, unsigned char *dev_addr) { size_t size; void __iomem *p = pci_map_rom(pdev, &size); if (p) { int found; found = readb(p) == 0x55 && readb(p + 1) == 0xaa && find_eth_addr_in_vpd(p, (64 * 1024), dev_addr); pci_unmap_rom(pdev, p); if (found) return; } /* Sun MAC prefix then 3 random bytes. 
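 * (08:00:20 is the Sun Microsystems OUI.)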
*/ dev_addr[0] = 0x08; dev_addr[1] = 0x00; dev_addr[2] = 0x20; get_random_bytes(dev_addr + 3, 3); } #endif /* not Sparc and not PPC */ static int gem_get_device_address(struct gem *gp) { #if defined(CONFIG_SPARC) || defined(CONFIG_PPC_PMAC) struct net_device *dev = gp->dev; const unsigned char *addr; addr = of_get_property(gp->of_node, "local-mac-address", NULL); if (addr == NULL) { #ifdef CONFIG_SPARC addr = idprom->id_ethaddr; #else printk("\n"); pr_err("%s: can't get mac-address\n", dev->name); return -1; #endif } eth_hw_addr_set(dev, addr); #else u8 addr[ETH_ALEN]; get_gem_mac_nonobp(gp->pdev, addr); eth_hw_addr_set(gp->dev, addr); #endif return 0; } static void gem_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct gem *gp = netdev_priv(dev); unregister_netdev(dev); /* Ensure reset task is truly gone */ cancel_work_sync(&gp->reset_task); /* Free resources */ dma_free_coherent(&pdev->dev, sizeof(struct gem_init_block), gp->init_block, gp->gblock_dvma); iounmap(gp->regs); pci_release_regions(pdev); free_netdev(dev); } } static const struct net_device_ops gem_netdev_ops = { .ndo_open = gem_open, .ndo_stop = gem_close, .ndo_start_xmit = gem_start_xmit, .ndo_get_stats = gem_get_stats, .ndo_set_rx_mode = gem_set_multicast, .ndo_eth_ioctl = gem_ioctl, .ndo_tx_timeout = gem_tx_timeout, .ndo_change_mtu = gem_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = gem_set_mac_address, }; static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { unsigned long gemreg_base, gemreg_len; struct net_device *dev; struct gem *gp; int err, pci_using_dac; printk_once(KERN_INFO "%s", version); /* Apple gmac note: during probe, the chip is powered up by * the arch code to allow the code below to work (and to let * the chip be probed on the config space. It won't stay powered * up until the interface is brought up however, so we can't rely * on register configuration done at this point. */ err = pci_enable_device(pdev); if (err) { pr_err("Cannot enable MMIO operation, aborting\n"); return err; } pci_set_master(pdev); /* Configure DMA attributes. */ /* All of the GEM documentation states that 64-bit DMA addressing * is fully supported and should work just fine. However the * front end for RIO based GEMs is different and only supports * 32-bit addressing. * * For now we assume the various PPC GEMs are 32-bit only as well. 
*/ if (pdev->vendor == PCI_VENDOR_ID_SUN && pdev->device == PCI_DEVICE_ID_SUN_GEM && !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) { pci_using_dac = 1; } else { err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { pr_err("No usable DMA configuration, aborting\n"); goto err_disable_device; } pci_using_dac = 0; } gemreg_base = pci_resource_start(pdev, 0); gemreg_len = pci_resource_len(pdev, 0); if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { pr_err("Cannot find proper PCI device base address, aborting\n"); err = -ENODEV; goto err_disable_device; } dev = alloc_etherdev(sizeof(*gp)); if (!dev) { err = -ENOMEM; goto err_disable_device; } SET_NETDEV_DEV(dev, &pdev->dev); gp = netdev_priv(dev); err = pci_request_regions(pdev, DRV_NAME); if (err) { pr_err("Cannot obtain PCI resources, aborting\n"); goto err_out_free_netdev; } gp->pdev = pdev; gp->dev = dev; gp->msg_enable = DEFAULT_MSG; timer_setup(&gp->link_timer, gem_link_timer, 0); INIT_WORK(&gp->reset_task, gem_reset_task); gp->lstate = link_down; gp->timer_ticks = 0; netif_carrier_off(dev); gp->regs = ioremap(gemreg_base, gemreg_len); if (!gp->regs) { pr_err("Cannot map device registers, aborting\n"); err = -EIO; goto err_out_free_res; } /* On Apple, we want a reference to the Open Firmware device-tree * node. We use it for clock control. */ #if defined(CONFIG_PPC_PMAC) || defined(CONFIG_SPARC) gp->of_node = pci_device_to_OF_node(pdev); #endif /* Only Apple version supports WOL afaik */ if (pdev->vendor == PCI_VENDOR_ID_APPLE) gp->has_wol = 1; /* Make sure cell is enabled */ gem_get_cell(gp); /* Make sure everything is stopped and in init state */ gem_reset(gp); /* Fill up the mii_phy structure (even if we won't use it) */ gp->phy_mii.dev = dev; gp->phy_mii.mdio_read = _sungem_phy_read; gp->phy_mii.mdio_write = _sungem_phy_write; #ifdef CONFIG_PPC_PMAC gp->phy_mii.platform_data = gp->of_node; #endif /* By default, we start with autoneg */ gp->want_autoneg = 1; /* Check fifo sizes, PHY type, etc... */ if (gem_check_invariants(gp)) { err = -ENODEV; goto err_out_iounmap; } /* It is guaranteed that the returned buffer will be at least * PAGE_SIZE aligned. 
*/ gp->init_block = dma_alloc_coherent(&pdev->dev, sizeof(struct gem_init_block), &gp->gblock_dvma, GFP_KERNEL); if (!gp->init_block) { pr_err("Cannot allocate init block, aborting\n"); err = -ENOMEM; goto err_out_iounmap; } err = gem_get_device_address(gp); if (err) goto err_out_free_consistent; dev->netdev_ops = &gem_netdev_ops; netif_napi_add(dev, &gp->napi, gem_poll); dev->ethtool_ops = &gem_ethtool_ops; dev->watchdog_timeo = 5 * HZ; dev->dma = 0; /* Set that now, in case PM kicks in now */ pci_set_drvdata(pdev, dev); /* We can do scatter/gather and HW checksum */ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM; dev->features = dev->hw_features; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; /* MTU range: 68 - 1500 (Jumbo mode is broken) */ dev->min_mtu = GEM_MIN_MTU; dev->max_mtu = GEM_MAX_MTU; /* Register with kernel */ if (register_netdev(dev)) { pr_err("Cannot register net device, aborting\n"); err = -ENOMEM; goto err_out_free_consistent; } /* Undo the get_cell with appropriate locking (we could use * ndo_init/uninit but that would be even more clumsy imho) */ rtnl_lock(); gem_put_cell(gp); rtnl_unlock(); netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n", dev->dev_addr); return 0; err_out_free_consistent: gem_remove_one(pdev); err_out_iounmap: gem_put_cell(gp); iounmap(gp->regs); err_out_free_res: pci_release_regions(pdev); err_out_free_netdev: free_netdev(dev); err_disable_device: pci_disable_device(pdev); return err; } static SIMPLE_DEV_PM_OPS(gem_pm_ops, gem_suspend, gem_resume); static struct pci_driver gem_driver = { .name = GEM_MODULE_NAME, .id_table = gem_pci_tbl, .probe = gem_init_one, .remove = gem_remove_one, .driver.pm = &gem_pm_ops, }; module_pci_driver(gem_driver);
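/*
 * A minimal, self-contained userspace sketch of the multicast hash
 * filtering done in gem_setup_multicast() above: the GEM hashes each
 * multicast address with a little-endian CRC-32, keeps the top 8 bits,
 * and uses them to pick one of 256 bits spread over sixteen 16-bit
 * MAC_HASH registers.  ether_crc_le() is reimplemented here (classic
 * LSB-first CRC-32, polynomial 0xedb88320) so the sketch builds
 * outside the kernel; the hash-bit arithmetic mirrors the driver.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t ether_crc_le(int length, const uint8_t *data)
{
	uint32_t crc = 0xffffffff;	/* initial value */

	while (--length >= 0) {
		uint8_t octet = *data++;
		int bit;

		for (bit = 8; --bit >= 0; octet >>= 1) {
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ 0xedb88320;
			else
				crc >>= 1;
		}
	}
	return crc;
}

int main(void)
{
	uint16_t hash_table[16] = { 0 };
	const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint32_t crc = ether_crc_le(6, mc_addr);
	int i;

	crc >>= 24;				/* keep the top 8 bits */
	hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf));

	for (i = 0; i < 16; i++)		/* MAC_HASH0..MAC_HASH15 */
		printf("hash[%2d] = 0x%04x\n", i, hash_table[i]);
	return 0;
}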
// SPDX-License-Identifier: GPL-2.0 /* * irqchip for the IXP4xx interrupt controller * Copyright (C) 2019 Linus Walleij <[email protected]> * * Based on arch/arm/mach-ixp4xx/common.c * Copyright 2002 (C) Intel Corporation * Copyright 2003-2004 (C) MontaVista, Software, Inc. * Copyright (C) Deepak Saxena <[email protected]> */ #include <linux/bitops.h> #include <linux/gpio/driver.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/irqchip.h> #include <linux/irqdomain.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/cpu.h> #include <asm/exception.h> #include <asm/mach/irq.h> #define IXP4XX_ICPR 0x00 /* Interrupt Status */ #define IXP4XX_ICMR 0x04 /* Interrupt Enable */ #define IXP4XX_ICLR 0x08 /* Interrupt IRQ/FIQ Select */ #define IXP4XX_ICIP 0x0C /* IRQ Status */ #define IXP4XX_ICFP 0x10 /* FIQ Status */ #define IXP4XX_ICHR 0x14 /* Interrupt Priority */ #define IXP4XX_ICIH 0x18 /* IRQ Highest Pri Int */ #define IXP4XX_ICFH 0x1C /* FIQ Highest Pri Int */ /* IXP43x and IXP46x-only */ #define IXP4XX_ICPR2 0x20 /* Interrupt Status 2 */ #define IXP4XX_ICMR2 0x24 /* Interrupt Enable 2 */ #define IXP4XX_ICLR2 0x28 /* Interrupt IRQ/FIQ Select 2 */ #define IXP4XX_ICIP2 0x2C /* IRQ Status */ #define IXP4XX_ICFP2 0x30 /* FIQ Status */ #define IXP4XX_ICEEN 0x34 /* Error High Pri Enable */ /** * struct ixp4xx_irq - state container for the Faraday IRQ controller * @irqbase: IRQ controller memory base in virtual memory * @is_356: if this is an IXP43x, IXP45x or IX46x SoC (with 64 IRQs) * @irqchip: irqchip for this instance * @domain: IRQ domain for this instance */ struct ixp4xx_irq { void __iomem *irqbase; bool is_356; struct irq_chip irqchip; struct irq_domain *domain; }; /* Local static state container */ static struct ixp4xx_irq ixirq; /* GPIO Clocks */ #define IXP4XX_GPIO_CLK_0 14 #define IXP4XX_GPIO_CLK_1 15 static int ixp4xx_set_irq_type(struct irq_data *d, unsigned int type) { /* All are level active high (asserted) here */ if (type != IRQ_TYPE_LEVEL_HIGH) return -EINVAL; return 0; } static void ixp4xx_irq_mask(struct irq_data *d) { struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d); u32 val; if (ixi->is_356 && d->hwirq >= 32) { val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2); val &= ~BIT(d->hwirq - 32); __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2); } else { val = __raw_readl(ixi->irqbase + IXP4XX_ICMR); val &= ~BIT(d->hwirq); __raw_writel(val, ixi->irqbase + IXP4XX_ICMR); } } /* * Level triggered interrupts on GPIO lines can only be cleared when the * interrupt condition disappears. 
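 * There is therefore no ack callback: masking and unmasking the
 * enable bit is all this irqchip implements besides the type check.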
*/ static void ixp4xx_irq_unmask(struct irq_data *d) { struct ixp4xx_irq *ixi = irq_data_get_irq_chip_data(d); u32 val; if (ixi->is_356 && d->hwirq >= 32) { val = __raw_readl(ixi->irqbase + IXP4XX_ICMR2); val |= BIT(d->hwirq - 32); __raw_writel(val, ixi->irqbase + IXP4XX_ICMR2); } else { val = __raw_readl(ixi->irqbase + IXP4XX_ICMR); val |= BIT(d->hwirq); __raw_writel(val, ixi->irqbase + IXP4XX_ICMR); } } static void __exception_irq_entry ixp4xx_handle_irq(struct pt_regs *regs) { struct ixp4xx_irq *ixi = &ixirq; unsigned long status; int i; status = __raw_readl(ixi->irqbase + IXP4XX_ICIP); for_each_set_bit(i, &status, 32) generic_handle_domain_irq(ixi->domain, i); /* * IXP465/IXP435 has an upper IRQ status register */ if (ixi->is_356) { status = __raw_readl(ixi->irqbase + IXP4XX_ICIP2); for_each_set_bit(i, &status, 32) generic_handle_domain_irq(ixi->domain, i + 32); } } static int ixp4xx_irq_domain_translate(struct irq_domain *domain, struct irq_fwspec *fwspec, unsigned long *hwirq, unsigned int *type) { /* We support standard DT translation */ if (is_of_node(fwspec->fwnode) && fwspec->param_count == 2) { *hwirq = fwspec->param[0]; *type = fwspec->param[1]; return 0; } if (is_fwnode_irqchip(fwspec->fwnode)) { if (fwspec->param_count != 2) return -EINVAL; *hwirq = fwspec->param[0]; *type = fwspec->param[1]; WARN_ON(*type == IRQ_TYPE_NONE); return 0; } return -EINVAL; } static int ixp4xx_irq_domain_alloc(struct irq_domain *d, unsigned int irq, unsigned int nr_irqs, void *data) { struct ixp4xx_irq *ixi = d->host_data; irq_hw_number_t hwirq; unsigned int type = IRQ_TYPE_NONE; struct irq_fwspec *fwspec = data; int ret; int i; ret = ixp4xx_irq_domain_translate(d, fwspec, &hwirq, &type); if (ret) return ret; for (i = 0; i < nr_irqs; i++) { /* * TODO: after converting IXP4xx to only device tree, set * handle_bad_irq as default handler and assume all consumers * call .set_type() as this is provided in the second cell in * the device tree phandle. 
*/ irq_domain_set_info(d, irq + i, hwirq + i, &ixi->irqchip, ixi, handle_level_irq, NULL, NULL); irq_set_probe(irq + i); } return 0; } /* * This needs to be a hierarchical irqdomain to work well with the * GPIO irqchip (which is lower in the hierarchy) */ static const struct irq_domain_ops ixp4xx_irqdomain_ops = { .translate = ixp4xx_irq_domain_translate, .alloc = ixp4xx_irq_domain_alloc, .free = irq_domain_free_irqs_common, }; /** * ixp4x_irq_setup() - Common setup code for the IXP4xx interrupt controller * @ixi: State container * @irqbase: Virtual memory base for the interrupt controller * @fwnode: Corresponding fwnode abstraction for this controller * @is_356: if this is an IXP43x, IXP45x or IXP46x SoC variant */ static int __init ixp4xx_irq_setup(struct ixp4xx_irq *ixi, void __iomem *irqbase, struct fwnode_handle *fwnode, bool is_356) { int nr_irqs; ixi->irqbase = irqbase; ixi->is_356 = is_356; /* Route all sources to IRQ instead of FIQ */ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR); /* Disable all interrupts */ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR); if (is_356) { /* Route upper 32 sources to IRQ instead of FIQ */ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICLR2); /* Disable upper 32 interrupts */ __raw_writel(0x0, ixi->irqbase + IXP4XX_ICMR2); nr_irqs = 64; } else { nr_irqs = 32; } ixi->irqchip.name = "IXP4xx"; ixi->irqchip.irq_mask = ixp4xx_irq_mask; ixi->irqchip.irq_unmask = ixp4xx_irq_unmask; ixi->irqchip.irq_set_type = ixp4xx_set_irq_type; ixi->domain = irq_domain_create_linear(fwnode, nr_irqs, &ixp4xx_irqdomain_ops, ixi); if (!ixi->domain) { pr_crit("IXP4XX: can not add primary irqdomain\n"); return -ENODEV; } set_handle_irq(ixp4xx_handle_irq); return 0; } static int __init ixp4xx_of_init_irq(struct device_node *np, struct device_node *parent) { struct ixp4xx_irq *ixi = &ixirq; void __iomem *base; struct fwnode_handle *fwnode; bool is_356; int ret; base = of_iomap(np, 0); if (!base) { pr_crit("IXP4XX: could not ioremap interrupt controller\n"); return -ENODEV; } fwnode = of_node_to_fwnode(np); /* These chip variants have 64 interrupts */ is_356 = of_device_is_compatible(np, "intel,ixp43x-interrupt") || of_device_is_compatible(np, "intel,ixp45x-interrupt") || of_device_is_compatible(np, "intel,ixp46x-interrupt"); ret = ixp4xx_irq_setup(ixi, base, fwnode, is_356); if (ret) pr_crit("IXP4XX: failed to set up irqchip\n"); return ret; } IRQCHIP_DECLARE(ixp42x, "intel,ixp42x-interrupt", ixp4xx_of_init_irq); IRQCHIP_DECLARE(ixp43x, "intel,ixp43x-interrupt", ixp4xx_of_init_irq); IRQCHIP_DECLARE(ixp45x, "intel,ixp45x-interrupt", ixp4xx_of_init_irq); IRQCHIP_DECLARE(ixp46x, "intel,ixp46x-interrupt", ixp4xx_of_init_irq);
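/*
 * A small standalone sketch of the bank/bit arithmetic used by
 * ixp4xx_irq_mask()/ixp4xx_irq_unmask() above: hwirqs 0-31 live in
 * ICMR and, on the IXP43x/45x/46x variants, hwirqs 32-63 live in
 * ICMR2.  The helper name is made up for illustration.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define IXP4XX_ICMR	0x04	/* Interrupt Enable */
#define IXP4XX_ICMR2	0x24	/* Interrupt Enable 2 */

static void hwirq_to_enable_bit(unsigned int hwirq, bool is_356,
				uint32_t *reg_off, uint32_t *mask)
{
	if (is_356 && hwirq >= 32) {
		*reg_off = IXP4XX_ICMR2;
		*mask = 1u << (hwirq - 32);
	} else {
		*reg_off = IXP4XX_ICMR;
		*mask = 1u << hwirq;
	}
}

int main(void)
{
	uint32_t off, mask;

	hwirq_to_enable_bit(35, true, &off, &mask);
	printf("hwirq 35 -> reg 0x%02x, mask 0x%08x\n", off, mask);
	return 0;
}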
// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB /* Copyright (c) 2020 Mellanox Technologies Ltd. */ #include <linux/mlx5/driver.h> #include "eswitch.h" static void mlx5_esw_get_port_parent_id(struct mlx5_core_dev *dev, struct netdev_phys_item_id *ppid) { u64 parent_id; parent_id = mlx5_query_nic_system_image_guid(dev); ppid->id_len = sizeof(parent_id); memcpy(ppid->id, &parent_id, sizeof(parent_id)); } static bool mlx5_esw_devlink_port_supported(struct mlx5_eswitch *esw, u16 vport_num) { return (mlx5_core_is_ecpf(esw->dev) && vport_num == MLX5_VPORT_PF) || mlx5_eswitch_is_vf_vport(esw, vport_num) || mlx5_core_is_ec_vf_vport(esw->dev, vport_num); } static void mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(struct mlx5_eswitch *esw, u16 vport_num, struct devlink_port *dl_port) { struct mlx5_core_dev *dev = esw->dev; struct netdev_phys_item_id ppid = {}; u32 controller_num = 0; bool external; u16 pfnum; mlx5_esw_get_port_parent_id(dev, &ppid); pfnum = mlx5_get_dev_index(dev); external = mlx5_core_is_ecpf_esw_manager(dev); if (external) controller_num = dev->priv.eswitch->offloads.host_number + 1; if (vport_num == MLX5_VPORT_PF) { memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_pf_set(dl_port, controller_num, pfnum, external); } else if (mlx5_eswitch_is_vf_vport(esw, vport_num)) { memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_vf_set(dl_port, controller_num, pfnum, vport_num - 1, external); } else if (mlx5_core_is_ec_vf_vport(esw->dev, vport_num)) { memcpy(dl_port->attrs.switch_id.id, ppid.id, ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_vf_set(dl_port, 0, pfnum, vport_num - 1, false); } } int mlx5_esw_offloads_pf_vf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { struct mlx5_devlink_port *dl_port; u16 vport_num = vport->vport; if (!mlx5_esw_devlink_port_supported(esw, vport_num)) return 0; dl_port = kzalloc(sizeof(*dl_port), GFP_KERNEL); if (!dl_port) return -ENOMEM; mlx5_esw_offloads_pf_vf_devlink_port_attrs_set(esw, vport_num, &dl_port->dl_port); vport->dl_port = dl_port; mlx5_devlink_port_init(dl_port, vport); return 0; } void mlx5_esw_offloads_pf_vf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { if (!vport->dl_port) return; kfree(vport->dl_port); vport->dl_port = NULL; } static const struct devlink_port_ops mlx5_esw_pf_vf_dl_port_ops = { .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, .port_fn_migratable_get = mlx5_devlink_port_fn_migratable_get, .port_fn_migratable_set = mlx5_devlink_port_fn_migratable_set, #ifdef CONFIG_XFRM_OFFLOAD .port_fn_ipsec_crypto_get = mlx5_devlink_port_fn_ipsec_crypto_get, .port_fn_ipsec_crypto_set = mlx5_devlink_port_fn_ipsec_crypto_set, .port_fn_ipsec_packet_get = mlx5_devlink_port_fn_ipsec_packet_get, .port_fn_ipsec_packet_set = mlx5_devlink_port_fn_ipsec_packet_set, #endif /* CONFIG_XFRM_OFFLOAD */ .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get, .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set, }; static void mlx5_esw_offloads_sf_devlink_port_attrs_set(struct mlx5_eswitch *esw, struct devlink_port *dl_port, u32 controller, u32 sfnum) { struct mlx5_core_dev *dev = esw->dev; struct netdev_phys_item_id ppid = {}; 
u16 pfnum; pfnum = mlx5_get_dev_index(dev); mlx5_esw_get_port_parent_id(dev, &ppid); memcpy(dl_port->attrs.switch_id.id, &ppid.id[0], ppid.id_len); dl_port->attrs.switch_id.id_len = ppid.id_len; devlink_port_attrs_pci_sf_set(dl_port, controller, pfnum, sfnum, !!controller); } int mlx5_esw_offloads_sf_devlink_port_init(struct mlx5_eswitch *esw, struct mlx5_vport *vport, struct mlx5_devlink_port *dl_port, u32 controller, u32 sfnum) { mlx5_esw_offloads_sf_devlink_port_attrs_set(esw, &dl_port->dl_port, controller, sfnum); vport->dl_port = dl_port; mlx5_devlink_port_init(dl_port, vport); return 0; } void mlx5_esw_offloads_sf_devlink_port_cleanup(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { vport->dl_port = NULL; } static const struct devlink_port_ops mlx5_esw_dl_sf_port_ops = { #ifdef CONFIG_MLX5_SF_MANAGER .port_del = mlx5_devlink_sf_port_del, #endif .port_fn_hw_addr_get = mlx5_devlink_port_fn_hw_addr_get, .port_fn_hw_addr_set = mlx5_devlink_port_fn_hw_addr_set, .port_fn_roce_get = mlx5_devlink_port_fn_roce_get, .port_fn_roce_set = mlx5_devlink_port_fn_roce_set, #ifdef CONFIG_MLX5_SF_MANAGER .port_fn_state_get = mlx5_devlink_sf_port_fn_state_get, .port_fn_state_set = mlx5_devlink_sf_port_fn_state_set, #endif .port_fn_max_io_eqs_get = mlx5_devlink_port_fn_max_io_eqs_get, .port_fn_max_io_eqs_set = mlx5_devlink_port_fn_max_io_eqs_set, }; int mlx5_esw_offloads_devlink_port_register(struct mlx5_eswitch *esw, struct mlx5_vport *vport) { struct mlx5_core_dev *dev = esw->dev; const struct devlink_port_ops *ops; struct mlx5_devlink_port *dl_port; u16 vport_num = vport->vport; unsigned int dl_port_index; struct devlink *devlink; int err; dl_port = vport->dl_port; if (!dl_port) return 0; if (mlx5_esw_is_sf_vport(esw, vport_num)) ops = &mlx5_esw_dl_sf_port_ops; else if (mlx5_eswitch_is_pf_vf_vport(esw, vport_num)) ops = &mlx5_esw_pf_vf_dl_port_ops; else ops = NULL; devlink = priv_to_devlink(dev); dl_port_index = mlx5_esw_vport_to_devlink_port_index(dev, vport_num); err = devl_port_register_with_ops(devlink, &dl_port->dl_port, dl_port_index, ops); if (err) return err; err = devl_rate_leaf_create(&dl_port->dl_port, vport, NULL); if (err) goto rate_err; return 0; rate_err: devl_port_unregister(&dl_port->dl_port); return err; } void mlx5_esw_offloads_devlink_port_unregister(struct mlx5_vport *vport) { struct mlx5_devlink_port *dl_port; if (!vport->dl_port) return; dl_port = vport->dl_port; mlx5_esw_qos_vport_update_parent(vport, NULL, NULL); devl_rate_leaf_destroy(&dl_port->dl_port); devl_port_unregister(&dl_port->dl_port); } struct devlink_port *mlx5_esw_offloads_devlink_port(struct mlx5_eswitch *esw, u16 vport_num) { struct mlx5_vport *vport; vport = mlx5_eswitch_get_vport(esw, vport_num); return IS_ERR(vport) ? ERR_CAST(vport) : &vport->dl_port->dl_port; }
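/*
 * A minimal sketch of what mlx5_esw_get_port_parent_id() above does:
 * the 64-bit system image GUID is copied byte-for-byte into the
 * switch-id buffer so every port of one NIC reports the same parent.
 * The struct is a trimmed stand-in for netdev_phys_item_id; byte
 * order is whatever the host uses, exactly as in the driver.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define PHYS_ITEM_ID_LEN 32	/* MAX_PHYS_ITEM_ID_LEN in the kernel */

struct phys_item_id {
	uint8_t id[PHYS_ITEM_ID_LEN];
	uint8_t id_len;
};

static void set_parent_id(struct phys_item_id *ppid, uint64_t guid)
{
	ppid->id_len = sizeof(guid);
	memcpy(ppid->id, &guid, sizeof(guid));
}

int main(void)
{
	struct phys_item_id ppid = { { 0 }, 0 };
	int i;

	set_parent_id(&ppid, 0x1122334455667788ull);
	for (i = 0; i < ppid.id_len; i++)
		printf("%02x", ppid.id[i]);
	printf("\n");
	return 0;
}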
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2015-2016 MediaTek Inc.
 * Author: Houlong Wei <[email protected]>
 *         Ming Hsiu Tsai <[email protected]>
 */

#ifndef __MTK_MDP_IPI_H__
#define __MTK_MDP_IPI_H__

#define MTK_MDP_MAX_NUM_PLANE		3

enum mdp_ipi_msgid {
	AP_MDP_INIT		= 0xd000,
	AP_MDP_DEINIT		= 0xd001,
	AP_MDP_PROCESS		= 0xd002,

	VPU_MDP_INIT_ACK	= 0xe000,
	VPU_MDP_DEINIT_ACK	= 0xe001,
	VPU_MDP_PROCESS_ACK	= 0xe002
};

#pragma pack(push, 4)

/**
 * struct mdp_ipi_init - for AP_MDP_INIT
 * @msg_id   : AP_MDP_INIT
 * @ipi_id   : IPI_MDP
 * @ap_inst  : AP mtk_mdp_vpu address
 */
struct mdp_ipi_init {
	uint32_t msg_id;
	uint32_t ipi_id;
	uint64_t ap_inst;
};

/**
 * struct mdp_ipi_comm - for AP_MDP_PROCESS, AP_MDP_DEINIT
 * @msg_id        : AP_MDP_PROCESS, AP_MDP_DEINIT
 * @ipi_id        : IPI_MDP
 * @ap_inst       : AP mtk_mdp_vpu address
 * @vpu_inst_addr : VPU MDP instance address
 * @padding       : Alignment padding
 */
struct mdp_ipi_comm {
	uint32_t msg_id;
	uint32_t ipi_id;
	uint64_t ap_inst;
	uint32_t vpu_inst_addr;
	uint32_t padding;
};

/**
 * struct mdp_ipi_comm_ack - for VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
 * @msg_id        : VPU_MDP_DEINIT_ACK, VPU_MDP_PROCESS_ACK
 * @ipi_id        : IPI_MDP
 * @ap_inst       : AP mtk_mdp_vpu address
 * @vpu_inst_addr : VPU MDP instance address
 * @status        : VPU execution result
 */
struct mdp_ipi_comm_ack {
	uint32_t msg_id;
	uint32_t ipi_id;
	uint64_t ap_inst;
	uint32_t vpu_inst_addr;
	int32_t status;
};

/**
 * struct mdp_config - configuration for the source/destination image
 * @x        : left
 * @y        : top
 * @w        : width
 * @h        : height
 * @w_stride : horizontal stride in bytes
 * @h_stride : vertical stride in bytes
 * @crop_x   : cropped left
 * @crop_y   : cropped top
 * @crop_w   : cropped width
 * @crop_h   : cropped height
 * @format   : color format
 */
struct mdp_config {
	int32_t x;
	int32_t y;
	int32_t w;
	int32_t h;
	int32_t w_stride;
	int32_t h_stride;
	int32_t crop_x;
	int32_t crop_y;
	int32_t crop_w;
	int32_t crop_h;
	int32_t format;
};

struct mdp_buffer {
	uint64_t addr_mva[MTK_MDP_MAX_NUM_PLANE];
	int32_t plane_size[MTK_MDP_MAX_NUM_PLANE];
	int32_t plane_num;
};

struct mdp_config_misc {
	int32_t orientation;	/* 0, 90, 180, 270 */
	int32_t hflip;		/* 1 will enable the flip */
	int32_t vflip;		/* 1 will enable the flip */
	int32_t alpha;		/* global alpha */
};

struct mdp_process_vsi {
	struct mdp_config src_config;
	struct mdp_buffer src_buffer;
	struct mdp_config dst_config;
	struct mdp_buffer dst_buffer;
	struct mdp_config_misc misc;
};

#pragma pack(pop)

#endif /* __MTK_MDP_IPI_H__ */
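/*
 * A compile-time sanity check for the packed layouts above: with
 * #pragma pack(4) the 64-bit ap_inst field is only 4-byte aligned, so
 * struct mdp_ipi_init is 16 bytes and struct mdp_ipi_comm is 24, with
 * no hidden padding between ap_inst and vpu_inst_addr.  The structs
 * are redefined under sketch_ names so this builds on its own; the
 * sizes assume a typical ABI where pack(4) caps member alignment.
 */
#include <stdint.h>
#include <stddef.h>

#pragma pack(push, 4)
struct sketch_mdp_ipi_init {
	uint32_t msg_id;
	uint32_t ipi_id;
	uint64_t ap_inst;
};

struct sketch_mdp_ipi_comm {
	uint32_t msg_id;
	uint32_t ipi_id;
	uint64_t ap_inst;
	uint32_t vpu_inst_addr;
	uint32_t padding;
};
#pragma pack(pop)

_Static_assert(sizeof(struct sketch_mdp_ipi_init) == 16,
	       "pack(4) layout: the u64 member is 4-byte aligned");
_Static_assert(sizeof(struct sketch_mdp_ipi_comm) == 24,
	       "no implicit padding in the comm message");
_Static_assert(offsetof(struct sketch_mdp_ipi_comm, vpu_inst_addr) == 16,
	       "vpu_inst_addr directly follows ap_inst");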
/*
 * Copyright 2012-15 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DC_RESOURCE_DCE120_H__
#define __DC_RESOURCE_DCE120_H__

#include "core_types.h"

struct dc;
struct resource_pool;

struct resource_pool *dce120_create_resource_pool(
	uint8_t num_virtual_links,
	struct dc *dc);

#endif /* __DC_RESOURCE_DCE120_H__ */
// SPDX-License-Identifier: GPL-2.0 /* * Microsemi Switchtec(tm) PCIe Management Driver * Copyright (c) 2017, Microsemi Corporation */ #include <linux/switchtec.h> #include <linux/switchtec_ioctl.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/poll.h> #include <linux/wait.h> #include <linux/io-64-nonatomic-lo-hi.h> #include <linux/nospec.h> MODULE_DESCRIPTION("Microsemi Switchtec(tm) PCIe Management Driver"); MODULE_VERSION("0.1"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Microsemi Corporation"); static int max_devices = 16; module_param(max_devices, int, 0644); MODULE_PARM_DESC(max_devices, "max number of switchtec device instances"); static bool use_dma_mrpc = true; module_param(use_dma_mrpc, bool, 0644); MODULE_PARM_DESC(use_dma_mrpc, "Enable the use of the DMA MRPC feature"); static int nirqs = 32; module_param(nirqs, int, 0644); MODULE_PARM_DESC(nirqs, "number of interrupts to allocate (more may be useful for NTB applications)"); static dev_t switchtec_devt; static DEFINE_IDA(switchtec_minor_ida); const struct class switchtec_class = { .name = "switchtec", }; EXPORT_SYMBOL_GPL(switchtec_class); enum mrpc_state { MRPC_IDLE = 0, MRPC_QUEUED, MRPC_RUNNING, MRPC_DONE, MRPC_IO_ERROR, }; struct switchtec_user { struct switchtec_dev *stdev; enum mrpc_state state; wait_queue_head_t cmd_comp; struct kref kref; struct list_head list; bool cmd_done; u32 cmd; u32 status; u32 return_code; size_t data_len; size_t read_len; unsigned char data[SWITCHTEC_MRPC_PAYLOAD_SIZE]; int event_cnt; }; /* * The MMIO reads to the device_id register should always return the device ID * of the device, otherwise the firmware is probably stuck or unreachable * due to a firmware reset which clears PCI state including the BARs and Memory * Space Enable bits. 
 */
static int is_firmware_running(struct switchtec_dev *stdev)
{
	u32 device = ioread32(&stdev->mmio_sys_info->device_id);

	return stdev->pdev->device == device;
}

static struct switchtec_user *stuser_create(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser;

	stuser = kzalloc(sizeof(*stuser), GFP_KERNEL);
	if (!stuser)
		return ERR_PTR(-ENOMEM);

	get_device(&stdev->dev);
	stuser->stdev = stdev;
	kref_init(&stuser->kref);
	INIT_LIST_HEAD(&stuser->list);
	init_waitqueue_head(&stuser->cmd_comp);
	stuser->event_cnt = atomic_read(&stdev->event_cnt);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return stuser;
}

static void stuser_free(struct kref *kref)
{
	struct switchtec_user *stuser;

	stuser = container_of(kref, struct switchtec_user, kref);

	dev_dbg(&stuser->stdev->dev, "%s: %p\n", __func__, stuser);

	put_device(&stuser->stdev->dev);
	kfree(stuser);
}

static void stuser_put(struct switchtec_user *stuser)
{
	kref_put(&stuser->kref, stuser_free);
}

static void stuser_set_state(struct switchtec_user *stuser,
			     enum mrpc_state state)
{
	/* requires the mrpc_mutex to already be held when called */

	static const char * const state_names[] = {
		[MRPC_IDLE] = "IDLE",
		[MRPC_QUEUED] = "QUEUED",
		[MRPC_RUNNING] = "RUNNING",
		[MRPC_DONE] = "DONE",
		[MRPC_IO_ERROR] = "IO_ERROR",
	};

	stuser->state = state;

	dev_dbg(&stuser->stdev->dev, "stuser state %p -> %s",
		stuser, state_names[state]);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev);

static void flush_wc_buf(struct switchtec_dev *stdev)
{
	struct ntb_dbmsg_regs __iomem *mmio_dbmsg;

	/*
	 * odb (outbound doorbell) register is processed by low latency
	 * hardware and w/o side effect
	 */
	mmio_dbmsg = (void __iomem *)stdev->mmio_ntb +
		SWITCHTEC_NTB_REG_DBMSG_OFFSET;
	ioread32(&mmio_dbmsg->odb);
}

static void mrpc_cmd_submit(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (stdev->mrpc_busy)
		return;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc) {
		stdev->dma_mrpc->status = SWITCHTEC_MRPC_STATUS_INPROGRESS;
		memset(stdev->dma_mrpc->data, 0xFF, SWITCHTEC_MRPC_PAYLOAD_SIZE);
	}

	stuser_set_state(stuser, MRPC_RUNNING);
	stdev->mrpc_busy = 1;
	memcpy_toio(&stdev->mmio_mrpc->input_data,
		    stuser->data, stuser->data_len);
	flush_wc_buf(stdev);
	iowrite32(stuser->cmd, &stdev->mmio_mrpc->cmd);

	schedule_delayed_work(&stdev->mrpc_timeout,
			      msecs_to_jiffies(500));
}

static int mrpc_queue_cmd(struct switchtec_user *stuser)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_dev *stdev = stuser->stdev;

	kref_get(&stuser->kref);
	stuser->read_len = sizeof(stuser->data);
	stuser_set_state(stuser, MRPC_QUEUED);
	stuser->cmd_done = false;
	list_add_tail(&stuser->list, &stdev->mrpc_queue);

	mrpc_cmd_submit(stdev);

	return 0;
}

static void mrpc_cleanup_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser = list_entry(stdev->mrpc_queue.next,
						   struct switchtec_user, list);

	stuser->cmd_done = true;
	wake_up_interruptible(&stuser->cmd_comp);
	list_del_init(&stuser->list);
	stuser_put(stuser);
	stdev->mrpc_busy = 0;

	mrpc_cmd_submit(stdev);
}

static void mrpc_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next, struct switchtec_user,
			    list);

	if (stdev->dma_mrpc)
		stuser->status = stdev->dma_mrpc->status;
	else
		stuser->status = ioread32(&stdev->mmio_mrpc->status);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_INPROGRESS)
		return;

	stuser_set_state(stuser, MRPC_DONE);
	stuser->return_code = 0;

	if (stuser->status != SWITCHTEC_MRPC_STATUS_DONE &&
	    stuser->status != SWITCHTEC_MRPC_STATUS_ERROR)
		goto out;

	if (stdev->dma_mrpc)
		stuser->return_code = stdev->dma_mrpc->rtn_code;
	else
		stuser->return_code = ioread32(&stdev->mmio_mrpc->ret_value);
	if (stuser->return_code != 0)
		goto out;

	if (stdev->dma_mrpc)
		memcpy(stuser->data, &stdev->dma_mrpc->data,
		       stuser->read_len);
	else
		memcpy_fromio(stuser->data, &stdev->mmio_mrpc->output_data,
			      stuser->read_len);
out:
	mrpc_cleanup_cmd(stdev);
}

static void mrpc_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, mrpc_work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);
	cancel_delayed_work(&stdev->mrpc_timeout);
	mrpc_complete_cmd(stdev);
	mutex_unlock(&stdev->mrpc_mutex);
}

static void mrpc_error_complete_cmd(struct switchtec_dev *stdev)
{
	/* requires the mrpc_mutex to already be held when called */

	struct switchtec_user *stuser;

	if (list_empty(&stdev->mrpc_queue))
		return;

	stuser = list_entry(stdev->mrpc_queue.next,
			    struct switchtec_user, list);

	stuser_set_state(stuser, MRPC_IO_ERROR);

	mrpc_cleanup_cmd(stdev);
}

static void mrpc_timeout_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;
	u32 status;

	stdev = container_of(work, struct switchtec_dev, mrpc_timeout.work);

	dev_dbg(&stdev->dev, "%s\n", __func__);

	mutex_lock(&stdev->mrpc_mutex);

	if (!is_firmware_running(stdev)) {
		mrpc_error_complete_cmd(stdev);
		goto out;
	}

	if (stdev->dma_mrpc)
		status = stdev->dma_mrpc->status;
	else
		status = ioread32(&stdev->mmio_mrpc->status);
	if (status == SWITCHTEC_MRPC_STATUS_INPROGRESS) {
		schedule_delayed_work(&stdev->mrpc_timeout,
				      msecs_to_jiffies(500));
		goto out;
	}

	mrpc_complete_cmd(stdev);
out:
	mutex_unlock(&stdev->mrpc_mutex);
}
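/*
 * MRPC command lifecycle (see stuser_set_state() above): a user command is
 * QUEUED by mrpc_queue_cmd(), moves to RUNNING when mrpc_cmd_submit() writes
 * it to the hardware, and is marked DONE by mrpc_complete_cmd(), which runs
 * either from the completion interrupt (via mrpc_event_work) or from the
 * 500ms polling fallback (mrpc_timeout_work). If the firmware image is no
 * longer running, mrpc_timeout_work() fails the command with IO_ERROR
 * instead.
 */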
static ssize_t device_version_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->device_version);

	return sysfs_emit(buf, "%x\n", ver);
}
static DEVICE_ATTR_RO(device_version);

static ssize_t fw_version_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	u32 ver;

	ver = ioread32(&stdev->mmio_sys_info->firmware_version);

	return sysfs_emit(buf, "%08x\n", ver);
}
static DEVICE_ATTR_RO(fw_version);

static ssize_t io_string_show(char *buf, void __iomem *attr, size_t len)
{
	int i;

	memcpy_fromio(buf, attr, len);
	buf[len] = '\n';
	buf[len + 1] = 0;

	for (i = len - 1; i > 0; i--) {
		if (buf[i] != ' ')
			break;
		buf[i] = '\n';
		buf[i + 1] = 0;
	}

	return strlen(buf);
}

#define DEVICE_ATTR_SYS_INFO_STR(field) \
static ssize_t field ## _show(struct device *dev, \
	struct device_attribute *attr, char *buf) \
{ \
	struct switchtec_dev *stdev = to_stdev(dev); \
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info; \
	if (stdev->gen == SWITCHTEC_GEN3) \
		return io_string_show(buf, &si->gen3.field, \
				      sizeof(si->gen3.field)); \
	else if (stdev->gen >= SWITCHTEC_GEN4) \
		return io_string_show(buf, &si->gen4.field, \
				      sizeof(si->gen4.field)); \
	else \
		return -EOPNOTSUPP; \
} \
\
static DEVICE_ATTR_RO(field)

DEVICE_ATTR_SYS_INFO_STR(vendor_id);
DEVICE_ATTR_SYS_INFO_STR(product_id);
DEVICE_ATTR_SYS_INFO_STR(product_revision);

static ssize_t component_vendor_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	struct sys_info_regs __iomem *si = stdev->mmio_sys_info;

	/* component_vendor field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sysfs_emit(buf, "none\n");

	return io_string_show(buf, &si->gen3.component_vendor,
			      sizeof(si->gen3.component_vendor));
}
static DEVICE_ATTR_RO(component_vendor);

static ssize_t component_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int id = ioread16(&stdev->mmio_sys_info->gen3.component_id);

	/* component_id field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sysfs_emit(buf, "none\n");

	return sysfs_emit(buf, "PM%04X\n", id);
}
static DEVICE_ATTR_RO(component_id);

static ssize_t component_revision_show(struct device *dev,
				       struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);
	int rev = ioread8(&stdev->mmio_sys_info->gen3.component_revision);

	/* component_revision field not supported after gen3 */
	if (stdev->gen != SWITCHTEC_GEN3)
		return sysfs_emit(buf, "255\n");

	return sysfs_emit(buf, "%d\n", rev);
}
static DEVICE_ATTR_RO(component_revision);

static ssize_t partition_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sysfs_emit(buf, "%d\n", stdev->partition);
}
static DEVICE_ATTR_RO(partition);

static ssize_t partition_count_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	return sysfs_emit(buf, "%d\n", stdev->partition_count);
}
static DEVICE_ATTR_RO(partition_count);

static struct attribute *switchtec_device_attrs[] = {
	&dev_attr_device_version.attr,
	&dev_attr_fw_version.attr,
	&dev_attr_vendor_id.attr,
	&dev_attr_product_id.attr,
	&dev_attr_product_revision.attr,
	&dev_attr_component_vendor.attr,
	&dev_attr_component_id.attr,
	&dev_attr_component_revision.attr,
	&dev_attr_partition.attr,
	&dev_attr_partition_count.attr,
	NULL,
};

ATTRIBUTE_GROUPS(switchtec_device);

static int switchtec_dev_open(struct inode *inode, struct file *filp)
{
	struct switchtec_dev *stdev;
	struct switchtec_user *stuser;

	stdev = container_of(inode->i_cdev, struct switchtec_dev, cdev);

	stuser = stuser_create(stdev);
	if (IS_ERR(stuser))
		return PTR_ERR(stuser);

	filp->private_data = stuser;
	stream_open(inode, filp);

	dev_dbg(&stdev->dev, "%s: %p\n", __func__, stuser);

	return 0;
}

static int switchtec_dev_release(struct inode *inode, struct file *filp)
{
	struct switchtec_user *stuser = filp->private_data;

	stuser_put(stuser);

	return 0;
}

static int lock_mutex_and_test_alive(struct switchtec_dev *stdev)
{
	if (mutex_lock_interruptible(&stdev->mrpc_mutex))
		return -EINTR;

	if (!stdev->alive) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -ENODEV;
	}

	return 0;
}

static ssize_t switchtec_dev_write(struct file *filp, const char __user *data,
				   size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	stuser->data_len = size - sizeof(stuser->cmd);

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state != MRPC_IDLE) {
		rc = -EBADE;
		goto out;
	}

	rc = copy_from_user(&stuser->cmd, data, sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}
	if (((MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_WRITE) ||
	     (MRPC_CMD_ID(stuser->cmd) == MRPC_GAS_READ)) &&
	    !capable(CAP_SYS_ADMIN)) {
		rc = -EPERM;
		goto out;
	}

	data += sizeof(stuser->cmd);
	rc = copy_from_user(&stuser->data, data, size - sizeof(stuser->cmd));
	if (rc) {
		rc = -EFAULT;
		goto out;
	}

	rc = mrpc_queue_cmd(stuser);

out:
	mutex_unlock(&stdev->mrpc_mutex);

	if (rc)
		return rc;

	return size;
}

static ssize_t switchtec_dev_read(struct file *filp, char __user *data,
				  size_t size, loff_t *off)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;

	if (size < sizeof(stuser->cmd) ||
	    size > sizeof(stuser->cmd) + sizeof(stuser->data))
		return -EINVAL;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IDLE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	stuser->read_len = size - sizeof(stuser->return_code);

	mutex_unlock(&stdev->mrpc_mutex);

	if (filp->f_flags & O_NONBLOCK) {
		if (!stuser->cmd_done)
			return -EAGAIN;
	} else {
		rc = wait_event_interruptible(stuser->cmd_comp,
					      stuser->cmd_done);
		if (rc < 0)
			return rc;
	}

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	if (stuser->state == MRPC_IO_ERROR) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EIO;
	}

	if (stuser->state != MRPC_DONE) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EBADE;
	}

	rc = copy_to_user(data, &stuser->return_code,
			  sizeof(stuser->return_code));
	if (rc) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EFAULT;
	}

	data += sizeof(stuser->return_code);
	rc = copy_to_user(data, &stuser->data,
			  size - sizeof(stuser->return_code));
	if (rc) {
		mutex_unlock(&stdev->mrpc_mutex);
		return -EFAULT;
	}

	stuser_set_state(stuser, MRPC_IDLE);

	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->status == SWITCHTEC_MRPC_STATUS_DONE ||
	    stuser->status == SWITCHTEC_MRPC_STATUS_ERROR)
		return size;
	else if (stuser->status == SWITCHTEC_MRPC_STATUS_INTERRUPTED)
		return -ENXIO;
	else
		return -EBADMSG;
}

static __poll_t switchtec_dev_poll(struct file *filp, poll_table *wait)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	__poll_t ret = 0;

	poll_wait(filp, &stuser->cmd_comp, wait);
	poll_wait(filp, &stdev->event_wq, wait);

	if (lock_mutex_and_test_alive(stdev))
		return EPOLLIN | EPOLLRDHUP | EPOLLOUT | EPOLLERR | EPOLLHUP;

	mutex_unlock(&stdev->mrpc_mutex);

	if (stuser->cmd_done)
		ret |= EPOLLIN | EPOLLRDNORM;

	if (stuser->event_cnt != atomic_read(&stdev->event_cnt))
		ret |= EPOLLPRI | EPOLLRDBAND;

	return ret;
}
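/*
 * Illustrative sketch (not part of the driver): how a user-space client
 * might drive the MRPC interface implemented by switchtec_dev_write() and
 * switchtec_dev_read() above. The 32-bit command number goes first,
 * followed by the input payload; the read returns the 32-bit return code
 * followed by the output payload. The device path, command value and
 * payload size below are assumptions for the example only.
 */
#if 0
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>

static int mrpc_example(void)
{
	int fd = open("/dev/switchtec0", O_RDWR);	/* hypothetical node */
	uint32_t cmd = 0x41;		/* example command ID only */
	uint8_t in[4 + 16];		/* command + input payload */
	uint8_t out[4 + 16];		/* return code + output payload */

	if (fd < 0)
		return -1;

	memcpy(in, &cmd, sizeof(cmd));
	memset(in + 4, 0, sizeof(in) - 4);

	/* one write queues the command ... */
	if (write(fd, in, sizeof(in)) != (ssize_t)sizeof(in))
		goto err;

	/* ... and a blocking read waits for completion */
	if (read(fd, out, sizeof(out)) != (ssize_t)sizeof(out))
		goto err;

	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
#endif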
static int ioctl_flash_info(struct switchtec_dev *stdev,
			    struct switchtec_ioctl_flash_info __user *uinfo)
{
	struct switchtec_ioctl_flash_info info = {0};
	struct flash_info_regs __iomem *fi = stdev->mmio_flash_info;

	if (stdev->gen == SWITCHTEC_GEN3) {
		info.flash_length = ioread32(&fi->gen3.flash_length);
		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN3;
	} else if (stdev->gen >= SWITCHTEC_GEN4) {
		info.flash_length = ioread32(&fi->gen4.flash_length);
		info.num_partitions = SWITCHTEC_NUM_PARTITIONS_GEN4;
	} else {
		return -EOPNOTSUPP;
	}

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static void set_fw_info_part(struct switchtec_ioctl_flash_part_info *info,
			     struct partition_info __iomem *pi)
{
	info->address = ioread32(&pi->address);
	info->length = ioread32(&pi->length);
}

static int flash_part_info_gen3(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen3 __iomem *fi =
		&stdev->mmio_flash_info->gen3;
	struct sys_info_regs_gen3 __iomem *si = &stdev->mmio_sys_info->gen3;
	u32 active_addr = -1;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_CFG0:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg0);

		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		active_addr = ioread32(&fi->active_cfg);
		set_fw_info_part(info, &fi->cfg1);

		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN3_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img0);

		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		active_addr = ioread32(&fi->active_img);
		set_fw_info_part(info, &fi->img1);

		if (ioread16(&si->img_running) == SWITCHTEC_GEN3_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	if (info->address == active_addr)
		info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;

	return 0;
}

static int flash_part_info_gen4(struct switchtec_dev *stdev,
				struct switchtec_ioctl_flash_part_info *info)
{
	struct flash_info_regs_gen4 __iomem *fi = &stdev->mmio_flash_info->gen4;
	struct sys_info_regs_gen4 __iomem *si = &stdev->mmio_sys_info->gen4;
	struct active_partition_info_gen4 __iomem *af = &fi->active_flag;

	switch (info->flash_partition) {
	case SWITCHTEC_IOCTL_PART_MAP_0:
		set_fw_info_part(info, &fi->map0);
		break;
	case SWITCHTEC_IOCTL_PART_MAP_1:
		set_fw_info_part(info, &fi->map1);
		break;
	case SWITCHTEC_IOCTL_PART_KEY_0:
		set_fw_info_part(info, &fi->key0);

		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_KEY_1:
		set_fw_info_part(info, &fi->key1);

		if (ioread8(&af->key) == SWITCHTEC_GEN4_KEY1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->key_running) == SWITCHTEC_GEN4_KEY1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_0:
		set_fw_info_part(info, &fi->bl2_0);

		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_BL2_1:
		set_fw_info_part(info, &fi->bl2_1);

		if (ioread8(&af->bl2) == SWITCHTEC_GEN4_BL2_1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->bl2_running) == SWITCHTEC_GEN4_BL2_1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG0:
		set_fw_info_part(info, &fi->cfg0);

		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_CFG1:
		set_fw_info_part(info, &fi->cfg1);

		if (ioread8(&af->cfg) == SWITCHTEC_GEN4_CFG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->cfg_running) == SWITCHTEC_GEN4_CFG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG0:
		set_fw_info_part(info, &fi->img0);

		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG0_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG0_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_IMG1:
		set_fw_info_part(info, &fi->img1);

		if (ioread8(&af->img) == SWITCHTEC_GEN4_IMG1_ACTIVE)
			info->active |= SWITCHTEC_IOCTL_PART_ACTIVE;
		if (ioread16(&si->img_running) == SWITCHTEC_GEN4_IMG1_RUNNING)
			info->active |= SWITCHTEC_IOCTL_PART_RUNNING;
		break;
	case SWITCHTEC_IOCTL_PART_NVLOG:
		set_fw_info_part(info, &fi->nvlog);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR0:
		set_fw_info_part(info, &fi->vendor[0]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR1:
		set_fw_info_part(info, &fi->vendor[1]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR2:
		set_fw_info_part(info, &fi->vendor[2]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR3:
		set_fw_info_part(info, &fi->vendor[3]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR4:
		set_fw_info_part(info, &fi->vendor[4]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR5:
		set_fw_info_part(info, &fi->vendor[5]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR6:
		set_fw_info_part(info, &fi->vendor[6]);
		break;
	case SWITCHTEC_IOCTL_PART_VENDOR7:
		set_fw_info_part(info, &fi->vendor[7]);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int ioctl_flash_part_info(struct switchtec_dev *stdev,
				 struct switchtec_ioctl_flash_part_info __user *uinfo)
{
	int ret;
	struct switchtec_ioctl_flash_part_info info = {0};

	if (copy_from_user(&info, uinfo, sizeof(info)))
		return -EFAULT;

	if (stdev->gen == SWITCHTEC_GEN3) {
		ret = flash_part_info_gen3(stdev, &info);
		if (ret)
			return ret;
	} else if (stdev->gen >= SWITCHTEC_GEN4) {
		ret = flash_part_info_gen4(stdev, &info);
		if (ret)
			return ret;
	} else {
		return -EOPNOTSUPP;
	}

	if (copy_to_user(uinfo, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static int ioctl_event_summary(struct switchtec_dev *stdev,
			       struct switchtec_user *stuser,
			       struct switchtec_ioctl_event_summary __user *usum,
			       size_t size)
{
	struct switchtec_ioctl_event_summary *s;
	int i;
	u32 reg;
	int ret = 0;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->global = ioread32(&stdev->mmio_sw_event->global_summary);
	s->part_bitmap = ioread64(&stdev->mmio_sw_event->part_event_bitmap);
	s->local_part = ioread32(&stdev->mmio_part_cfg->part_event_summary);

	for (i = 0; i < stdev->partition_count; i++) {
		reg = ioread32(&stdev->mmio_part_cfg_all[i].part_event_summary);
		s->part[i] = reg;
	}

	for (i = 0; i < stdev->pff_csr_count; i++) {
		reg = ioread32(&stdev->mmio_pff_csr[i].pff_event_summary);
		s->pff[i] = reg;
	}

	if (copy_to_user(usum, s, size)) {
		ret = -EFAULT;
		goto error_case;
	}

	stuser->event_cnt = atomic_read(&stdev->event_cnt);

error_case:
	kfree(s);
	return ret;
}

static u32 __iomem *global_ev_reg(struct switchtec_dev *stdev,
				  size_t offset, int index)
{
	return (void __iomem *)stdev->mmio_sw_event + offset;
}

static u32 __iomem *part_ev_reg(struct switchtec_dev *stdev,
				size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_part_cfg_all[index] + offset;
}

static u32 __iomem *pff_ev_reg(struct switchtec_dev *stdev,
			       size_t offset, int index)
{
	return (void __iomem *)&stdev->mmio_pff_csr[index] + offset;
}
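/*
 * Each user-visible event ID maps to an event header register that lives
 * in the global switch event region, in a per-partition config region, or
 * in a per-PFF CSR region. The table below records, for every event ID,
 * the register offset and which of the three lookup helpers above resolves
 * it to an address.
 */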
#define EV_GLB(i, r)[i] = {offsetof(struct sw_event_regs, r), global_ev_reg}
#define EV_PAR(i, r)[i] = {offsetof(struct part_cfg_regs, r), part_ev_reg}
#define EV_PFF(i, r)[i] = {offsetof(struct pff_csr_regs, r), pff_ev_reg}

static const struct event_reg {
	size_t offset;
	u32 __iomem *(*map_reg)(struct switchtec_dev *stdev,
				size_t offset, int index);
} event_regs[] = {
	EV_GLB(SWITCHTEC_IOCTL_EVENT_STACK_ERROR, stack_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_PPU_ERROR, ppu_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_ISP_ERROR, isp_error_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_SYS_RESET, sys_reset_event_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_EXC, fw_exception_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NMI, fw_nmi_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL, fw_non_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_FW_FATAL, fw_fatal_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP, twi_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC,
	       twi_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP, cli_mrpc_comp_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC,
	       cli_mrpc_comp_async_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GPIO_INT, gpio_interrupt_hdr),
	EV_GLB(SWITCHTEC_IOCTL_EVENT_GFMS, gfms_event_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_PART_RESET, part_reset_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP, mrpc_comp_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC, mrpc_comp_async_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP, dyn_binding_hdr),
	EV_PAR(SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY,
	       intercomm_notify_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_P2P, aer_in_p2p_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_AER_IN_VEP, aer_in_vep_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_DPC, dpc_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CTS, cts_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_UEC, uec_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_HOTPLUG, hotplug_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_IER, ier_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_THRESH, threshold_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_POWER_MGMT, power_mgmt_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING, tlp_throttling_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_FORCE_SPEED, force_speed_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT, credit_timeout_hdr),
	EV_PFF(SWITCHTEC_IOCTL_EVENT_LINK_STATE, link_state_hdr),
};

static u32 __iomem *event_hdr_addr(struct switchtec_dev *stdev,
				   int event_id, int index)
{
	size_t off;

	if (event_id < 0 || event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return (u32 __iomem *)ERR_PTR(-EINVAL);

	off = event_regs[event_id].offset;

	if (event_regs[event_id].map_reg == part_ev_reg) {
		if (index == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
			index = stdev->partition;
		else if (index < 0 || index >= stdev->partition_count)
			return (u32 __iomem *)ERR_PTR(-EINVAL);
	} else if (event_regs[event_id].map_reg == pff_ev_reg) {
		if (index < 0 || index >= stdev->pff_csr_count)
			return (u32 __iomem *)ERR_PTR(-EINVAL);
	}

	return event_regs[event_id].map_reg(stdev, off, index);
}

static int event_ctl(struct switchtec_dev *stdev,
		     struct switchtec_ioctl_event_ctl *ctl)
{
	int i;
	u32 __iomem *reg;
	u32 hdr;

	reg = event_hdr_addr(stdev, ctl->event_id, ctl->index);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	hdr = ioread32(reg);
	if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
		return -EOPNOTSUPP;

	for (i = 0; i < ARRAY_SIZE(ctl->data); i++)
		ctl->data[i] = ioread32(&reg[i + 1]);

	ctl->occurred = hdr & SWITCHTEC_EVENT_OCCURRED;
	ctl->count = (hdr >> 5) & 0xFF;

	if (!(ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR))
		hdr &= ~SWITCHTEC_EVENT_CLEAR;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL)
		hdr |= SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL)
		hdr &= ~SWITCHTEC_EVENT_EN_IRQ;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG)
		hdr |= SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG)
		hdr &= ~SWITCHTEC_EVENT_EN_LOG;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI)
		hdr |= SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI)
		hdr &= ~SWITCHTEC_EVENT_EN_CLI;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL)
		hdr |= SWITCHTEC_EVENT_FATAL;
	if (ctl->flags & SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL)
		hdr &= ~SWITCHTEC_EVENT_FATAL;

	if (ctl->flags)
		iowrite32(hdr, reg);

	ctl->flags = 0;
	if (hdr & SWITCHTEC_EVENT_EN_IRQ)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL;
	if (hdr & SWITCHTEC_EVENT_EN_LOG)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG;
	if (hdr & SWITCHTEC_EVENT_EN_CLI)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI;
	if (hdr & SWITCHTEC_EVENT_FATAL)
		ctl->flags |= SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL;

	return 0;
}

static int ioctl_event_ctl(struct switchtec_dev *stdev,
			   struct switchtec_ioctl_event_ctl __user *uctl)
{
	int ret;
	int nr_idxs;
	unsigned int event_flags;
	struct switchtec_ioctl_event_ctl ctl;

	if (copy_from_user(&ctl, uctl, sizeof(ctl)))
		return -EFAULT;

	if (ctl.event_id >= SWITCHTEC_IOCTL_MAX_EVENTS)
		return -EINVAL;

	if (ctl.flags & SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED)
		return -EINVAL;

	if (ctl.index == SWITCHTEC_IOCTL_EVENT_IDX_ALL) {
		if (event_regs[ctl.event_id].map_reg == global_ev_reg)
			nr_idxs = 1;
		else if (event_regs[ctl.event_id].map_reg == part_ev_reg)
			nr_idxs = stdev->partition_count;
		else if (event_regs[ctl.event_id].map_reg == pff_ev_reg)
			nr_idxs = stdev->pff_csr_count;
		else
			return -EINVAL;

		event_flags = ctl.flags;
		for (ctl.index = 0; ctl.index < nr_idxs; ctl.index++) {
			ctl.flags = event_flags;
			ret = event_ctl(stdev, &ctl);
			if (ret < 0 && ret != -EOPNOTSUPP)
				return ret;
		}
	} else {
		ret = event_ctl(stdev, &ctl);
		if (ret < 0)
			return ret;
	}

	if (copy_to_user(uctl, &ctl, sizeof(ctl)))
		return -EFAULT;

	return 0;
}
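/*
 * Illustrative sketch (not part of the driver): waiting for switch events
 * from user space. switchtec_dev_poll() above reports EPOLLPRI when the
 * event count changes, and SWITCHTEC_IOCTL_EVENT_SUMMARY (handled by
 * ioctl_event_summary()) both reads the summary and re-arms this file
 * descriptor's event counter. The device path is an assumption for the
 * example only.
 */
#if 0
#include <poll.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>

static int wait_for_switch_event(void)
{
	int fd = open("/dev/switchtec0", O_RDWR);	/* hypothetical node */
	struct switchtec_ioctl_event_summary sum;
	struct pollfd pfd = { .fd = fd, .events = POLLPRI };

	if (fd < 0)
		return -1;

	if (poll(&pfd, 1, -1) < 0 || !(pfd.revents & POLLPRI))
		goto err;

	/* reading the summary also updates this fd's event counter */
	if (ioctl(fd, SWITCHTEC_IOCTL_EVENT_SUMMARY, &sum))
		goto err;

	close(fd);
	return 0;
err:
	close(fd);
	return -1;
}
#endif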
static int ioctl_pff_to_port(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	int i, part;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg;
	struct switchtec_ioctl_pff_port p;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	p.port = -1;
	for (part = 0; part < stdev->partition_count; part++) {
		pcfg = &stdev->mmio_part_cfg_all[part];
		p.partition = part;

		reg = ioread32(&pcfg->usp_pff_inst_id);
		if (reg == p.pff) {
			p.port = 0;
			break;
		}

		reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
		if (reg == p.pff) {
			p.port = SWITCHTEC_IOCTL_PFF_VEP;
			break;
		}

		for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
			reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
			if (reg != p.pff)
				continue;

			p.port = i + 1;
			break;
		}

		if (p.port != -1)
			break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static int ioctl_port_to_pff(struct switchtec_dev *stdev,
			     struct switchtec_ioctl_pff_port __user *up)
{
	struct switchtec_ioctl_pff_port p;
	struct part_cfg_regs __iomem *pcfg;

	if (copy_from_user(&p, up, sizeof(p)))
		return -EFAULT;

	if (p.partition == SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX)
		pcfg = stdev->mmio_part_cfg;
	else if (p.partition < stdev->partition_count)
		pcfg = &stdev->mmio_part_cfg_all[p.partition];
	else
		return -EINVAL;

	switch (p.port) {
	case 0:
		p.pff = ioread32(&pcfg->usp_pff_inst_id);
		break;
	case SWITCHTEC_IOCTL_PFF_VEP:
		p.pff = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
		break;
	default:
		if (p.port > ARRAY_SIZE(pcfg->dsp_pff_inst_id))
			return -EINVAL;
		p.port = array_index_nospec(p.port,
					    ARRAY_SIZE(pcfg->dsp_pff_inst_id) + 1);
		p.pff = ioread32(&pcfg->dsp_pff_inst_id[p.port - 1]);
		break;
	}

	if (copy_to_user(up, &p, sizeof(p)))
		return -EFAULT;

	return 0;
}

static long switchtec_dev_ioctl(struct file *filp, unsigned int cmd,
				unsigned long arg)
{
	struct switchtec_user *stuser = filp->private_data;
	struct switchtec_dev *stdev = stuser->stdev;
	int rc;
	void __user *argp = (void __user *)arg;

	rc = lock_mutex_and_test_alive(stdev);
	if (rc)
		return rc;

	switch (cmd) {
	case SWITCHTEC_IOCTL_FLASH_INFO:
		rc = ioctl_flash_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_FLASH_PART_INFO:
		rc = ioctl_flash_part_info(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY_LEGACY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary_legacy));
		break;
	case SWITCHTEC_IOCTL_EVENT_CTL:
		rc = ioctl_event_ctl(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PFF_TO_PORT:
		rc = ioctl_pff_to_port(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_PORT_TO_PFF:
		rc = ioctl_port_to_pff(stdev, argp);
		break;
	case SWITCHTEC_IOCTL_EVENT_SUMMARY:
		rc = ioctl_event_summary(stdev, stuser, argp,
					 sizeof(struct switchtec_ioctl_event_summary));
		break;
	default:
		rc = -ENOTTY;
		break;
	}

	mutex_unlock(&stdev->mrpc_mutex);

	return rc;
}

static const struct file_operations switchtec_fops = {
	.owner = THIS_MODULE,
	.open = switchtec_dev_open,
	.release = switchtec_dev_release,
	.write = switchtec_dev_write,
	.read = switchtec_dev_read,
	.poll = switchtec_dev_poll,
	.unlocked_ioctl = switchtec_dev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
};

static void link_event_work(struct work_struct *work)
{
	struct switchtec_dev *stdev;

	stdev = container_of(work, struct switchtec_dev, link_event_work);

	if (stdev->link_notifier)
		stdev->link_notifier(stdev);
}

static void check_link_state_events(struct switchtec_dev *stdev)
{
	int idx;
	u32 reg;
	int count;
	int occurred = 0;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		reg = ioread32(&stdev->mmio_pff_csr[idx].link_state_hdr);
		dev_dbg(&stdev->dev, "link_state: %d->%08x\n", idx, reg);
		count = (reg >> 5) & 0xFF;

		if (count != stdev->link_event_count[idx]) {
			occurred = 1;
			stdev->link_event_count[idx] = count;
		}
	}

	if (occurred)
		schedule_work(&stdev->link_event_work);
}

static void enable_link_state_events(struct switchtec_dev *stdev)
{
	int idx;

	for (idx = 0; idx < stdev->pff_csr_count; idx++) {
		iowrite32(SWITCHTEC_EVENT_CLEAR |
			  SWITCHTEC_EVENT_EN_IRQ,
			  &stdev->mmio_pff_csr[idx].link_state_hdr);
	}
}

static void enable_dma_mrpc(struct switchtec_dev *stdev)
{
	writeq(stdev->dma_mrpc_dma_addr, &stdev->mmio_mrpc->dma_addr);
	flush_wc_buf(stdev);
	iowrite32(SWITCHTEC_DMA_MRPC_EN, &stdev->mmio_mrpc->dma_en);
}

static void stdev_release(struct device *dev)
{
	struct switchtec_dev *stdev = to_stdev(dev);

	kfree(stdev);
}

static void stdev_kill(struct switchtec_dev *stdev)
{
	struct switchtec_user *stuser, *tmpuser;

	pci_clear_master(stdev->pdev);

	cancel_delayed_work_sync(&stdev->mrpc_timeout);

	/* Mark the hardware as unavailable and complete all completions */
	mutex_lock(&stdev->mrpc_mutex);
	stdev->alive = false;

	/* Wake up and kill any users waiting on an MRPC request */
	list_for_each_entry_safe(stuser, tmpuser, &stdev->mrpc_queue, list) {
		stuser->cmd_done = true;
		wake_up_interruptible(&stuser->cmd_comp);
		list_del_init(&stuser->list);
		stuser_put(stuser);
	}

	mutex_unlock(&stdev->mrpc_mutex);

	/* Wake up any users waiting on event_wq */
	wake_up_interruptible(&stdev->event_wq);
}

static struct switchtec_dev *stdev_create(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev;
	int minor;
	struct device *dev;
	struct cdev *cdev;
	int rc;

	stdev = kzalloc_node(sizeof(*stdev), GFP_KERNEL,
			     dev_to_node(&pdev->dev));
	if (!stdev)
		return ERR_PTR(-ENOMEM);

	stdev->alive = true;
	stdev->pdev = pci_dev_get(pdev);
	INIT_LIST_HEAD(&stdev->mrpc_queue);
	mutex_init(&stdev->mrpc_mutex);
	stdev->mrpc_busy = 0;
	INIT_WORK(&stdev->mrpc_work, mrpc_event_work);
	INIT_DELAYED_WORK(&stdev->mrpc_timeout, mrpc_timeout_work);
	INIT_WORK(&stdev->link_event_work, link_event_work);
	init_waitqueue_head(&stdev->event_wq);
	atomic_set(&stdev->event_cnt, 0);

	dev = &stdev->dev;
	device_initialize(dev);
	dev->class = &switchtec_class;
	dev->parent = &pdev->dev;
	dev->groups = switchtec_device_groups;
	dev->release = stdev_release;

	minor = ida_alloc(&switchtec_minor_ida, GFP_KERNEL);
	if (minor < 0) {
		rc = minor;
		goto err_put;
	}

	dev->devt = MKDEV(MAJOR(switchtec_devt), minor);
	dev_set_name(dev, "switchtec%d", minor);

	cdev = &stdev->cdev;
	cdev_init(cdev, &switchtec_fops);
	cdev->owner = THIS_MODULE;

	return stdev;

err_put:
	pci_dev_put(stdev->pdev);
	put_device(&stdev->dev);
	return ERR_PTR(rc);
}

static int mask_event(struct switchtec_dev *stdev, int eid, int idx)
{
	size_t off = event_regs[eid].offset;
	u32 __iomem *hdr_reg;
	u32 hdr;

	hdr_reg = event_regs[eid].map_reg(stdev, off, idx);
	hdr = ioread32(hdr_reg);

	if (hdr & SWITCHTEC_EVENT_NOT_SUPP)
		return 0;

	if (!(hdr & SWITCHTEC_EVENT_OCCURRED && hdr & SWITCHTEC_EVENT_EN_IRQ))
		return 0;

	dev_dbg(&stdev->dev, "%s: %d %d %x\n", __func__, eid, idx, hdr);
	hdr &= ~(SWITCHTEC_EVENT_EN_IRQ | SWITCHTEC_EVENT_OCCURRED);
	iowrite32(hdr, hdr_reg);

	return 1;
}

static int mask_all_events(struct switchtec_dev *stdev, int eid)
{
	int idx;
	int count = 0;

	if (event_regs[eid].map_reg == part_ev_reg) {
		for (idx = 0; idx < stdev->partition_count; idx++)
			count += mask_event(stdev, eid, idx);
	} else if (event_regs[eid].map_reg == pff_ev_reg) {
		for (idx = 0; idx < stdev->pff_csr_count; idx++) {
			if (!stdev->pff_local[idx])
				continue;

			count += mask_event(stdev, eid, idx);
		}
	} else {
		count += mask_event(stdev, eid, 0);
	}

	return count;
}

static irqreturn_t switchtec_event_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;
	u32 reg;
	irqreturn_t ret = IRQ_NONE;
	int eid, event_count = 0;

	reg = ioread32(&stdev->mmio_part_cfg->mrpc_comp_hdr);
	if (reg & SWITCHTEC_EVENT_OCCURRED) {
		dev_dbg(&stdev->dev, "%s: mrpc comp\n", __func__);
		ret = IRQ_HANDLED;
		schedule_work(&stdev->mrpc_work);
		iowrite32(reg, &stdev->mmio_part_cfg->mrpc_comp_hdr);
	}

	check_link_state_events(stdev);

	for (eid = 0; eid < SWITCHTEC_IOCTL_MAX_EVENTS; eid++) {
		if (eid == SWITCHTEC_IOCTL_EVENT_LINK_STATE ||
		    eid == SWITCHTEC_IOCTL_EVENT_MRPC_COMP)
			continue;

		event_count += mask_all_events(stdev, eid);
	}

	if (event_count) {
		atomic_inc(&stdev->event_cnt);
		wake_up_interruptible(&stdev->event_wq);
		dev_dbg(&stdev->dev, "%s: %d events\n", __func__,
			event_count);
		return IRQ_HANDLED;
	}

	return ret;
}

static irqreturn_t switchtec_dma_mrpc_isr(int irq, void *dev)
{
	struct switchtec_dev *stdev = dev;

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	schedule_work(&stdev->mrpc_work);

	return IRQ_HANDLED;
}
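/*
 * Interrupt setup: the hardware advertises the event interrupt vector in
 * vep_vector_number and the optional DMA MRPC completion vector in
 * dma_vector. Both values are validated against the number of vectors
 * actually allocated before being mapped and requested below.
 */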
static int switchtec_init_isr(struct switchtec_dev *stdev)
{
	int nvecs;
	int event_irq;
	int dma_mrpc_irq;
	int rc;

	if (nirqs < 4)
		nirqs = 4;

	nvecs = pci_alloc_irq_vectors(stdev->pdev, 1, nirqs,
				      PCI_IRQ_MSIX | PCI_IRQ_MSI |
				      PCI_IRQ_VIRTUAL);
	if (nvecs < 0)
		return nvecs;

	event_irq = ioread16(&stdev->mmio_part_cfg->vep_vector_number);
	if (event_irq < 0 || event_irq >= nvecs)
		return -EFAULT;

	event_irq = pci_irq_vector(stdev->pdev, event_irq);
	if (event_irq < 0)
		return event_irq;

	rc = devm_request_irq(&stdev->pdev->dev, event_irq,
			      switchtec_event_isr, 0,
			      KBUILD_MODNAME, stdev);
	if (rc)
		return rc;

	if (!stdev->dma_mrpc)
		return rc;

	dma_mrpc_irq = ioread32(&stdev->mmio_mrpc->dma_vector);
	if (dma_mrpc_irq < 0 || dma_mrpc_irq >= nvecs)
		return -EFAULT;

	dma_mrpc_irq = pci_irq_vector(stdev->pdev, dma_mrpc_irq);
	if (dma_mrpc_irq < 0)
		return dma_mrpc_irq;

	rc = devm_request_irq(&stdev->pdev->dev, dma_mrpc_irq,
			      switchtec_dma_mrpc_isr, 0,
			      KBUILD_MODNAME, stdev);

	return rc;
}

static void init_pff(struct switchtec_dev *stdev)
{
	int i;
	u32 reg;
	struct part_cfg_regs __iomem *pcfg = stdev->mmio_part_cfg;

	for (i = 0; i < SWITCHTEC_MAX_PFF_CSR; i++) {
		reg = ioread16(&stdev->mmio_pff_csr[i].vendor_id);
		if (reg != PCI_VENDOR_ID_MICROSEMI)
			break;
	}

	stdev->pff_csr_count = i;

	reg = ioread32(&pcfg->usp_pff_inst_id);
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	reg = ioread32(&pcfg->vep_pff_inst_id) & 0xFF;
	if (reg < stdev->pff_csr_count)
		stdev->pff_local[reg] = 1;

	for (i = 0; i < ARRAY_SIZE(pcfg->dsp_pff_inst_id); i++) {
		reg = ioread32(&pcfg->dsp_pff_inst_id[i]);
		if (reg < stdev->pff_csr_count)
			stdev->pff_local[reg] = 1;
	}
}

static int switchtec_init_pci(struct switchtec_dev *stdev,
			      struct pci_dev *pdev)
{
	int rc;
	void __iomem *map;
	unsigned long res_start, res_len;
	u32 __iomem *part_id;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc)
		return rc;

	pci_set_master(pdev);

	res_start = pci_resource_start(pdev, 0);
	res_len = pci_resource_len(pdev, 0);

	if (!devm_request_mem_region(&pdev->dev, res_start,
				     res_len, KBUILD_MODNAME))
		return -EBUSY;

	stdev->mmio_mrpc = devm_ioremap_wc(&pdev->dev, res_start,
					   SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!stdev->mmio_mrpc)
		return -ENOMEM;

	map = devm_ioremap(&pdev->dev,
			   res_start + SWITCHTEC_GAS_TOP_CFG_OFFSET,
			   res_len - SWITCHTEC_GAS_TOP_CFG_OFFSET);
	if (!map)
		return -ENOMEM;

	stdev->mmio = map - SWITCHTEC_GAS_TOP_CFG_OFFSET;
	stdev->mmio_sw_event = stdev->mmio + SWITCHTEC_GAS_SW_EVENT_OFFSET;
	stdev->mmio_sys_info = stdev->mmio + SWITCHTEC_GAS_SYS_INFO_OFFSET;
	stdev->mmio_flash_info = stdev->mmio + SWITCHTEC_GAS_FLASH_INFO_OFFSET;
	stdev->mmio_ntb = stdev->mmio + SWITCHTEC_GAS_NTB_OFFSET;

	if (stdev->gen == SWITCHTEC_GEN3)
		part_id = &stdev->mmio_sys_info->gen3.partition_id;
	else if (stdev->gen >= SWITCHTEC_GEN4)
		part_id = &stdev->mmio_sys_info->gen4.partition_id;
	else
		return -EOPNOTSUPP;

	stdev->partition = ioread8(part_id);
	stdev->partition_count = ioread8(&stdev->mmio_ntb->partition_count);
	stdev->mmio_part_cfg_all = stdev->mmio + SWITCHTEC_GAS_PART_CFG_OFFSET;
	stdev->mmio_part_cfg = &stdev->mmio_part_cfg_all[stdev->partition];
	stdev->mmio_pff_csr = stdev->mmio + SWITCHTEC_GAS_PFF_CSR_OFFSET;

	if (stdev->partition_count < 1)
		stdev->partition_count = 1;

	init_pff(stdev);

	pci_set_drvdata(pdev, stdev);

	if (!use_dma_mrpc)
		return 0;

	if (ioread32(&stdev->mmio_mrpc->dma_ver) == 0)
		return 0;

	stdev->dma_mrpc = dma_alloc_coherent(&stdev->pdev->dev,
					     sizeof(*stdev->dma_mrpc),
					     &stdev->dma_mrpc_dma_addr,
					     GFP_KERNEL);
	if (stdev->dma_mrpc == NULL)
		return -ENOMEM;

	return 0;
}

static void switchtec_exit_pci(struct switchtec_dev *stdev)
{
	if (stdev->dma_mrpc) {
		iowrite32(0, &stdev->mmio_mrpc->dma_en);
		flush_wc_buf(stdev);
		writeq(0, &stdev->mmio_mrpc->dma_addr);
		dma_free_coherent(&stdev->pdev->dev, sizeof(*stdev->dma_mrpc),
				  stdev->dma_mrpc, stdev->dma_mrpc_dma_addr);
		stdev->dma_mrpc = NULL;
	}
}

static int switchtec_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *id)
{
	struct switchtec_dev *stdev;
	int rc;

	if (pdev->class == (PCI_CLASS_BRIDGE_OTHER << 8))
		request_module_nowait("ntb_hw_switchtec");

	stdev = stdev_create(pdev);
	if (IS_ERR(stdev))
		return PTR_ERR(stdev);

	stdev->gen = id->driver_data;

	rc = switchtec_init_pci(stdev, pdev);
	if (rc)
		goto err_put;

	rc = switchtec_init_isr(stdev);
	if (rc) {
		dev_err(&stdev->dev, "failed to init isr.\n");
		goto err_exit_pci;
	}

	iowrite32(SWITCHTEC_EVENT_CLEAR |
		  SWITCHTEC_EVENT_EN_IRQ,
		  &stdev->mmio_part_cfg->mrpc_comp_hdr);
	enable_link_state_events(stdev);

	if (stdev->dma_mrpc)
		enable_dma_mrpc(stdev);

	rc = cdev_device_add(&stdev->cdev, &stdev->dev);
	if (rc)
		goto err_devadd;

	dev_info(&stdev->dev, "Management device registered.\n");

	return 0;

err_devadd:
	stdev_kill(stdev);
err_exit_pci:
	switchtec_exit_pci(stdev);
err_put:
	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	put_device(&stdev->dev);
	return rc;
}

static void switchtec_pci_remove(struct pci_dev *pdev)
{
	struct switchtec_dev *stdev = pci_get_drvdata(pdev);

	pci_set_drvdata(pdev, NULL);

	cdev_device_del(&stdev->cdev, &stdev->dev);
	ida_free(&switchtec_minor_ida, MINOR(stdev->dev.devt));
	dev_info(&stdev->dev, "unregistered.\n");
	stdev_kill(stdev);
	switchtec_exit_pci(stdev);
	pci_dev_put(stdev->pdev);
	stdev->pdev = NULL;
	put_device(&stdev->dev);
}

#define SWITCHTEC_PCI_DEVICE(device_id, gen) \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_MEMORY_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}, \
	{ \
		.vendor     = PCI_VENDOR_ID_MICROSEMI, \
		.device     = device_id, \
		.subvendor  = PCI_ANY_ID, \
		.subdevice  = PCI_ANY_ID, \
		.class      = (PCI_CLASS_BRIDGE_OTHER << 8), \
		.class_mask = 0xFFFFFFFF, \
		.driver_data = gen, \
	}

static const struct pci_device_id switchtec_pci_tbl[] = {
	SWITCHTEC_PCI_DEVICE(0x8531, SWITCHTEC_GEN3),  /* PFX 24xG3 */
	SWITCHTEC_PCI_DEVICE(0x8532, SWITCHTEC_GEN3),  /* PFX 32xG3 */
	SWITCHTEC_PCI_DEVICE(0x8533, SWITCHTEC_GEN3),  /* PFX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8534, SWITCHTEC_GEN3),  /* PFX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8535, SWITCHTEC_GEN3),  /* PFX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8536, SWITCHTEC_GEN3),  /* PFX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8541, SWITCHTEC_GEN3),  /* PSX 24xG3 */
	SWITCHTEC_PCI_DEVICE(0x8542, SWITCHTEC_GEN3),  /* PSX 32xG3 */
	SWITCHTEC_PCI_DEVICE(0x8543, SWITCHTEC_GEN3),  /* PSX 48xG3 */
	SWITCHTEC_PCI_DEVICE(0x8544, SWITCHTEC_GEN3),  /* PSX 64xG3 */
	SWITCHTEC_PCI_DEVICE(0x8545, SWITCHTEC_GEN3),  /* PSX 80xG3 */
	SWITCHTEC_PCI_DEVICE(0x8546, SWITCHTEC_GEN3),  /* PSX 96xG3 */
	SWITCHTEC_PCI_DEVICE(0x8551, SWITCHTEC_GEN3),  /* PAX 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8552, SWITCHTEC_GEN3),  /* PAX 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8553, SWITCHTEC_GEN3),  /* PAX 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8554, SWITCHTEC_GEN3),  /* PAX 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8555, SWITCHTEC_GEN3),  /* PAX 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8556, SWITCHTEC_GEN3),  /* PAX 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x8561, SWITCHTEC_GEN3),  /* PFXL 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8562, SWITCHTEC_GEN3),  /* PFXL 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8563, SWITCHTEC_GEN3),  /* PFXL 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8564, SWITCHTEC_GEN3),  /* PFXL 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8565, SWITCHTEC_GEN3),  /* PFXL 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8566, SWITCHTEC_GEN3),  /* PFXL 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x8571, SWITCHTEC_GEN3),  /* PFXI 24XG3 */
	SWITCHTEC_PCI_DEVICE(0x8572, SWITCHTEC_GEN3),  /* PFXI 32XG3 */
	SWITCHTEC_PCI_DEVICE(0x8573, SWITCHTEC_GEN3),  /* PFXI 48XG3 */
	SWITCHTEC_PCI_DEVICE(0x8574, SWITCHTEC_GEN3),  /* PFXI 64XG3 */
	SWITCHTEC_PCI_DEVICE(0x8575, SWITCHTEC_GEN3),  /* PFXI 80XG3 */
	SWITCHTEC_PCI_DEVICE(0x8576, SWITCHTEC_GEN3),  /* PFXI 96XG3 */
	SWITCHTEC_PCI_DEVICE(0x4000, SWITCHTEC_GEN4),  /* PFX 100XG4 */
	SWITCHTEC_PCI_DEVICE(0x4084, SWITCHTEC_GEN4),  /* PFX 84XG4 */
	SWITCHTEC_PCI_DEVICE(0x4068, SWITCHTEC_GEN4),  /* PFX 68XG4 */
	SWITCHTEC_PCI_DEVICE(0x4052, SWITCHTEC_GEN4),  /* PFX 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4036, SWITCHTEC_GEN4),  /* PFX 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4028, SWITCHTEC_GEN4),  /* PFX 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4100, SWITCHTEC_GEN4),  /* PSX 100XG4 */
	SWITCHTEC_PCI_DEVICE(0x4184, SWITCHTEC_GEN4),  /* PSX 84XG4 */
	SWITCHTEC_PCI_DEVICE(0x4168, SWITCHTEC_GEN4),  /* PSX 68XG4 */
	SWITCHTEC_PCI_DEVICE(0x4152, SWITCHTEC_GEN4),  /* PSX 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4136, SWITCHTEC_GEN4),  /* PSX 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4128, SWITCHTEC_GEN4),  /* PSX 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4200, SWITCHTEC_GEN4),  /* PAX 100XG4 */
	SWITCHTEC_PCI_DEVICE(0x4284, SWITCHTEC_GEN4),  /* PAX 84XG4 */
	SWITCHTEC_PCI_DEVICE(0x4268, SWITCHTEC_GEN4),  /* PAX 68XG4 */
	SWITCHTEC_PCI_DEVICE(0x4252, SWITCHTEC_GEN4),  /* PAX 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4236, SWITCHTEC_GEN4),  /* PAX 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4228, SWITCHTEC_GEN4),  /* PAX 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4352, SWITCHTEC_GEN4),  /* PFXA 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4336, SWITCHTEC_GEN4),  /* PFXA 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4328, SWITCHTEC_GEN4),  /* PFXA 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4452, SWITCHTEC_GEN4),  /* PSXA 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4436, SWITCHTEC_GEN4),  /* PSXA 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4428, SWITCHTEC_GEN4),  /* PSXA 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x4552, SWITCHTEC_GEN4),  /* PAXA 52XG4 */
	SWITCHTEC_PCI_DEVICE(0x4536, SWITCHTEC_GEN4),  /* PAXA 36XG4 */
	SWITCHTEC_PCI_DEVICE(0x4528, SWITCHTEC_GEN4),  /* PAXA 28XG4 */
	SWITCHTEC_PCI_DEVICE(0x5000, SWITCHTEC_GEN5),  /* PFX 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5084, SWITCHTEC_GEN5),  /* PFX 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5068, SWITCHTEC_GEN5),  /* PFX 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5052, SWITCHTEC_GEN5),  /* PFX 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5036, SWITCHTEC_GEN5),  /* PFX 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5028, SWITCHTEC_GEN5),  /* PFX 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5100, SWITCHTEC_GEN5),  /* PSX 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5184, SWITCHTEC_GEN5),  /* PSX 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5168, SWITCHTEC_GEN5),  /* PSX 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5152, SWITCHTEC_GEN5),  /* PSX 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5136, SWITCHTEC_GEN5),  /* PSX 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5128, SWITCHTEC_GEN5),  /* PSX 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5200, SWITCHTEC_GEN5),  /* PAX 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5284, SWITCHTEC_GEN5),  /* PAX 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5268, SWITCHTEC_GEN5),  /* PAX 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5252, SWITCHTEC_GEN5),  /* PAX 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5236, SWITCHTEC_GEN5),  /* PAX 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5228, SWITCHTEC_GEN5),  /* PAX 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5300, SWITCHTEC_GEN5),  /* PFXA 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5384, SWITCHTEC_GEN5),  /* PFXA 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5368, SWITCHTEC_GEN5),  /* PFXA 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5352, SWITCHTEC_GEN5),  /* PFXA 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5336, SWITCHTEC_GEN5),  /* PFXA 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5328, SWITCHTEC_GEN5),  /* PFXA 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5400, SWITCHTEC_GEN5),  /* PSXA 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5484, SWITCHTEC_GEN5),  /* PSXA 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5468, SWITCHTEC_GEN5),  /* PSXA 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5452, SWITCHTEC_GEN5),  /* PSXA 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5436, SWITCHTEC_GEN5),  /* PSXA 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5428, SWITCHTEC_GEN5),  /* PSXA 28XG5 */
	SWITCHTEC_PCI_DEVICE(0x5500, SWITCHTEC_GEN5),  /* PAXA 100XG5 */
	SWITCHTEC_PCI_DEVICE(0x5584, SWITCHTEC_GEN5),  /* PAXA 84XG5 */
	SWITCHTEC_PCI_DEVICE(0x5568, SWITCHTEC_GEN5),  /* PAXA 68XG5 */
	SWITCHTEC_PCI_DEVICE(0x5552, SWITCHTEC_GEN5),  /* PAXA 52XG5 */
	SWITCHTEC_PCI_DEVICE(0x5536, SWITCHTEC_GEN5),  /* PAXA 36XG5 */
	SWITCHTEC_PCI_DEVICE(0x5528, SWITCHTEC_GEN5),  /* PAXA 28XG5 */
	{0}
};
MODULE_DEVICE_TABLE(pci, switchtec_pci_tbl);

static struct pci_driver switchtec_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= switchtec_pci_tbl,
	.probe		= switchtec_pci_probe,
	.remove		= switchtec_pci_remove,
};

static int __init switchtec_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&switchtec_devt, 0, max_devices,
				 "switchtec");
	if (rc)
		return rc;

	rc = class_register(&switchtec_class);
	if (rc)
		goto err_create_class;

	rc = pci_register_driver(&switchtec_pci_driver);
	if (rc)
		goto err_pci_register;

	pr_info(KBUILD_MODNAME ": loaded.\n");

	return 0;

err_pci_register:
	class_unregister(&switchtec_class);

err_create_class:
	unregister_chrdev_region(switchtec_devt, max_devices);

	return rc;
}
module_init(switchtec_init);

static void __exit switchtec_exit(void)
{
	pci_unregister_driver(&switchtec_pci_driver);
	class_unregister(&switchtec_class);
	unregister_chrdev_region(switchtec_devt, max_devices);
	ida_destroy(&switchtec_minor_ida);

	pr_info(KBUILD_MODNAME ": unloaded.\n");
}
module_exit(switchtec_exit);
// SPDX-License-Identifier: GPL-2.0-only
/*****************************************************************************
 *                                                                           *
 * File: mv88x201x.c                                                         *
 * $Revision: 1.12 $                                                         *
 * $Date: 2005/04/15 19:27:14 $                                              *
 * Description:                                                              *
 *  Marvell PHY (mv88x201x) functionality.                                   *
 *  part of the Chelsio 10Gb Ethernet Driver.                                *
 *                                                                           *
 * http://www.chelsio.com                                                    *
 *                                                                           *
 * Copyright (c) 2003 - 2005 Chelsio Communications, Inc.                    *
 * All rights reserved.                                                      *
 *                                                                           *
 * Maintainers: [email protected]                                    *
 *                                                                           *
 * Authors: Dimitrios Michailidis   <[email protected]>                       *
 *          Tina Yang               <[email protected]>                        *
 *          Felix Marti             <[email protected]>                        *
 *          Scott Bardone           <[email protected]>                     *
 *          Kurt Ottaway            <[email protected]>                     *
 *          Frank DiMambro          <[email protected]>                       *
 *                                                                           *
 * History:                                                                  *
 *                                                                           *
 ****************************************************************************/

#include "cphy.h"
#include "elmer0.h"

/*
 * The 88x2010 Rev C. requires some link status registers to be read
 * twice in order to get the right values. Future revisions will fix
 * this problem and then this macro can disappear.
 */
#define MV88x2010_LINK_STATUS_BUGS 1

static int led_init(struct cphy *cphy)
{
	/* Setup the LED registers so we can turn on/off.
	 * Writing these bits maps control to another
	 * register. mmd(0x1) addr(0x7)
	 */
	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
	return 0;
}

static int led_link(struct cphy *cphy, u32 do_enable)
{
	u32 led = 0;
#define LINK_ENABLE_BIT 0x1

	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);

	if (do_enable & LINK_ENABLE_BIT) {
		led |= LINK_ENABLE_BIT;
		cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
	} else {
		led &= ~LINK_ENABLE_BIT;
		cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
	}
	return 0;
}

/* Port Reset */
static int mv88x201x_reset(struct cphy *cphy, int wait)
{
	/* This can be done through registers. It is not required since
	 * a full chip reset is used.
	 */
	return 0;
}

static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
	/* Enable PHY LASI interrupts. */
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			MDIO_PMA_LASI_LSALARM);

	/* Enable Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		u32 elmer;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		elmer |= ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
	}
	return 0;
}

static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
	/* Disable PHY LASI interrupts. */
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);

	/* Disable Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		u32 elmer;

		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		elmer &= ~ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE, elmer);
	}
	return 0;
}

static int mv88x201x_interrupt_clear(struct cphy *cphy)
{
	u32 elmer;
	u32 val;

#ifdef MV88x2010_LINK_STATUS_BUGS
	/* Required to read twice before the clear takes effect. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);

	/* Read this register after the others above it, else
	 * the register doesn't clear correctly.
	 */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
#endif

	/* Clear link status. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
	/* Clear PHY LASI interrupts. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);

#ifdef MV88x2010_LINK_STATUS_BUGS
	/* Do it again. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
#endif

	/* Clear Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
		elmer |= ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
	}
	return 0;
}

static int mv88x201x_interrupt_handler(struct cphy *cphy)
{
	/* Clear interrupts */
	mv88x201x_interrupt_clear(cphy);

	/* We have only enabled link change interrupts and so
	 * cphy_cause must be a link change interrupt.
	 */
	return cphy_cause_link_change;
}

static int mv88x201x_set_loopback(struct cphy *cphy, int on)
{
	return 0;
}

static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
				     int *speed, int *duplex, int *fc)
{
	u32 val = 0;

	if (link_ok) {
		/* Read link status. */
		cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
		val &= MDIO_STAT1_LSTATUS;
		*link_ok = (val == MDIO_STAT1_LSTATUS);
		/* Turn on/off Link LED */
		led_link(cphy, *link_ok);
	}
	if (speed)
		*speed = SPEED_10000;
	if (duplex)
		*duplex = DUPLEX_FULL;
	if (fc)
		*fc = PAUSE_RX | PAUSE_TX;
	return 0;
}

static void mv88x201x_destroy(struct cphy *cphy)
{
	kfree(cphy);
}

static const struct cphy_ops mv88x201x_ops = {
	.destroy           = mv88x201x_destroy,
	.reset             = mv88x201x_reset,
	.interrupt_enable  = mv88x201x_interrupt_enable,
	.interrupt_disable = mv88x201x_interrupt_disable,
	.interrupt_clear   = mv88x201x_interrupt_clear,
	.interrupt_handler = mv88x201x_interrupt_handler,
	.get_link_status   = mv88x201x_get_link_status,
	.set_loopback      = mv88x201x_set_loopback,
	.mmds              = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
			      MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
};

static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
					 const struct mdio_ops *mdio_ops)
{
	u32 val;
	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

	if (!cphy)
		return NULL;

	cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);

	/* Commands the PHY to enable XFP's clock. */
	cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);

	/* Clear link status. Required because of a bug in the PHY. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);

	/* Allows for Link,Ack LED turn on/off */
	led_init(cphy);
	return cphy;
}

/* Chip Reset */
static int mv88x201x_phy_reset(adapter_t *adapter)
{
	u32 val;

	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~4;
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	msleep(100);

	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
	msleep(1000);

	/* Now let's enable the laser. Delay 100us. */
	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= 0x8000;
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(100);
	return 0;
}

const struct gphy t1_mv88x201x_ops = {
	.create = mv88x201x_phy_create,
	.reset  = mv88x201x_phy_reset
};
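/*
 * Illustrative sketch (not part of the driver): how the cphy_ops contract
 * exported via t1_mv88x201x_ops might be exercised by the MAC layer. The
 * surrounding driver supplies the real cphy instance; the function name
 * here is an assumption for the example only.
 */
#if 0
static void example_check_link(struct cphy *cphy)
{
	int link_ok, speed, duplex, fc;

	/* mv88x201x_get_link_status() also drives the link LED */
	cphy->ops->get_link_status(cphy, &link_ok, &speed, &duplex, &fc);

	if (link_ok)
		pr_info("10G link up (speed %d, duplex %d)\n", speed, duplex);
}
#endif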
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2022 Intel Corporation
 */

#ifndef __INTEL_DISPLAY_LIMITS_H__
#define __INTEL_DISPLAY_LIMITS_H__

/*
 * Keep the pipe enum values fixed: the code assumes that PIPE_A=0, the
 * rest have consecutive values and match the enum values of transcoders
 * with a 1:1 transcoder -> pipe mapping.
 */
enum pipe {
	INVALID_PIPE = -1,

	PIPE_A = 0,
	PIPE_B,
	PIPE_C,
	PIPE_D,
	_PIPE_EDP,

	I915_MAX_PIPES = _PIPE_EDP
};

enum transcoder {
	INVALID_TRANSCODER = -1,
	/*
	 * The following transcoders have a 1:1 transcoder -> pipe mapping,
	 * keep their values fixed: the code assumes that TRANSCODER_A=0, the
	 * rest have consecutive values and match the enum values of the pipes
	 * they map to.
	 */
	TRANSCODER_A = PIPE_A,
	TRANSCODER_B = PIPE_B,
	TRANSCODER_C = PIPE_C,
	TRANSCODER_D = PIPE_D,

	/*
	 * The following transcoders can map to any pipe, their enum value
	 * doesn't need to stay fixed.
	 */
	TRANSCODER_EDP,
	TRANSCODER_DSI_0,
	TRANSCODER_DSI_1,
	TRANSCODER_DSI_A = TRANSCODER_DSI_0,	/* legacy DSI */
	TRANSCODER_DSI_C = TRANSCODER_DSI_1,	/* legacy DSI */

	I915_MAX_TRANSCODERS
};

/*
 * Global legacy plane identifier. Valid only for primary/sprite
 * planes on pre-g4x, and only for primary planes on g4x-bdw.
 */
enum i9xx_plane_id {
	PLANE_A,
	PLANE_B,
	PLANE_C,
};

/*
 * Per-pipe plane identifier.
 * I915_MAX_PLANES in the enum below is the maximum (across all platforms)
 * number of planes per CRTC. Not all platforms really have this many planes,
 * which means some arrays of size I915_MAX_PLANES may have unused entries
 * between the topmost sprite plane and the cursor plane.
 *
 * This is expected to be passed to various register macros
 * (eg. PLANE_CTL(), PS_PLANE_SEL(), etc.) so adjust with care.
 */
enum plane_id {
	/* skl+ universal plane names */
	PLANE_1,
	PLANE_2,
	PLANE_3,
	PLANE_4,
	PLANE_5,
	PLANE_6,
	PLANE_7,

	PLANE_CURSOR,

	I915_MAX_PLANES,

	/* pre-skl plane names */
	PLANE_PRIMARY = PLANE_1,
	PLANE_SPRITE0,
	PLANE_SPRITE1,
};

enum port {
	PORT_NONE = -1,

	PORT_A = 0,
	PORT_B,
	PORT_C,
	PORT_D,
	PORT_E,
	PORT_F,
	PORT_G,
	PORT_H,
	PORT_I,

	/* tgl+ */
	PORT_TC1 = PORT_D,
	PORT_TC2,
	PORT_TC3,
	PORT_TC4,
	PORT_TC5,
	PORT_TC6,

	/* XE_LPD repositions D/E offsets and bitfields */
	PORT_D_XELPD = PORT_TC5,
	PORT_E_XELPD,

	I915_MAX_PORTS
};

enum hpd_pin {
	HPD_NONE = 0,
	HPD_TV = HPD_NONE,	/* TV is known to be unreliable */
	HPD_CRT,
	HPD_SDVO_B,
	HPD_SDVO_C,
	HPD_PORT_A,
	HPD_PORT_B,
	HPD_PORT_C,
	HPD_PORT_D,
	HPD_PORT_E,
	HPD_PORT_TC1,
	HPD_PORT_TC2,
	HPD_PORT_TC3,
	HPD_PORT_TC4,
	HPD_PORT_TC5,
	HPD_PORT_TC6,

	HPD_NUM_PINS
};

#endif /* __INTEL_DISPLAY_LIMITS_H__ */
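/*
 * Illustrative sketch (not part of the header): the fixed-value
 * requirements documented above can be checked at compile time. These
 * assertions are an addition for clarity, not something the original
 * header carries.
 */
#if 0
_Static_assert(PIPE_A == 0, "code assumes PIPE_A == 0");
_Static_assert(TRANSCODER_A == PIPE_A, "1:1 transcoder -> pipe mapping");
_Static_assert(TRANSCODER_D == PIPE_D, "1:1 transcoder -> pipe mapping");
_Static_assert(PLANE_PRIMARY == PLANE_1, "pre-skl names alias skl+ names");
#endif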
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Watchdog driver for TS-4800 based boards
 *
 * Copyright (c) 2015 - Savoir-faire Linux
 *
 */

#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/watchdog.h>

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
			   __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

/* possible feed values */
#define TS4800_WDT_FEED_2S       0x1
#define TS4800_WDT_FEED_10S      0x2
#define TS4800_WDT_DISABLE       0x3

struct ts4800_wdt {
	struct watchdog_device  wdd;
	struct regmap           *regmap;
	u32                     feed_offset;
	u32                     feed_val;
};

/*
 * TS-4800 supports the following timeout values:
 *
 *   value  desc
 *   ---------------------
 *     0    feed for 338ms
 *     1    feed for 2.706s
 *     2    feed for 10.824s
 *     3    disable watchdog
 *
 * Keep the regmap/timeout map ordered by timeout
 */
static const struct {
	const int timeout;
	const int regval;
} ts4800_wdt_map[] = {
	{ 2,  TS4800_WDT_FEED_2S },
	{ 10, TS4800_WDT_FEED_10S },
};

#define MAX_TIMEOUT_INDEX       (ARRAY_SIZE(ts4800_wdt_map) - 1)

static void ts4800_write_feed(struct ts4800_wdt *wdt, u32 val)
{
	regmap_write(wdt->regmap, wdt->feed_offset, val);
}

static int ts4800_wdt_start(struct watchdog_device *wdd)
{
	struct ts4800_wdt *wdt = watchdog_get_drvdata(wdd);

	ts4800_write_feed(wdt, wdt->feed_val);
	return 0;
}

static int ts4800_wdt_stop(struct watchdog_device *wdd)
{
	struct ts4800_wdt *wdt = watchdog_get_drvdata(wdd);

	ts4800_write_feed(wdt, TS4800_WDT_DISABLE);
	return 0;
}

static int ts4800_wdt_set_timeout(struct watchdog_device *wdd,
				  unsigned int timeout)
{
	struct ts4800_wdt *wdt = watchdog_get_drvdata(wdd);
	int i;

	for (i = 0; i < MAX_TIMEOUT_INDEX; i++) {
		if (ts4800_wdt_map[i].timeout >= timeout)
			break;
	}

	wdd->timeout = ts4800_wdt_map[i].timeout;
	wdt->feed_val = ts4800_wdt_map[i].regval;

	return 0;
}

static const struct watchdog_ops ts4800_wdt_ops = {
	.owner = THIS_MODULE,
	.start = ts4800_wdt_start,
	.stop = ts4800_wdt_stop,
	.set_timeout = ts4800_wdt_set_timeout,
};

static const struct watchdog_info ts4800_wdt_info = {
	.options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING,
	.identity = "TS-4800 Watchdog",
};

static int ts4800_wdt_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	struct device_node *syscon_np;
	struct watchdog_device *wdd;
	struct ts4800_wdt *wdt;
	u32 reg;
	int ret;

	syscon_np = of_parse_phandle(np, "syscon", 0);
	if (!syscon_np) {
		dev_err(dev, "no syscon property\n");
		return -ENODEV;
	}

	ret = of_property_read_u32_index(np, "syscon", 1, &reg);
	if (ret < 0) {
		dev_err(dev, "no offset in syscon\n");
		of_node_put(syscon_np);
		return ret;
	}

	/* allocate memory for watchdog struct */
	wdt = devm_kzalloc(dev, sizeof(*wdt), GFP_KERNEL);
	if (!wdt) {
		of_node_put(syscon_np);
		return -ENOMEM;
	}

	/* set regmap and offset to know where to write */
	wdt->feed_offset = reg;
	wdt->regmap = syscon_node_to_regmap(syscon_np);
	of_node_put(syscon_np);
	if (IS_ERR(wdt->regmap)) {
		dev_err(dev, "cannot get parent's regmap\n");
		return PTR_ERR(wdt->regmap);
	}

	/* Initialize struct watchdog_device */
	wdd = &wdt->wdd;
	wdd->parent = dev;
	wdd->info = &ts4800_wdt_info;
	wdd->ops = &ts4800_wdt_ops;
	wdd->min_timeout = ts4800_wdt_map[0].timeout;
	wdd->max_timeout = ts4800_wdt_map[MAX_TIMEOUT_INDEX].timeout;

	watchdog_set_drvdata(wdd, wdt);
	watchdog_set_nowayout(wdd, nowayout);
	watchdog_init_timeout(wdd, 0, dev);

	/*
	 * As this watchdog supports only a few values, ts4800_wdt_set_timeout
	 * must be called to initialize timeout and feed_val with valid values.
	 * Default to maximum timeout if none, or an invalid one, is provided in
	 * device tree.
	 */
	if (!wdd->timeout)
		wdd->timeout = wdd->max_timeout;
	ts4800_wdt_set_timeout(wdd, wdd->timeout);

	/*
	 * The feed register is write-only, so it is not possible to determine
	 * watchdog's state. Disable it to be in a known state.
	 */
	ts4800_wdt_stop(wdd);

	ret = devm_watchdog_register_device(dev, wdd);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, wdt);

	dev_info(dev,
		 "initialized (timeout = %d sec, nowayout = %d)\n",
		 wdd->timeout, nowayout);

	return 0;
}

static const struct of_device_id ts4800_wdt_of_match[] = {
	{ .compatible = "technologic,ts4800-wdt", },
	{ },
};
MODULE_DEVICE_TABLE(of, ts4800_wdt_of_match);

static struct platform_driver ts4800_wdt_driver = {
	.probe		= ts4800_wdt_probe,
	.driver		= {
		.name	= "ts4800_wdt",
		.of_match_table = ts4800_wdt_of_match,
	},
};

module_platform_driver(ts4800_wdt_driver);

MODULE_AUTHOR("Damien Riegel <[email protected]>");
MODULE_DESCRIPTION("Watchdog driver for TS-4800 based boards");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ts4800_wdt");
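/*
 * Illustrative sketch (not part of the driver): ts4800_wdt_set_timeout()
 * above rounds a requested timeout up to the next supported feed value and
 * clamps anything beyond the largest entry to that entry. A stand-alone
 * mirror of the lookup, for clarity only:
 */
#if 0
static int ts4800_round_timeout(int timeout)
{
	static const int supported[] = { 2, 10 };
	int i;

	/* same walk as ts4800_wdt_set_timeout(): stop at the first
	 * supported value >= the request, else use the largest one
	 */
	for (i = 0; i < (int)(sizeof(supported) / sizeof(supported[0])) - 1; i++)
		if (supported[i] >= timeout)
			break;

	return supported[i];	/* e.g. 1 -> 2, 5 -> 10, 60 -> 10 */
}
#endif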
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2015-2019 Netronome Systems, Inc. */ #include <linux/debugfs.h> #include <linux/module.h> #include <linux/rtnetlink.h> #include "nfp_net.h" #include "nfp_net_dp.h" static struct dentry *nfp_dir; static int nfp_rx_q_show(struct seq_file *file, void *data) { struct nfp_net_r_vector *r_vec = file->private; struct nfp_net_rx_ring *rx_ring; int fl_rd_p, fl_wr_p, rxd_cnt; struct nfp_net_rx_desc *rxd; struct nfp_net *nn; void *frag; int i; rtnl_lock(); if (!r_vec->nfp_net || !r_vec->rx_ring) goto out; nn = r_vec->nfp_net; rx_ring = r_vec->rx_ring; if (!nfp_net_running(nn)) goto out; rxd_cnt = rx_ring->cnt; fl_rd_p = nfp_qcp_rd_ptr_read(rx_ring->qcp_fl); fl_wr_p = nfp_qcp_wr_ptr_read(rx_ring->qcp_fl); seq_printf(file, "RX[%02d,%02d]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u FL_RD=%u FL_WR=%u\n", rx_ring->idx, rx_ring->fl_qcidx, rx_ring->cnt, &rx_ring->dma, rx_ring->rxds, rx_ring->rd_p, rx_ring->wr_p, fl_rd_p, fl_wr_p); for (i = 0; i < rxd_cnt; i++) { rxd = &rx_ring->rxds[i]; seq_printf(file, "%04d: 0x%08x 0x%08x", i, rxd->vals[0], rxd->vals[1]); if (!r_vec->xsk_pool) { frag = READ_ONCE(rx_ring->rxbufs[i].frag); if (frag) seq_printf(file, " frag=%p", frag); if (rx_ring->rxbufs[i].dma_addr) seq_printf(file, " dma_addr=%pad", &rx_ring->rxbufs[i].dma_addr); } else { if (rx_ring->xsk_rxbufs[i].dma_addr) seq_printf(file, " dma_addr=%pad", &rx_ring->xsk_rxbufs[i].dma_addr); } if (i == rx_ring->rd_p % rxd_cnt) seq_puts(file, " H_RD "); if (i == rx_ring->wr_p % rxd_cnt) seq_puts(file, " H_WR "); if (i == fl_rd_p % rxd_cnt) seq_puts(file, " FL_RD"); if (i == fl_wr_p % rxd_cnt) seq_puts(file, " FL_WR"); seq_putc(file, '\n'); } out: rtnl_unlock(); return 0; } DEFINE_SHOW_ATTRIBUTE(nfp_rx_q); static int nfp_tx_q_show(struct seq_file *file, void *data); DEFINE_SHOW_ATTRIBUTE(nfp_tx_q); static int nfp_tx_q_show(struct seq_file *file, void *data) { struct nfp_net_r_vector *r_vec = file->private; struct nfp_net_tx_ring *tx_ring; struct nfp_net *nn; int d_rd_p, d_wr_p; rtnl_lock(); if (debugfs_real_fops(file->file) == &nfp_tx_q_fops) tx_ring = r_vec->tx_ring; else tx_ring = r_vec->xdp_ring; if (!r_vec->nfp_net || !tx_ring) goto out; nn = r_vec->nfp_net; if (!nfp_net_running(nn)) goto out; d_rd_p = nfp_qcp_rd_ptr_read(tx_ring->qcp_q); d_wr_p = nfp_qcp_wr_ptr_read(tx_ring->qcp_q); seq_printf(file, "TX[%02d,%02d%s]: cnt=%u dma=%pad host=%p H_RD=%u H_WR=%u D_RD=%u D_WR=%u", tx_ring->idx, tx_ring->qcidx, tx_ring == r_vec->tx_ring ? 
"" : "xdp", tx_ring->cnt, &tx_ring->dma, tx_ring->txds, tx_ring->rd_p, tx_ring->wr_p, d_rd_p, d_wr_p); if (tx_ring->txrwb) seq_printf(file, " TXRWB=%llu", *tx_ring->txrwb); seq_putc(file, '\n'); nfp_net_debugfs_print_tx_descs(file, &nn->dp, r_vec, tx_ring, d_rd_p, d_wr_p); out: rtnl_unlock(); return 0; } static int nfp_xdp_q_show(struct seq_file *file, void *data) { return nfp_tx_q_show(file, data); } DEFINE_SHOW_ATTRIBUTE(nfp_xdp_q); void nfp_net_debugfs_vnic_add(struct nfp_net *nn, struct dentry *ddir) { struct dentry *queues, *tx, *rx, *xdp; char name[20]; int i; if (IS_ERR_OR_NULL(nfp_dir)) return; if (nfp_net_is_data_vnic(nn)) sprintf(name, "vnic%d", nn->id); else strcpy(name, "ctrl-vnic"); nn->debugfs_dir = debugfs_create_dir(name, ddir); /* Create queue debugging sub-tree */ queues = debugfs_create_dir("queue", nn->debugfs_dir); rx = debugfs_create_dir("rx", queues); tx = debugfs_create_dir("tx", queues); xdp = debugfs_create_dir("xdp", queues); for (i = 0; i < min(nn->max_rx_rings, nn->max_r_vecs); i++) { sprintf(name, "%d", i); debugfs_create_file(name, 0400, rx, &nn->r_vecs[i], &nfp_rx_q_fops); debugfs_create_file(name, 0400, xdp, &nn->r_vecs[i], &nfp_xdp_q_fops); } for (i = 0; i < min(nn->max_tx_rings, nn->max_r_vecs); i++) { sprintf(name, "%d", i); debugfs_create_file(name, 0400, tx, &nn->r_vecs[i], &nfp_tx_q_fops); } } struct dentry *nfp_net_debugfs_device_add(struct pci_dev *pdev) { return debugfs_create_dir(pci_name(pdev), nfp_dir); } void nfp_net_debugfs_dir_clean(struct dentry **dir) { debugfs_remove_recursive(*dir); *dir = NULL; } void nfp_net_debugfs_create(void) { nfp_dir = debugfs_create_dir("nfp_net", NULL); } void nfp_net_debugfs_destroy(void) { debugfs_remove_recursive(nfp_dir); nfp_dir = NULL; }
// SPDX-License-Identifier: GPL-2.0-only /* -*- linux-c -*- ------------------------------------------------------- * * * Copyright (C) 1991, 1992 Linus Torvalds * Copyright 2007 rPath, Inc. - All Rights Reserved * Copyright 2009 Intel Corporation; author H. Peter Anvin * * ----------------------------------------------------------------------- */ /* * Get EDD BIOS disk information */ #include "boot.h" #include <linux/edd.h> #include "string.h" #if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE) /* * Read the MBR (first sector) from a specific device. */ static int read_mbr(u8 devno, void *buf) { struct biosregs ireg, oreg; initregs(&ireg); ireg.ax = 0x0201; /* Legacy Read, one sector */ ireg.cx = 0x0001; /* Sector 0-0-1 */ ireg.dl = devno; ireg.bx = (size_t)buf; intcall(0x13, &ireg, &oreg); return -(oreg.eflags & X86_EFLAGS_CF); /* 0 or -1 */ } static u32 read_mbr_sig(u8 devno, struct edd_info *ei, u32 *mbrsig) { int sector_size; char *mbrbuf_ptr, *mbrbuf_end; u32 buf_base, mbr_base; extern char _end[]; u16 mbr_magic; sector_size = ei->params.bytes_per_sector; if (!sector_size) sector_size = 512; /* Best available guess */ /* Produce a naturally aligned buffer on the heap */ buf_base = (ds() << 4) + (u32)&_end; mbr_base = (buf_base+sector_size-1) & ~(sector_size-1); mbrbuf_ptr = _end + (mbr_base-buf_base); mbrbuf_end = mbrbuf_ptr + sector_size; /* Make sure we actually have space on the heap... */ if (!(boot_params.hdr.loadflags & CAN_USE_HEAP)) return -1; if (mbrbuf_end > (char *)(size_t)boot_params.hdr.heap_end_ptr) return -1; memset(mbrbuf_ptr, 0, sector_size); if (read_mbr(devno, mbrbuf_ptr)) return -1; *mbrsig = *(u32 *)&mbrbuf_ptr[EDD_MBR_SIG_OFFSET]; mbr_magic = *(u16 *)&mbrbuf_ptr[510]; /* check for valid MBR magic */ return mbr_magic == 0xAA55 ? 0 : -1; } static int get_edd_info(u8 devno, struct edd_info *ei) { struct biosregs ireg, oreg; memset(ei, 0, sizeof(*ei)); /* Check Extensions Present */ initregs(&ireg); ireg.ah = 0x41; ireg.bx = EDDMAGIC1; ireg.dl = devno; intcall(0x13, &ireg, &oreg); if (oreg.eflags & X86_EFLAGS_CF) return -1; /* No extended information */ if (oreg.bx != EDDMAGIC2) return -1; ei->device = devno; ei->version = oreg.ah; /* EDD version number */ ei->interface_support = oreg.cx; /* EDD functionality subsets */ /* Extended Get Device Parameters */ ei->params.length = sizeof(ei->params); ireg.ah = 0x48; ireg.si = (size_t)&ei->params; intcall(0x13, &ireg, &oreg); /* Get legacy CHS parameters */ /* Ralf Brown recommends setting ES:DI to 0:0 */ ireg.ah = 0x08; ireg.es = 0; intcall(0x13, &ireg, &oreg); if (!(oreg.eflags & X86_EFLAGS_CF)) { ei->legacy_max_cylinder = oreg.ch + ((oreg.cl & 0xc0) << 2); ei->legacy_max_head = oreg.dh; ei->legacy_sectors_per_track = oreg.cl & 0x3f; } return 0; } void query_edd(void) { char eddarg[8]; int do_mbr = 1; #ifdef CONFIG_EDD_OFF int do_edd = 0; #else int do_edd = 1; #endif int be_quiet; int devno; struct edd_info ei, *edp; u32 *mbrptr; if (cmdline_find_option("edd", eddarg, sizeof(eddarg)) > 0) { if (!strcmp(eddarg, "skipmbr") || !strcmp(eddarg, "skip")) { do_edd = 1; do_mbr = 0; } else if (!strcmp(eddarg, "off")) do_edd = 0; else if (!strcmp(eddarg, "on")) do_edd = 1; } be_quiet = cmdline_find_option_bool("quiet"); edp = boot_params.eddbuf; mbrptr = boot_params.edd_mbr_sig_buffer; if (!do_edd) return; /* Bugs in OnBoard or AddOnCards Bios may hang the EDD probe, * so give a hint if this happens. */ if (!be_quiet) printf("Probing EDD (edd=off to disable)... 
"); for (devno = 0x80; devno < 0x80+EDD_MBR_SIG_MAX; devno++) { /* * Scan the BIOS-supported hard disks and query EDD * information... */ if (!get_edd_info(devno, &ei) && boot_params.eddbuf_entries < EDDMAXNR) { memcpy(edp, &ei, sizeof(ei)); edp++; boot_params.eddbuf_entries++; } if (do_mbr && !read_mbr_sig(devno, &ei, mbrptr++)) boot_params.edd_mbr_sig_buf_entries = devno-0x80+1; } if (!be_quiet) printf("ok\n"); } #endif
// SPDX-License-Identifier: GPL-2.0 /* * drivers/uio/uio.c * * Copyright(C) 2005, Benedikt Spranger <[email protected]> * Copyright(C) 2005, Thomas Gleixner <[email protected]> * Copyright(C) 2006, Hans J. Koch <[email protected]> * Copyright(C) 2006, Greg Kroah-Hartman <[email protected]> * * Userspace IO * * Base Functions */ #include <linux/module.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/device.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> #include <linux/sched/signal.h> #include <linux/string.h> #include <linux/kobject.h> #include <linux/cdev.h> #include <linux/uio_driver.h> #include <linux/dma-mapping.h> #define UIO_MAX_DEVICES (1U << MINORBITS) static int uio_major; static struct cdev *uio_cdev; static DEFINE_IDR(uio_idr); static const struct file_operations uio_fops; /* Protect idr accesses */ static DEFINE_MUTEX(minor_lock); /* * attributes */ struct uio_map { struct kobject kobj; struct uio_mem *mem; }; #define to_map(map) container_of(map, struct uio_map, kobj) static ssize_t map_name_show(struct uio_mem *mem, char *buf) { if (unlikely(!mem->name)) mem->name = ""; return sprintf(buf, "%s\n", mem->name); } static ssize_t map_addr_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "%pa\n", &mem->addr); } static ssize_t map_size_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "%pa\n", &mem->size); } static ssize_t map_offset_show(struct uio_mem *mem, char *buf) { return sprintf(buf, "0x%llx\n", (unsigned long long)mem->offs); } struct map_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_mem *, char *); ssize_t (*store)(struct uio_mem *, const char *, size_t); }; static struct map_sysfs_entry name_attribute = __ATTR(name, S_IRUGO, map_name_show, NULL); static struct map_sysfs_entry addr_attribute = __ATTR(addr, S_IRUGO, map_addr_show, NULL); static struct map_sysfs_entry size_attribute = __ATTR(size, S_IRUGO, map_size_show, NULL); static struct map_sysfs_entry offset_attribute = __ATTR(offset, S_IRUGO, map_offset_show, NULL); static struct attribute *map_attrs[] = { &name_attribute.attr, &addr_attribute.attr, &size_attribute.attr, &offset_attribute.attr, NULL, /* need to NULL terminate the list of attributes */ }; ATTRIBUTE_GROUPS(map); static void map_release(struct kobject *kobj) { struct uio_map *map = to_map(kobj); kfree(map); } static ssize_t map_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_map *map = to_map(kobj); struct uio_mem *mem = map->mem; struct map_sysfs_entry *entry; entry = container_of(attr, struct map_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(mem, buf); } static const struct sysfs_ops map_sysfs_ops = { .show = map_type_show, }; static const struct kobj_type map_attr_type = { .release = map_release, .sysfs_ops = &map_sysfs_ops, .default_groups = map_groups, }; struct uio_portio { struct kobject kobj; struct uio_port *port; }; #define to_portio(portio) container_of(portio, struct uio_portio, kobj) static ssize_t portio_name_show(struct uio_port *port, char *buf) { if (unlikely(!port->name)) port->name = ""; return sprintf(buf, "%s\n", port->name); } static ssize_t portio_start_show(struct uio_port *port, char *buf) { return sprintf(buf, "0x%lx\n", port->start); } static ssize_t portio_size_show(struct uio_port *port, char *buf) { return sprintf(buf, "0x%lx\n", port->size); } static ssize_t portio_porttype_show(struct uio_port *port, char *buf) { const char *porttypes[] = {"none", "x86", "gpio", "other"}; if 
((port->porttype < 0) || (port->porttype > UIO_PORT_OTHER)) return -EINVAL; return sprintf(buf, "port_%s\n", porttypes[port->porttype]); } struct portio_sysfs_entry { struct attribute attr; ssize_t (*show)(struct uio_port *, char *); ssize_t (*store)(struct uio_port *, const char *, size_t); }; static struct portio_sysfs_entry portio_name_attribute = __ATTR(name, S_IRUGO, portio_name_show, NULL); static struct portio_sysfs_entry portio_start_attribute = __ATTR(start, S_IRUGO, portio_start_show, NULL); static struct portio_sysfs_entry portio_size_attribute = __ATTR(size, S_IRUGO, portio_size_show, NULL); static struct portio_sysfs_entry portio_porttype_attribute = __ATTR(porttype, S_IRUGO, portio_porttype_show, NULL); static struct attribute *portio_attrs[] = { &portio_name_attribute.attr, &portio_start_attribute.attr, &portio_size_attribute.attr, &portio_porttype_attribute.attr, NULL, }; ATTRIBUTE_GROUPS(portio); static void portio_release(struct kobject *kobj) { struct uio_portio *portio = to_portio(kobj); kfree(portio); } static ssize_t portio_type_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct uio_portio *portio = to_portio(kobj); struct uio_port *port = portio->port; struct portio_sysfs_entry *entry; entry = container_of(attr, struct portio_sysfs_entry, attr); if (!entry->show) return -EIO; return entry->show(port, buf); } static const struct sysfs_ops portio_sysfs_ops = { .show = portio_type_show, }; static const struct kobj_type portio_attr_type = { .release = portio_release, .sysfs_ops = &portio_sysfs_ops, .default_groups = portio_groups, }; static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); int ret; mutex_lock(&idev->info_lock); if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); goto out; } ret = sprintf(buf, "%s\n", idev->info->name); out: mutex_unlock(&idev->info_lock); return ret; } static DEVICE_ATTR_RO(name); static ssize_t version_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); int ret; mutex_lock(&idev->info_lock); if (!idev->info) { ret = -EINVAL; dev_err(dev, "the device has been unregistered\n"); goto out; } ret = sprintf(buf, "%s\n", idev->info->version); out: mutex_unlock(&idev->info_lock); return ret; } static DEVICE_ATTR_RO(version); static ssize_t event_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uio_device *idev = dev_get_drvdata(dev); return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event)); } static DEVICE_ATTR_RO(event); static struct attribute *uio_attrs[] = { &dev_attr_name.attr, &dev_attr_version.attr, &dev_attr_event.attr, NULL, }; ATTRIBUTE_GROUPS(uio); /* UIO class infrastructure */ static struct class uio_class = { .name = "uio", .dev_groups = uio_groups, }; static bool uio_class_registered; /* * device functions */ static int uio_dev_add_attributes(struct uio_device *idev) { int ret; int mi, pi; int map_found = 0; int portio_found = 0; struct uio_mem *mem; struct uio_map *map; struct uio_port *port; struct uio_portio *portio; for (mi = 0; mi < MAX_UIO_MAPS; mi++) { mem = &idev->info->mem[mi]; if (mem->size == 0) break; if (!map_found) { map_found = 1; idev->map_dir = kobject_create_and_add("maps", &idev->dev.kobj); if (!idev->map_dir) { ret = -ENOMEM; goto err_map; } } map = kzalloc(sizeof(*map), GFP_KERNEL); if (!map) { ret = -ENOMEM; goto err_map; } kobject_init(&map->kobj, &map_attr_type); map->mem 
= mem; mem->map = map; ret = kobject_add(&map->kobj, idev->map_dir, "map%d", mi); if (ret) goto err_map_kobj; ret = kobject_uevent(&map->kobj, KOBJ_ADD); if (ret) goto err_map_kobj; } for (pi = 0; pi < MAX_UIO_PORT_REGIONS; pi++) { port = &idev->info->port[pi]; if (port->size == 0) break; if (!portio_found) { portio_found = 1; idev->portio_dir = kobject_create_and_add("portio", &idev->dev.kobj); if (!idev->portio_dir) { ret = -ENOMEM; goto err_portio; } } portio = kzalloc(sizeof(*portio), GFP_KERNEL); if (!portio) { ret = -ENOMEM; goto err_portio; } kobject_init(&portio->kobj, &portio_attr_type); portio->port = port; port->portio = portio; ret = kobject_add(&portio->kobj, idev->portio_dir, "port%d", pi); if (ret) goto err_portio_kobj; ret = kobject_uevent(&portio->kobj, KOBJ_ADD); if (ret) goto err_portio_kobj; } return 0; err_portio: pi--; err_portio_kobj: for (; pi >= 0; pi--) { port = &idev->info->port[pi]; portio = port->portio; kobject_put(&portio->kobj); } kobject_put(idev->portio_dir); err_map: mi--; err_map_kobj: for (; mi >= 0; mi--) { mem = &idev->info->mem[mi]; map = mem->map; kobject_put(&map->kobj); } kobject_put(idev->map_dir); dev_err(&idev->dev, "error creating sysfs files (%d)\n", ret); return ret; } static void uio_dev_del_attributes(struct uio_device *idev) { int i; struct uio_mem *mem; struct uio_port *port; for (i = 0; i < MAX_UIO_MAPS; i++) { mem = &idev->info->mem[i]; if (mem->size == 0) break; kobject_put(&mem->map->kobj); } kobject_put(idev->map_dir); for (i = 0; i < MAX_UIO_PORT_REGIONS; i++) { port = &idev->info->port[i]; if (port->size == 0) break; kobject_put(&port->portio->kobj); } kobject_put(idev->portio_dir); } static int uio_get_minor(struct uio_device *idev) { int retval; mutex_lock(&minor_lock); retval = idr_alloc(&uio_idr, idev, 0, UIO_MAX_DEVICES, GFP_KERNEL); if (retval >= 0) { idev->minor = retval; retval = 0; } else if (retval == -ENOSPC) { dev_err(&idev->dev, "too many uio devices\n"); retval = -EINVAL; } mutex_unlock(&minor_lock); return retval; } static void uio_free_minor(unsigned long minor) { mutex_lock(&minor_lock); idr_remove(&uio_idr, minor); mutex_unlock(&minor_lock); } /** * uio_event_notify - trigger an interrupt event * @info: UIO device capabilities */ void uio_event_notify(struct uio_info *info) { struct uio_device *idev = info->uio_dev; atomic_inc(&idev->event); wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_IN); } EXPORT_SYMBOL_GPL(uio_event_notify); /** * uio_interrupt_handler - hardware interrupt handler * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer * @dev_id: Pointer to the devices uio_device structure */ static irqreturn_t uio_interrupt_handler(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; irqreturn_t ret; ret = idev->info->handler(irq, idev->info); if (ret == IRQ_HANDLED) ret = IRQ_WAKE_THREAD; return ret; } /** * uio_interrupt_thread - irq thread handler * @irq: IRQ number * @dev_id: Pointer to the devices uio_device structure */ static irqreturn_t uio_interrupt_thread(int irq, void *dev_id) { struct uio_device *idev = (struct uio_device *)dev_id; uio_event_notify(idev->info); return IRQ_HANDLED; } struct uio_listener { struct uio_device *dev; s32 event_count; }; static int uio_open(struct inode *inode, struct file *filep) { struct uio_device *idev; struct uio_listener *listener; int ret = 0; mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); if (!idev) { ret = -ENODEV; mutex_unlock(&minor_lock); goto out; } get_device(&idev->dev); 
mutex_unlock(&minor_lock); if (!try_module_get(idev->owner)) { ret = -ENODEV; goto err_module_get; } listener = kmalloc(sizeof(*listener), GFP_KERNEL); if (!listener) { ret = -ENOMEM; goto err_alloc_listener; } listener->dev = idev; listener->event_count = atomic_read(&idev->event); filep->private_data = listener; mutex_lock(&idev->info_lock); if (!idev->info) { mutex_unlock(&idev->info_lock); ret = -EINVAL; goto err_infoopen; } if (idev->info->open) ret = idev->info->open(idev->info, inode); mutex_unlock(&idev->info_lock); if (ret) goto err_infoopen; return 0; err_infoopen: kfree(listener); err_alloc_listener: module_put(idev->owner); err_module_get: put_device(&idev->dev); out: return ret; } static int uio_fasync(int fd, struct file *filep, int on) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; return fasync_helper(fd, filep, on, &idev->async_queue); } static int uio_release(struct inode *inode, struct file *filep) { int ret = 0; struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; mutex_lock(&idev->info_lock); if (idev->info && idev->info->release) ret = idev->info->release(idev->info, inode); mutex_unlock(&idev->info_lock); module_put(idev->owner); kfree(listener); put_device(&idev->dev); return ret; } static __poll_t uio_poll(struct file *filep, poll_table *wait) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; __poll_t ret = 0; mutex_lock(&idev->info_lock); if (!idev->info || !idev->info->irq) ret = -EIO; mutex_unlock(&idev->info_lock); if (ret) return ret; poll_wait(filep, &idev->wait, wait); if (listener->event_count != atomic_read(&idev->event)) return EPOLLIN | EPOLLRDNORM; return 0; } static ssize_t uio_read(struct file *filep, char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; DECLARE_WAITQUEUE(wait, current); ssize_t retval = 0; s32 event_count; if (count != sizeof(s32)) return -EINVAL; add_wait_queue(&idev->wait, &wait); do { mutex_lock(&idev->info_lock); if (!idev->info || !idev->info->irq) { retval = -EIO; mutex_unlock(&idev->info_lock); break; } mutex_unlock(&idev->info_lock); set_current_state(TASK_INTERRUPTIBLE); event_count = atomic_read(&idev->event); if (event_count != listener->event_count) { __set_current_state(TASK_RUNNING); if (copy_to_user(buf, &event_count, count)) retval = -EFAULT; else { listener->event_count = event_count; retval = count; } break; } if (filep->f_flags & O_NONBLOCK) { retval = -EAGAIN; break; } if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } while (1); __set_current_state(TASK_RUNNING); remove_wait_queue(&idev->wait, &wait); return retval; } static ssize_t uio_write(struct file *filep, const char __user *buf, size_t count, loff_t *ppos) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; ssize_t retval; s32 irq_on; if (count != sizeof(s32)) return -EINVAL; if (copy_from_user(&irq_on, buf, count)) return -EFAULT; mutex_lock(&idev->info_lock); if (!idev->info) { retval = -EINVAL; goto out; } if (!idev->info->irq) { retval = -EIO; goto out; } if (!idev->info->irqcontrol) { retval = -ENOSYS; goto out; } retval = idev->info->irqcontrol(idev->info, irq_on); out: mutex_unlock(&idev->info_lock); return retval ? 
retval : sizeof(s32); } static int uio_find_mem_index(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; if (vma->vm_pgoff < MAX_UIO_MAPS) { if (idev->info->mem[vma->vm_pgoff].size == 0) return -1; return (int)vma->vm_pgoff; } return -1; } static vm_fault_t uio_vma_fault(struct vm_fault *vmf) { struct uio_device *idev = vmf->vma->vm_private_data; struct page *page; unsigned long offset; void *addr; vm_fault_t ret = 0; int mi; mutex_lock(&idev->info_lock); if (!idev->info) { ret = VM_FAULT_SIGBUS; goto out; } mi = uio_find_mem_index(vmf->vma); if (mi < 0) { ret = VM_FAULT_SIGBUS; goto out; } /* * We need to subtract mi because userspace uses offset = N*PAGE_SIZE * to use mem[N]. */ offset = (vmf->pgoff - mi) << PAGE_SHIFT; addr = (void *)(unsigned long)idev->info->mem[mi].addr + offset; if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL) page = virt_to_page(addr); else page = vmalloc_to_page(addr); get_page(page); vmf->page = page; out: mutex_unlock(&idev->info_lock); return ret; } static const struct vm_operations_struct uio_logical_vm_ops = { .fault = uio_vma_fault, }; static int uio_mmap_logical(struct vm_area_struct *vma) { vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP); vma->vm_ops = &uio_logical_vm_ops; return 0; } static const struct vm_operations_struct uio_physical_vm_ops = { #ifdef CONFIG_HAVE_IOREMAP_PROT .access = generic_access_phys, #endif }; static int uio_mmap_physical(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; int mi = uio_find_mem_index(vma); struct uio_mem *mem; if (mi < 0) return -EINVAL; mem = idev->info->mem + mi; if (mem->addr & ~PAGE_MASK) return -ENODEV; if (vma->vm_end - vma->vm_start > mem->size) return -EINVAL; vma->vm_ops = &uio_physical_vm_ops; if (idev->info->mem[mi].memtype == UIO_MEM_PHYS) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); /* * We cannot use the vm_iomap_memory() helper here, * because vma->vm_pgoff is the map index we looked * up above in uio_find_mem_index(), rather than an * actual page offset into the mmap. * * So we just do the physical mmap without a page * offset. */ return remap_pfn_range(vma, vma->vm_start, mem->addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot); } static int uio_mmap_dma_coherent(struct vm_area_struct *vma) { struct uio_device *idev = vma->vm_private_data; struct uio_mem *mem; void *addr; int ret = 0; int mi; mi = uio_find_mem_index(vma); if (mi < 0) return -EINVAL; mem = idev->info->mem + mi; if (mem->addr & ~PAGE_MASK) return -ENODEV; if (mem->dma_addr & ~PAGE_MASK) return -ENODEV; if (!mem->dma_device) return -ENODEV; if (vma->vm_end - vma->vm_start > mem->size) return -EINVAL; dev_warn(mem->dma_device, "use of UIO_MEM_DMA_COHERENT is highly discouraged"); /* * UIO uses offset to index into the maps for a device. * We need to clear vm_pgoff for dma_mmap_coherent. 
*/ vma->vm_pgoff = 0; addr = (void *)(uintptr_t)mem->addr; ret = dma_mmap_coherent(mem->dma_device, vma, addr, mem->dma_addr, vma->vm_end - vma->vm_start); vma->vm_pgoff = mi; return ret; } static int uio_mmap(struct file *filep, struct vm_area_struct *vma) { struct uio_listener *listener = filep->private_data; struct uio_device *idev = listener->dev; int mi; unsigned long requested_pages, actual_pages; int ret = 0; if (vma->vm_end < vma->vm_start) return -EINVAL; vma->vm_private_data = idev; mutex_lock(&idev->info_lock); if (!idev->info) { ret = -EINVAL; goto out; } mi = uio_find_mem_index(vma); if (mi < 0) { ret = -EINVAL; goto out; } requested_pages = vma_pages(vma); actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK) + idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT; if (requested_pages > actual_pages) { ret = -EINVAL; goto out; } if (idev->info->mmap) { ret = idev->info->mmap(idev->info, vma); goto out; } switch (idev->info->mem[mi].memtype) { case UIO_MEM_IOVA: case UIO_MEM_PHYS: ret = uio_mmap_physical(vma); break; case UIO_MEM_LOGICAL: case UIO_MEM_VIRTUAL: ret = uio_mmap_logical(vma); break; case UIO_MEM_DMA_COHERENT: ret = uio_mmap_dma_coherent(vma); break; default: ret = -EINVAL; } out: mutex_unlock(&idev->info_lock); return ret; } static const struct file_operations uio_fops = { .owner = THIS_MODULE, .open = uio_open, .release = uio_release, .read = uio_read, .write = uio_write, .mmap = uio_mmap, .poll = uio_poll, .fasync = uio_fasync, .llseek = noop_llseek, }; static int uio_major_init(void) { static const char name[] = "uio"; struct cdev *cdev = NULL; dev_t uio_dev = 0; int result; result = alloc_chrdev_region(&uio_dev, 0, UIO_MAX_DEVICES, name); if (result) goto out; result = -ENOMEM; cdev = cdev_alloc(); if (!cdev) goto out_unregister; cdev->owner = THIS_MODULE; cdev->ops = &uio_fops; kobject_set_name(&cdev->kobj, "%s", name); result = cdev_add(cdev, uio_dev, UIO_MAX_DEVICES); if (result) goto out_put; uio_major = MAJOR(uio_dev); uio_cdev = cdev; return 0; out_put: kobject_put(&cdev->kobj); out_unregister: unregister_chrdev_region(uio_dev, UIO_MAX_DEVICES); out: return result; } static void uio_major_cleanup(void) { unregister_chrdev_region(MKDEV(uio_major, 0), UIO_MAX_DEVICES); cdev_del(uio_cdev); } static int init_uio_class(void) { int ret; /* This is the first time in here, set everything up properly */ ret = uio_major_init(); if (ret) goto exit; ret = class_register(&uio_class); if (ret) { printk(KERN_ERR "class_register failed for uio\n"); goto err_class_register; } uio_class_registered = true; return 0; err_class_register: uio_major_cleanup(); exit: return ret; } static void release_uio_class(void) { uio_class_registered = false; class_unregister(&uio_class); uio_major_cleanup(); } static void uio_device_release(struct device *dev) { struct uio_device *idev = dev_get_drvdata(dev); kfree(idev); } /** * __uio_register_device - register a new userspace IO device * @owner: module that creates the new device * @parent: parent device * @info: UIO device capabilities * * returns zero on success or a negative error code. 
 */
int __uio_register_device(struct module *owner,
			  struct device *parent,
			  struct uio_info *info)
{
	struct uio_device *idev;
	int ret = 0;

	if (!uio_class_registered)
		return -EPROBE_DEFER;

	if (!parent || !info || !info->name || !info->version)
		return -EINVAL;

	info->uio_dev = NULL;

	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
	if (!idev) {
		return -ENOMEM;
	}

	idev->owner = owner;
	idev->info = info;
	mutex_init(&idev->info_lock);
	init_waitqueue_head(&idev->wait);
	atomic_set(&idev->event, 0);

	ret = uio_get_minor(idev);
	if (ret) {
		kfree(idev);
		return ret;
	}

	device_initialize(&idev->dev);
	idev->dev.devt = MKDEV(uio_major, idev->minor);
	idev->dev.class = &uio_class;
	idev->dev.parent = parent;
	idev->dev.release = uio_device_release;
	dev_set_drvdata(&idev->dev, idev);

	ret = dev_set_name(&idev->dev, "uio%d", idev->minor);
	if (ret)
		goto err_device_create;

	ret = device_add(&idev->dev);
	if (ret)
		goto err_device_create;

	ret = uio_dev_add_attributes(idev);
	if (ret)
		goto err_uio_dev_add_attributes;

	info->uio_dev = idev;

	if (info->irq && (info->irq != UIO_IRQ_CUSTOM)) {
		/*
		 * Note that we deliberately don't use devm_request_irq
		 * here. The parent module can unregister the UIO device
		 * and call pci_disable_msi, which requires that this
		 * irq has been freed. However, the device may have open
		 * FDs at the time of unregister and therefore may not be
		 * freed until they are released.
		 */
		ret = request_threaded_irq(info->irq, uio_interrupt_handler,
					   uio_interrupt_thread, info->irq_flags,
					   info->name, idev);
		if (ret) {
			info->uio_dev = NULL;
			goto err_request_irq;
		}
	}

	return 0;

err_request_irq:
	uio_dev_del_attributes(idev);
err_uio_dev_add_attributes:
	device_del(&idev->dev);
err_device_create:
	uio_free_minor(idev->minor);
	put_device(&idev->dev);
	return ret;
}
EXPORT_SYMBOL_GPL(__uio_register_device);

static void devm_uio_unregister_device(struct device *dev, void *res)
{
	uio_unregister_device(*(struct uio_info **)res);
}

/**
 * __devm_uio_register_device - Resource managed uio_register_device()
 * @owner: module that creates the new device
 * @parent: parent device
 * @info: UIO device capabilities
 *
 * returns zero on success or a negative error code.
 */
int __devm_uio_register_device(struct module *owner,
			       struct device *parent,
			       struct uio_info *info)
{
	struct uio_info **ptr;
	int ret;

	ptr = devres_alloc(devm_uio_unregister_device, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	*ptr = info;
	ret = __uio_register_device(owner, parent, info);
	if (ret) {
		devres_free(ptr);
		return ret;
	}

	devres_add(parent, ptr);

	return 0;
}
EXPORT_SYMBOL_GPL(__devm_uio_register_device);

/**
 * uio_unregister_device - unregister a userspace IO device
 * @info: UIO device capabilities
 *
 */
void uio_unregister_device(struct uio_info *info)
{
	struct uio_device *idev;
	unsigned long minor;

	if (!info || !info->uio_dev)
		return;

	idev = info->uio_dev;
	minor = idev->minor;

	mutex_lock(&idev->info_lock);
	uio_dev_del_attributes(idev);

	if (info->irq && info->irq != UIO_IRQ_CUSTOM)
		free_irq(info->irq, idev);

	idev->info = NULL;
	mutex_unlock(&idev->info_lock);

	wake_up_interruptible(&idev->wait);
	kill_fasync(&idev->async_queue, SIGIO, POLL_HUP);

	uio_free_minor(minor);

	device_unregister(&idev->dev);

	return;
}
EXPORT_SYMBOL_GPL(uio_unregister_device);

static int __init uio_init(void)
{
	return init_uio_class();
}

static void __exit uio_exit(void)
{
	release_uio_class();
	idr_destroy(&uio_idr);
}

module_init(uio_init)
module_exit(uio_exit)
MODULE_DESCRIPTION("Userspace IO core module");
MODULE_LICENSE("GPL v2");
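/*
 * A minimal userspace sketch of the UIO programming model implemented
 * above: a blocking read() of a 32-bit event count waits for interrupts,
 * and mmap() with offset N * page_size selects mapping N (see
 * uio_find_mem_index()). The device node name and mapping index are
 * illustrative assumptions.
 */
#include <fcntl.h>
#include <stdint.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/uio0", O_RDWR);
	long psz = sysconf(_SC_PAGESIZE);
	uint32_t events;

	if (fd < 0)
		return 1;
	/* map region 1: the offset is 1 * page size, not a byte offset */
	void *regs = mmap(NULL, psz, PROT_READ | PROT_WRITE,
			  MAP_SHARED, fd, 1 * psz);
	if (regs == MAP_FAILED)
		return 1;
	/* blocks until uio_event_notify() bumps the event counter */
	read(fd, &events, sizeof(events));
	munmap(regs, psz);
	close(fd);
	return 0;
}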
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2017 Intel Corporation.
 */
#if !defined(__HFI1_TRACE_MMU_H) || defined(TRACE_HEADER_MULTI_READ)
#define __HFI1_TRACE_MMU_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include "hfi.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM hfi1_mmu

DECLARE_EVENT_CLASS(hfi1_mmu_rb_template,
		    TP_PROTO(struct mmu_rb_node *node),
		    TP_ARGS(node),
		    TP_STRUCT__entry(__field(unsigned long, addr)
				     __field(unsigned long, len)
				     __field(unsigned int, refcount)
			    ),
		    TP_fast_assign(__entry->addr = node->addr;
				   __entry->len = node->len;
				   __entry->refcount = kref_read(&node->refcount);
			    ),
		    TP_printk("MMU node addr 0x%lx, len %lu, refcount %u",
			      __entry->addr,
			      __entry->len,
			      __entry->refcount
			    )
	);

DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_insert,
	     TP_PROTO(struct mmu_rb_node *node),
	     TP_ARGS(node));

TRACE_EVENT(hfi1_mmu_rb_search,
	    TP_PROTO(unsigned long addr, unsigned long len),
	    TP_ARGS(addr, len),
	    TP_STRUCT__entry(__field(unsigned long, addr)
			     __field(unsigned long, len)
		    ),
	    TP_fast_assign(__entry->addr = addr;
			   __entry->len = len;
		    ),
	    TP_printk("MMU node addr 0x%lx, len %lu",
		      __entry->addr,
		      __entry->len
		    )
	);

DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_mem_invalidate,
	     TP_PROTO(struct mmu_rb_node *node),
	     TP_ARGS(node));

DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_rb_evict,
	     TP_PROTO(struct mmu_rb_node *node),
	     TP_ARGS(node));

DEFINE_EVENT(hfi1_mmu_rb_template, hfi1_mmu_release_node,
	     TP_PROTO(struct mmu_rb_node *node),
	     TP_ARGS(node));

#endif /* __HFI1_TRACE_MMU_H */

#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_mmu
#include <trace/define_trace.h>
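/*
 * Sketch of how these events are fired from driver code: each
 * DEFINE_EVENT()/TRACE_EVENT() above generates a trace_<name>() helper,
 * so a caller holding an mmu_rb_node (variable names assumed) would do:
 *
 *	trace_hfi1_mmu_rb_insert(node);
 *	trace_hfi1_mmu_rb_search(addr, len);
 */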
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _BCACHEFS_IO_READ_H #define _BCACHEFS_IO_READ_H #include "bkey_buf.h" struct bch_read_bio { struct bch_fs *c; u64 start_time; u64 submit_time; /* * Reads will often have to be split, and if the extent being read from * was checksummed or compressed we'll also have to allocate bounce * buffers and copy the data back into the original bio. * * If we didn't have to split, we have to save and restore the original * bi_end_io - @split below indicates which: */ union { struct bch_read_bio *parent; bio_end_io_t *end_io; }; /* * Saved copy of bio->bi_iter, from submission time - allows us to * resubmit on IO error, and also to copy data back to the original bio * when we're bouncing: */ struct bvec_iter bvec_iter; unsigned offset_into_extent; u16 flags; union { struct { u16 bounce:1, split:1, kmalloc:1, have_ioref:1, narrow_crcs:1, hole:1, retry:2, context:2; }; u16 _state; }; struct bch_devs_list devs_have; struct extent_ptr_decoded pick; /* * pos we read from - different from data_pos for indirect extents: */ u32 subvol; struct bpos read_pos; /* * start pos of data we read (may not be pos of data we want) - for * promote, narrow extents paths: */ enum btree_id data_btree; struct bpos data_pos; struct bversion version; struct promote_op *promote; struct bch_io_opts opts; struct work_struct work; struct bio bio; }; #define to_rbio(_bio) container_of((_bio), struct bch_read_bio, bio) struct bch_devs_mask; struct cache_promote_op; struct extent_ptr_decoded; int __bch2_read_indirect_extent(struct btree_trans *, unsigned *, struct bkey_buf *); static inline int bch2_read_indirect_extent(struct btree_trans *trans, enum btree_id *data_btree, unsigned *offset_into_extent, struct bkey_buf *k) { if (k->k->k.type != KEY_TYPE_reflink_p) return 0; *data_btree = BTREE_ID_reflink; return __bch2_read_indirect_extent(trans, offset_into_extent, k); } enum bch_read_flags { BCH_READ_RETRY_IF_STALE = 1 << 0, BCH_READ_MAY_PROMOTE = 1 << 1, BCH_READ_USER_MAPPED = 1 << 2, BCH_READ_NODECODE = 1 << 3, BCH_READ_LAST_FRAGMENT = 1 << 4, /* internal: */ BCH_READ_MUST_BOUNCE = 1 << 5, BCH_READ_MUST_CLONE = 1 << 6, BCH_READ_IN_RETRY = 1 << 7, }; int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *, struct bvec_iter, struct bpos, enum btree_id, struct bkey_s_c, unsigned, struct bch_io_failures *, unsigned); static inline void bch2_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio, struct bpos read_pos, enum btree_id data_btree, struct bkey_s_c k, unsigned offset_into_extent, unsigned flags) { __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos, data_btree, k, offset_into_extent, NULL, flags); } void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter, subvol_inum, struct bch_io_failures *, unsigned flags); static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio, subvol_inum inum) { struct bch_io_failures failed = { .nr = 0 }; BUG_ON(rbio->_state); rbio->c = c; rbio->start_time = local_clock(); rbio->subvol = inum.subvol; __bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed, BCH_READ_RETRY_IF_STALE| BCH_READ_MAY_PROMOTE| BCH_READ_USER_MAPPED); } static inline struct bch_read_bio *rbio_init(struct bio *bio, struct bch_io_opts opts) { struct bch_read_bio *rbio = to_rbio(bio); rbio->_state = 0; rbio->promote = NULL; rbio->opts = opts; return rbio; } void bch2_fs_io_read_exit(struct bch_fs *); int bch2_fs_io_read_init(struct bch_fs *); #endif /* _BCACHEFS_IO_READ_H */
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* Legend Silicon LGS-8GL5 DMB-TH OFDM demodulator driver Copyright (C) 2008 Sirius International (Hong Kong) Limited Timothy Lee <[email protected]> */ #ifndef LGS8GL5_H #define LGS8GL5_H #include <linux/dvb/frontend.h> struct lgs8gl5_config { /* the demodulator's i2c address */ u8 demod_address; }; #if IS_REACHABLE(CONFIG_DVB_LGS8GL5) extern struct dvb_frontend *lgs8gl5_attach( const struct lgs8gl5_config *config, struct i2c_adapter *i2c); #else static inline struct dvb_frontend *lgs8gl5_attach( const struct lgs8gl5_config *config, struct i2c_adapter *i2c) { printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__); return NULL; } #endif /* CONFIG_DVB_LGS8GL5 */ #endif /* LGS8GL5_H */
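/*
 * Sketch of typical attach-time usage from a DVB adapter driver; the
 * config, the i2c adapter variable and the 0x19 demod address are
 * assumptions for illustration. dvb_attach() resolves the symbol only
 * when the demod driver is reachable:
 *
 *	static struct lgs8gl5_config cfg = { .demod_address = 0x19 };
 *
 *	fe = dvb_attach(lgs8gl5_attach, &cfg, i2c_adap);
 *	if (!fe)
 *		return -ENODEV;
 */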
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2024 - Google LLC
 * Author: Marc Zyngier <[email protected]>
 *
 * Primitive PAuth emulation for ERETAA/ERETAB.
 *
 * This code assumes that it is run from EL2, and that it is part of
 * the emulation of ERETAx for a guest hypervisor. That's a lot of
 * baked-in assumptions and shortcuts.
 *
 * Do not reuse for anything else!
 */

#include <linux/kvm_host.h>

#include <asm/gpr-num.h>
#include <asm/kvm_emulate.h>
#include <asm/pointer_auth.h>

/* PACGA Xd, Xn, Xm */
#define PACGA(d,n,m) \
	asm volatile(__DEFINE_ASM_GPR_NUMS \
		     ".inst 0x9AC03000 |" \
		     "(.L__gpr_num_%[Rd] << 0) |" \
		     "(.L__gpr_num_%[Rn] << 5) |" \
		     "(.L__gpr_num_%[Rm] << 16)\n" \
		     : [Rd] "=r" ((d)) \
		     : [Rn] "r" ((n)), [Rm] "r" ((m)))

static u64 compute_pac(struct kvm_vcpu *vcpu, u64 ptr, struct ptrauth_key ikey)
{
	struct ptrauth_key gkey;
	u64 mod, pac = 0;

	preempt_disable();

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		mod = __vcpu_sys_reg(vcpu, SP_EL2);
	else
		mod = read_sysreg(sp_el1);

	gkey.lo = read_sysreg_s(SYS_APGAKEYLO_EL1);
	gkey.hi = read_sysreg_s(SYS_APGAKEYHI_EL1);

	__ptrauth_key_install_nosync(APGA, ikey);
	isb();

	PACGA(pac, ptr, mod);
	isb();

	__ptrauth_key_install_nosync(APGA, gkey);

	preempt_enable();

	/* PAC in the top 32bits */
	return pac;
}

static bool effective_tbi(struct kvm_vcpu *vcpu, bool bit55)
{
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	bool tbi, tbid;

	/*
	 * Since we are authenticating an instruction address, we have
	 * to take TBID into account. If E2H==0, ignore VA[55], as
	 * TCR_EL2 only has a single TBI/TBID. If VA[55] was set in
	 * this case, this is likely a guest bug...
	 */
	if (!vcpu_el2_e2h_is_set(vcpu)) {
		tbi = tcr & BIT(20);
		tbid = tcr & BIT(29);
	} else if (bit55) {
		tbi = tcr & TCR_TBI1;
		tbid = tcr & TCR_TBID1;
	} else {
		tbi = tcr & TCR_TBI0;
		tbid = tcr & TCR_TBID0;
	}

	return tbi && !tbid;
}

static int compute_bottom_pac(struct kvm_vcpu *vcpu, bool bit55)
{
	static const int maxtxsz = 39; // Revisit these two values once
	static const int mintxsz = 16; // (if) we support TTST/LVA/LVA2
	u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
	int txsz;

	if (!vcpu_el2_e2h_is_set(vcpu) || !bit55)
		txsz = FIELD_GET(TCR_T0SZ_MASK, tcr);
	else
		txsz = FIELD_GET(TCR_T1SZ_MASK, tcr);

	return 64 - clamp(txsz, mintxsz, maxtxsz);
}

static u64 compute_pac_mask(struct kvm_vcpu *vcpu, bool bit55)
{
	int bottom_pac;
	u64 mask;

	bottom_pac = compute_bottom_pac(vcpu, bit55);

	mask = GENMASK(54, bottom_pac);
	if (!effective_tbi(vcpu, bit55))
		mask |= GENMASK(63, 56);

	return mask;
}

static u64 to_canonical_addr(struct kvm_vcpu *vcpu, u64 ptr, u64 mask)
{
	bool bit55 = !!(ptr & BIT(55));

	if (bit55)
		return ptr | mask;

	return ptr & ~mask;
}

static u64 corrupt_addr(struct kvm_vcpu *vcpu, u64 ptr)
{
	bool bit55 = !!(ptr & BIT(55));
	u64 mask, error_code;
	int shift;

	if (effective_tbi(vcpu, bit55)) {
		mask = GENMASK(54, 53);
		shift = 53;
	} else {
		mask = GENMASK(62, 61);
		shift = 61;
	}

	if (esr_iss_is_eretab(kvm_vcpu_get_esr(vcpu)))
		error_code = 2 << shift;
	else
		error_code = 1 << shift;

	ptr &= ~mask;
	ptr |= error_code;

	return ptr;
}

/*
 * Authenticate an ERETAA/ERETAB instruction, returning true if the
 * authentication succeeded and false otherwise. In all cases, *elr
 * contains the VA to ERET to. Potential exception injection is left
 * to the caller.
*/ bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr) { u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL2); u64 esr = kvm_vcpu_get_esr(vcpu); u64 ptr, cptr, pac, mask; struct ptrauth_key ikey; *elr = ptr = vcpu_read_sys_reg(vcpu, ELR_EL2); /* We assume we're already in the context of an ERETAx */ if (esr_iss_is_eretab(esr)) { if (!(sctlr & SCTLR_EL1_EnIB)) return true; ikey.lo = __vcpu_sys_reg(vcpu, APIBKEYLO_EL1); ikey.hi = __vcpu_sys_reg(vcpu, APIBKEYHI_EL1); } else { if (!(sctlr & SCTLR_EL1_EnIA)) return true; ikey.lo = __vcpu_sys_reg(vcpu, APIAKEYLO_EL1); ikey.hi = __vcpu_sys_reg(vcpu, APIAKEYHI_EL1); } mask = compute_pac_mask(vcpu, !!(ptr & BIT(55))); cptr = to_canonical_addr(vcpu, ptr, mask); pac = compute_pac(vcpu, cptr, ikey); /* * Slightly deviate from the pseudocode: if we have a PAC * match with the signed pointer, then it must be good. * Anything after this point is pure error handling. */ if ((pac & mask) == (ptr & mask)) { *elr = cptr; return true; } /* * Authentication failed, corrupt the canonical address if * PAuth2 isn't implemented, or some XORing if it is. */ if (!kvm_has_pauth(vcpu->kvm, PAuth2)) cptr = corrupt_addr(vcpu, cptr); else cptr = ptr ^ (pac & mask); *elr = cptr; return false; }
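/*
 * Worked example of compute_pac_mask(), with assumed inputs: for a 48-bit
 * VA space TxSZ = 16, so compute_bottom_pac() yields 64 - 16 = 48 and the
 * PAC field is GENMASK(54, 48). If effective_tbi() is false, bits 63:56
 * carry PAC bits as well, giving GENMASK(63, 56) | GENMASK(54, 48); bit 55
 * is never part of the mask since it selects the address space half.
 */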
/* SPDX-License-Identifier: GPL-2.0 */ /* Copyright (c) 2019 Mellanox Technologies. */ #ifndef __MLX5_RSC_DUMP_H #define __MLX5_RSC_DUMP_H #include <linux/mlx5/rsc_dump.h> #include <linux/mlx5/driver.h> #include "mlx5_core.h" #define MLX5_RSC_DUMP_ALL 0xFFFF struct mlx5_rsc_dump_cmd; struct mlx5_rsc_dump; struct mlx5_rsc_dump *mlx5_rsc_dump_create(struct mlx5_core_dev *dev); void mlx5_rsc_dump_destroy(struct mlx5_core_dev *dev); int mlx5_rsc_dump_init(struct mlx5_core_dev *dev); void mlx5_rsc_dump_cleanup(struct mlx5_core_dev *dev); struct mlx5_rsc_dump_cmd *mlx5_rsc_dump_cmd_create(struct mlx5_core_dev *dev, struct mlx5_rsc_key *key); void mlx5_rsc_dump_cmd_destroy(struct mlx5_rsc_dump_cmd *cmd); int mlx5_rsc_dump_next(struct mlx5_core_dev *dev, struct mlx5_rsc_dump_cmd *cmd, struct page *page, int *size); #endif
// SPDX-License-Identifier: GPL-2.0 OR MIT // // Device Tree Source for i.MX6DL based congatec QMX6 // System on Module // // Copyright 2018-2021 General Electric Company // Copyright 2018-2021 Collabora // Copyright 2016 congatec AG #include "imx6dl.dtsi" #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/sound/fsl-imx-audmux.h> / { memory@10000000 { reg = <0x10000000 0x40000000>; }; reg_3p3v: 3p3v { compatible = "regulator-fixed"; regulator-name = "3P3V"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; i2cmux { compatible = "i2c-mux-gpio"; #address-cells = <1>; #size-cells = <0>; mux-gpios = <&gpio6 9 GPIO_ACTIVE_HIGH>; i2c-parent = <&i2c2>; i2c5: i2c@0 { reg = <0>; #address-cells = <1>; #size-cells = <0>; }; i2c6: i2c@1 { reg = <1>; #address-cells = <1>; #size-cells = <0>; }; }; }; &audmux { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_audmux>; mux-ssi1 { fsl,audmux-port = <MX51_AUDMUX_PORT1_SSI0>; fsl,port-config = < (IMX_AUDMUX_V2_PTCR_TFSDIR | IMX_AUDMUX_V2_PTCR_TFSEL(MX51_AUDMUX_PORT6) | IMX_AUDMUX_V2_PTCR_TCLKDIR | IMX_AUDMUX_V2_PTCR_TCSEL(MX51_AUDMUX_PORT6) | IMX_AUDMUX_V2_PTCR_SYN) IMX_AUDMUX_V2_PDCR_RXDSEL(MX51_AUDMUX_PORT6) >; }; mux-aud6 { fsl,audmux-port = <MX51_AUDMUX_PORT6>; fsl,port-config = < IMX_AUDMUX_V2_PTCR_SYN IMX_AUDMUX_V2_PDCR_RXDSEL(MX51_AUDMUX_PORT1_SSI0) >; }; }; &clks { clocks = <&rtc_sqw>; clock-names = "ckil"; assigned-clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>; assigned-clock-parents = <&clks IMX6QDL_CLK_PLL2_PFD0_352M>, <&clks IMX6QDL_CLK_PLL2_PFD0_352M>; }; &ecspi1 { cs-gpios = <&gpio3 19 GPIO_ACTIVE_LOW>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_spi1>; status = "okay"; flash@0 { #address-cells = <1>; #size-cells = <1>; compatible = "sst,sst25vf032b", "jedec,spi-nor"; spi-max-frequency = <20000000>; reg = <0>; partition@0 { label = "bootloader"; reg = <0x0000000 0x100000>; }; partition@100000 { label = "user"; reg = <0x0100000 0x2fc000>; }; partition@3fc000 { label = "reserved"; reg = <0x03fc000 0x4000>; read-only; }; }; }; &fec { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet &pinctrl_phy_reset>; phy-mode = "rgmii-id"; phy-reset-gpios = <&gpio3 23 GPIO_ACTIVE_LOW>; fsl,magic-packet; phy-handle = <&phy0>; mdio { #address-cells = <1>; #size-cells = <0>; phy0: ethernet-phy@6 { reg = <6>; qca,clk-out-frequency = <125000000>; }; }; }; &i2c1 { clock-frequency = <100000>; pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c1>; pinctrl-1 = <&pinctrl_i2c1_gpio>; scl-gpios = <&gpio3 21 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; sda-gpios = <&gpio3 28 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; }; &i2c2 { clock-frequency = <100000>; pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c2>; pinctrl-1 = <&pinctrl_i2c2_gpio>; scl-gpios = <&gpio4 12 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; sda-gpios = <&gpio4 13 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; }; &i2c3 { clock-frequency = <100000>; pinctrl-names = "default", "gpio"; pinctrl-0 = <&pinctrl_i2c3>; pinctrl-1 = <&pinctrl_i2c3_gpio>; scl-gpios = <&gpio1 3 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; sda-gpios = <&gpio1 6 (GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN)>; status = "okay"; rtc: m41t62@68 { compatible = "st,m41t62"; reg = <0x68>; rtc_sqw: clock { compatible = "fixed-clock"; #clock-cells = <0>; clock-frequency = <32768>; }; }; }; &i2c6 { pmic@8 { compatible = "fsl,pfuze100"; reg = <0x08>; regulators { sw1a_reg: sw1ab { regulator-min-microvolt = <300000>; regulator-max-microvolt = <1875000>; 
regulator-boot-on; regulator-always-on; regulator-ramp-delay = <6250>; }; sw1c_reg: sw1c { regulator-min-microvolt = <300000>; regulator-max-microvolt = <1875000>; regulator-boot-on; regulator-always-on; regulator-ramp-delay = <6250>; }; sw2_reg: sw2 { regulator-min-microvolt = <800000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; sw3a_reg: sw3a { regulator-min-microvolt = <400000>; regulator-max-microvolt = <1975000>; regulator-boot-on; regulator-always-on; }; sw3b_reg: sw3b { regulator-min-microvolt = <400000>; regulator-max-microvolt = <1975000>; regulator-boot-on; regulator-always-on; }; sw4_reg: sw4 { regulator-min-microvolt = <675000>; regulator-max-microvolt = <3300000>; regulator-boot-on; regulator-always-on; }; swbst_reg: swbst { regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5150000>; }; snvs_reg: vsnvs { regulator-min-microvolt = <1000000>; regulator-max-microvolt = <3000000>; regulator-boot-on; regulator-always-on; }; vref_reg: vrefddr { regulator-boot-on; regulator-always-on; }; /* * keep VGEN3, VGEN4 and VGEN5 enabled in order to * maintain backward compatibility with hw-rev. A.0 */ vgen3_reg: vgen3 { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-always-on; }; vgen4_reg: vgen4 { regulator-min-microvolt = <2500000>; regulator-max-microvolt = <2500000>; regulator-always-on; }; vgen5_reg: vgen5 { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-always-on; }; /* supply voltage for eMMC */ vgen6_reg: vgen6 { regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-boot-on; regulator-always-on; }; }; }; }; &pcie { reset-gpio = <&gpio1 20 0>; }; &pwm4 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_pwm4>; }; &reg_arm { vin-supply = <&sw1a_reg>; }; &reg_pu { vin-supply = <&sw1c_reg>; }; &reg_soc { vin-supply = <&sw1c_reg>; }; &snvs_poweroff { status = "okay"; }; &uart2 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart2>; status = "okay"; }; &uart3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_uart3>; status = "okay"; }; &usbh1 { /* Connected to USB-Hub SMSC USB2514, provides P0, P2, P3, P4 on Qseven connector */ vbus-supply = <&reg_5v>; status = "okay"; }; &usbotg { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usbotg>; }; &usdhc2 { /* MicroSD card slot */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc2>; cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>; no-1-8-v; keep-power-in-suspend; wakeup-source; vmmc-supply = <&reg_3p3v>; status = "okay"; }; &usdhc3 { /* eMMC module */ pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; non-removable; bus-width = <8>; no-1-8-v; keep-power-in-suspend; wakeup-source; vmmc-supply = <&reg_3p3v>; status = "okay"; }; &wdog1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_wdog>; fsl,ext-reset-output; }; &iomuxc { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hog>; pinctrl_audmux: audmuxgrp { fsl,pins = < MX6QDL_PAD_DI0_PIN2__AUD6_TXD 0x110b0 /* Q7[67] HDA_SDO */ MX6QDL_PAD_DI0_PIN3__AUD6_TXFS 0x30b0 /* Q7[59] HDA_SYNC */ MX6QDL_PAD_DI0_PIN4__AUD6_RXD 0x30b0 /* Q7[65] HDA_SDI */ MX6QDL_PAD_DI0_PIN15__AUD6_TXC 0x30b0 /* Q7[63] HDA_BITCLK */ >; }; /* PHY is on System on Module, Q7[3-15] have Ethernet lines */ pinctrl_enet: enetgrp { fsl,pins = < MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 MX6QDL_PAD_RGMII_TXC__RGMII_TXC 0x1b030 MX6QDL_PAD_RGMII_TD0__RGMII_TD0 0x1b030 MX6QDL_PAD_RGMII_TD1__RGMII_TD1 0x1b030 MX6QDL_PAD_RGMII_TD2__RGMII_TD2 0x1b030 
MX6QDL_PAD_RGMII_TD3__RGMII_TD3 0x1b030 MX6QDL_PAD_RGMII_TX_CTL__RGMII_TX_CTL 0x1b030 MX6QDL_PAD_ENET_REF_CLK__ENET_TX_CLK 0x1b0b0 MX6QDL_PAD_RGMII_RXC__RGMII_RXC 0x1b030 MX6QDL_PAD_RGMII_RD0__RGMII_RD0 0x1b030 MX6QDL_PAD_RGMII_RD1__RGMII_RD1 0x1b030 MX6QDL_PAD_RGMII_RD2__RGMII_RD2 0x1b030 MX6QDL_PAD_RGMII_RD3__RGMII_RD3 0x1b030 MX6QDL_PAD_RGMII_RX_CTL__RGMII_RX_CTL 0x1b030 MX6QDL_PAD_ENET_TX_EN__ENET_TX_EN 0x1b0b0 >; }; pinctrl_hog: hoggrp { fsl,pins = < MX6QDL_PAD_GPIO_2__GPIO1_IO02 0x80000000 /* PCIE_WAKE_B */ MX6QDL_PAD_NANDF_WP_B__GPIO6_IO09 0x80000000 /* I2C multiplexer */ MX6QDL_PAD_NANDF_D6__GPIO2_IO06 0x80000000 /* SD4_CD# */ MX6QDL_PAD_NANDF_D7__GPIO2_IO07 0x80000000 /* SD4_WP */ MX6QDL_PAD_CSI0_MCLK__CCM_CLKO1 0x80000000 /* Camera MCLK */ >; }; pinctrl_i2c1: i2c1grp { fsl,pins = < MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1 /* Q7[66] I2C_CLK */ MX6QDL_PAD_EIM_D28__I2C1_SDA 0x4001b8b1 /* Q7[68] I2C_DAT */ >; }; pinctrl_i2c1_gpio: i2c1-gpiogrp { fsl,pins = < MX6QDL_PAD_EIM_D21__GPIO3_IO21 0x1b0b0 /* Q7[66] I2C_CLK */ MX6QDL_PAD_EIM_D28__GPIO3_IO28 0x1b0b0 /* Q7[68] I2C_DAT */ >; }; pinctrl_i2c2: i2c2grp { fsl,pins = < MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1 /* Q7[152] SDVO_CTRL_CLK */ MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1 /* Q7[150] SDVO_CTRL_DAT */ >; }; pinctrl_i2c2_gpio: i2c2-gpiogrp { fsl,pins = < MX6QDL_PAD_KEY_COL3__GPIO4_IO12 0x1b0b0 /* Q7[152] SDVO_CTRL_CLK */ MX6QDL_PAD_KEY_ROW3__GPIO4_IO13 0x1b0b0 /* Q7[150] SDVO_CTRL_DAT */ >; }; pinctrl_i2c3: i2c3grp { fsl,pins = < MX6QDL_PAD_GPIO_3__I2C3_SCL 0x4001b8b1 /* Q7[60] SMB_CLK */ MX6QDL_PAD_GPIO_6__I2C3_SDA 0x4001b8b1 /* Q7[62] SMB_DAT */ >; }; pinctrl_i2c3_gpio: i2c3-gpiogrp { fsl,pins = < MX6QDL_PAD_GPIO_3__GPIO1_IO03 0x1b0b0 /* Q7[60] SMB_CLK */ MX6QDL_PAD_GPIO_6__GPIO1_IO06 0x1b0b0 /* Q7[62] SMB_DAT */ >; }; pinctrl_phy_reset: phy-resetgrp { fsl,pins = < MX6QDL_PAD_EIM_D23__GPIO3_IO23 0x1b0b0 /* RGMII Phy Reset */ >; }; pinctrl_pwm4: pwm4grp { fsl,pins = < MX6QDL_PAD_SD1_CMD__PWM4_OUT 0x1b0b1 /* Q7[123] LVDS_BLT_CTRL */ >; }; pinctrl_q7_backlight_enable: q7-backlight-enablegrp { fsl,pins = < MX6QDL_PAD_GPIO_9__GPIO1_IO09 0x1b0b0 /* Q7[112] LVDS_BLEN */ >; }; pinctrl_q7_gpio0: q7-gpio0grp { fsl,pins = < MX6QDL_PAD_EIM_A25__GPIO5_IO02 0x1b0b0 /* Q7[185] GPIO0 */ >; }; pinctrl_q7_gpio1: q7-gpio1grp { fsl,pins = < MX6QDL_PAD_GPIO_8__GPIO1_IO08 0x1b0b0 /* Q7[186] GPIO1 */ >; }; pinctrl_q7_gpio2: q7-gpio2grp { fsl,pins = < MX6QDL_PAD_DISP0_DAT5__GPIO4_IO26 0x1b0b0 /* Q7[187] GPIO2 */ >; }; pinctrl_q7_gpio3: q7-gpio3grp { fsl,pins = < MX6QDL_PAD_DISP0_DAT6__GPIO4_IO27 0x1b0b0 /* Q7[188] GPIO3 */ >; }; pinctrl_q7_gpio4: q7-gpio4grp { fsl,pins = < MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0 /* Q7[189] GPIO4 */ >; }; pinctrl_q7_gpio5: q7-gpio5grp { fsl,pins = < MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x1b0b0 /* Q7[190] GPIO5 */ >; }; pinctrl_q7_gpio6: q7-gpio6grp { fsl,pins = < MX6QDL_PAD_GPIO_16__GPIO7_IO11 0x1b0b0 /* Q7[191] GPIO6 */ >; }; pinctrl_q7_gpio7: q7-gpio7grp { fsl,pins = < MX6QDL_PAD_KEY_COL4__GPIO4_IO14 0x1b0b0 /* Q7[192] GPIO7 */ >; }; pinctrl_q7_hda_reset: q7-hda-resetgrp { fsl,pins = < MX6QDL_PAD_NANDF_ALE__GPIO6_IO08 0x1b0b0 /* Q7[61] HDA_RST_N */ >; }; pinctrl_q7_lcd_power: lcd-powergrp { fsl,pins = < MX6QDL_PAD_GPIO_7__GPIO1_IO07 0x1b0b0 /* Q7[111] LVDS_PPEN */ >; }; pinctrl_q7_sdio_power: q7-sdio-powergrp { fsl,pins = < MX6QDL_PAD_DISP0_DAT9__GPIO4_IO30 0x1b0b0 /* Q7[47] SDIO_PWR# */ >; }; pinctrl_q7_sleep_button: q7-sleep-buttongrp { fsl,pins = < MX6QDL_PAD_KEY_ROW0__GPIO4_IO07 0x1b0b0 /* Q7[21] SLP_BTN# */ >; }; 
pinctrl_q7_spi_cs1: spi-cs1grp { fsl,pins = < MX6QDL_PAD_DISP0_DAT4__GPIO4_IO25 0x1b0b0 /* Q7[202] SPI_CS1# */ >; }; /* SPI1 bus does not leave System on Module */ pinctrl_spi1: spi1grp { fsl,pins = < MX6QDL_PAD_EIM_D16__ECSPI1_SCLK 0x100b1 MX6QDL_PAD_EIM_D17__ECSPI1_MISO 0x100b1 MX6QDL_PAD_EIM_D18__ECSPI1_MOSI 0x100b1 MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x1b0b0 >; }; /* Debug connector on Q7 module */ pinctrl_uart2: uart2grp { fsl,pins = < MX6QDL_PAD_EIM_D26__UART2_TX_DATA 0x1b0b1 MX6QDL_PAD_EIM_D27__UART2_RX_DATA 0x1b0b1 >; }; pinctrl_uart3: uart3grp { fsl,pins = < MX6QDL_PAD_EIM_D25__UART3_RX_DATA 0x1b0b1 /* Q7[177] UART0_RX */ MX6QDL_PAD_EIM_D24__UART3_TX_DATA 0x1b0b1 /* Q7[171] UART0_TX */ >; }; pinctrl_usbotg: usbotggrp { fsl,pins = < MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x17059 /* Q7[92] USB_ID */ >; }; /* µSD card slot on Q7 module */ pinctrl_usdhc2: usdhc2grp { fsl,pins = < MX6QDL_PAD_SD2_CMD__SD2_CMD 0x17059 MX6QDL_PAD_SD2_CLK__SD2_CLK 0x10059 MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x17059 MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x17059 MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x17059 MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x17059 MX6QDL_PAD_GPIO_4__GPIO1_IO04 0x1b0b0 /* SD2_CD */ >; }; /* eMMC module on Q7 module */ pinctrl_usdhc3: usdhc3grp { fsl,pins = < MX6QDL_PAD_SD3_CMD__SD3_CMD 0x17059 MX6QDL_PAD_SD3_CLK__SD3_CLK 0x10059 MX6QDL_PAD_SD3_DAT0__SD3_DATA0 0x17059 MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 MX6QDL_PAD_SD3_DAT4__SD3_DATA4 0x17059 MX6QDL_PAD_SD3_DAT5__SD3_DATA5 0x17059 MX6QDL_PAD_SD3_DAT6__SD3_DATA6 0x17059 MX6QDL_PAD_SD3_DAT7__SD3_DATA7 0x17059 >; }; pinctrl_usdhc4: usdhc4grp { fsl,pins = < MX6QDL_PAD_SD4_CMD__SD4_CMD 0x17059 /* Q7[45] SDIO_CMD */ MX6QDL_PAD_SD4_CLK__SD4_CLK 0x17059 /* Q7[42] SDIO_CLK */ MX6QDL_PAD_SD4_DAT1__SD4_DATA1 0x17059 /* Q7[48] SDIO_DAT1 */ MX6QDL_PAD_SD4_DAT0__SD4_DATA0 0x17059 /* Q7[49] SDIO_DAT0 */ MX6QDL_PAD_SD4_DAT3__SD4_DATA3 0x17059 /* Q7[50] SDIO_DAT3 */ MX6QDL_PAD_SD4_DAT2__SD4_DATA2 0x17059 /* Q7[51] SDIO_DAT2 */ >; }; pinctrl_wdog: wdoggrp { fsl,pins = < MX6QDL_PAD_DISP0_DAT8__WDOG1_B 0x1b0b0 /* Watchdog output signal */ >; }; };
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2011 Patrick McHardy <[email protected]> */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/netfilter/xt_devgroup.h> #include <linux/netfilter/x_tables.h> MODULE_AUTHOR("Patrick McHardy <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Xtables: Device group match"); MODULE_ALIAS("ipt_devgroup"); MODULE_ALIAS("ip6t_devgroup"); static bool devgroup_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_devgroup_info *info = par->matchinfo; if (info->flags & XT_DEVGROUP_MATCH_SRC && (((info->src_group ^ xt_in(par)->group) & info->src_mask ? 1 : 0) ^ ((info->flags & XT_DEVGROUP_INVERT_SRC) ? 1 : 0))) return false; if (info->flags & XT_DEVGROUP_MATCH_DST && (((info->dst_group ^ xt_out(par)->group) & info->dst_mask ? 1 : 0) ^ ((info->flags & XT_DEVGROUP_INVERT_DST) ? 1 : 0))) return false; return true; } static int devgroup_mt_checkentry(const struct xt_mtchk_param *par) { const struct xt_devgroup_info *info = par->matchinfo; if (info->flags & ~(XT_DEVGROUP_MATCH_SRC | XT_DEVGROUP_INVERT_SRC | XT_DEVGROUP_MATCH_DST | XT_DEVGROUP_INVERT_DST)) return -EINVAL; if (info->flags & XT_DEVGROUP_MATCH_SRC && par->hook_mask & ~((1 << NF_INET_PRE_ROUTING) | (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD))) return -EINVAL; if (info->flags & XT_DEVGROUP_MATCH_DST && par->hook_mask & ~((1 << NF_INET_FORWARD) | (1 << NF_INET_LOCAL_OUT) | (1 << NF_INET_POST_ROUTING))) return -EINVAL; return 0; } static struct xt_match devgroup_mt_reg __read_mostly = { .name = "devgroup", .match = devgroup_mt, .checkentry = devgroup_mt_checkentry, .matchsize = sizeof(struct xt_devgroup_info), .family = NFPROTO_UNSPEC, .me = THIS_MODULE }; static int __init devgroup_mt_init(void) { return xt_register_match(&devgroup_mt_reg); } static void __exit devgroup_mt_exit(void) { xt_unregister_match(&devgroup_mt_reg); } module_init(devgroup_mt_init); module_exit(devgroup_mt_exit);
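/*
 * Example command-line usage, with assumed interface and group numbers:
 * put eth0 into device group 4, then drop forwarded packets that entered
 * through any interface in that group:
 *
 *	ip link set dev eth0 group 4
 *	iptables -A FORWARD -m devgroup --src-group 4 -j DROP
 */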
// SPDX-License-Identifier: GPL-2.0-or-later /* * DA7213 ALSA SoC Codec Driver * * Copyright (c) 2013 Dialog Semiconductor * * Author: Adam Thomson <[email protected]> * Based on DA9055 ALSA SoC codec driver. */ #include <linux/acpi.h> #include <linux/of.h> #include <linux/property.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <linux/pm_runtime.h> #include <linux/units.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/da7213.h> #include "da7213.h" /* Gain and Volume */ static const DECLARE_TLV_DB_RANGE(aux_vol_tlv, /* -54dB */ 0x0, 0x11, TLV_DB_SCALE_ITEM(-5400, 0, 0), /* -52.5dB to 15dB */ 0x12, 0x3f, TLV_DB_SCALE_ITEM(-5250, 150, 0) ); static const DECLARE_TLV_DB_RANGE(digital_gain_tlv, 0x0, 0x07, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1), /* -78dB to 12dB */ 0x08, 0x7f, TLV_DB_SCALE_ITEM(-7800, 75, 0) ); static const DECLARE_TLV_DB_RANGE(alc_analog_gain_tlv, 0x0, 0x0, TLV_DB_SCALE_ITEM(TLV_DB_GAIN_MUTE, 0, 1), /* 0dB to 36dB */ 0x01, 0x07, TLV_DB_SCALE_ITEM(0, 600, 0) ); static const DECLARE_TLV_DB_SCALE(mic_vol_tlv, -600, 600, 0); static const DECLARE_TLV_DB_SCALE(mixin_gain_tlv, -450, 150, 0); static const DECLARE_TLV_DB_SCALE(eq_gain_tlv, -1050, 150, 0); static const DECLARE_TLV_DB_SCALE(hp_vol_tlv, -5700, 100, 0); static const DECLARE_TLV_DB_SCALE(lineout_vol_tlv, -4800, 100, 0); static const DECLARE_TLV_DB_SCALE(alc_threshold_tlv, -9450, 150, 0); static const DECLARE_TLV_DB_SCALE(alc_gain_tlv, 0, 600, 0); static const DECLARE_TLV_DB_SCALE(da7213_tonegen_gain_tlv, -4500, 300, 0); /* ADC and DAC voice mode (8kHz) high pass cutoff value */ static const char * const da7213_voice_hpf_corner_txt[] = { "2.5Hz", "25Hz", "50Hz", "100Hz", "150Hz", "200Hz", "300Hz", "400Hz" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_voice_hpf_corner, DA7213_DAC_FILTERS1, DA7213_VOICE_HPF_CORNER_SHIFT, da7213_voice_hpf_corner_txt); static SOC_ENUM_SINGLE_DECL(da7213_adc_voice_hpf_corner, DA7213_ADC_FILTERS1, DA7213_VOICE_HPF_CORNER_SHIFT, da7213_voice_hpf_corner_txt); /* ADC and DAC high pass filter cutoff value */ static const char * const da7213_audio_hpf_corner_txt[] = { "Fs/24000", "Fs/12000", "Fs/6000", "Fs/3000" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_audio_hpf_corner, DA7213_DAC_FILTERS1 , DA7213_AUDIO_HPF_CORNER_SHIFT, da7213_audio_hpf_corner_txt); static SOC_ENUM_SINGLE_DECL(da7213_adc_audio_hpf_corner, DA7213_ADC_FILTERS1, DA7213_AUDIO_HPF_CORNER_SHIFT, da7213_audio_hpf_corner_txt); static const char * const da7213_tonegen_dtmf_key_txt[] = { "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "A", "B", "C", "D", "*", "#" }; static const struct soc_enum da7213_tonegen_dtmf_key = SOC_ENUM_SINGLE(DA7213_TONE_GEN_CFG1, DA7213_DTMF_REG_SHIFT, DA7213_DTMF_REG_MAX, da7213_tonegen_dtmf_key_txt); static const char * const da7213_tonegen_swg_sel_txt[] = { "Sum", "SWG1", "SWG2", "Sum" }; static const struct soc_enum da7213_tonegen_swg_sel = SOC_ENUM_SINGLE(DA7213_TONE_GEN_CFG2, DA7213_SWG_SEL_SHIFT, DA7213_SWG_SEL_MAX, da7213_tonegen_swg_sel_txt); /* Gain ramping rate value */ static const char * const da7213_gain_ramp_rate_txt[] = { "nominal rate * 8", "nominal rate * 16", "nominal rate / 16", "nominal rate / 32" }; static SOC_ENUM_SINGLE_DECL(da7213_gain_ramp_rate, DA7213_GAIN_RAMP_CTRL, DA7213_GAIN_RAMP_RATE_SHIFT, da7213_gain_ramp_rate_txt); /* DAC noise gate setup time value */ static const char * const 
da7213_dac_ng_setup_time_txt[] = { "256 samples", "512 samples", "1024 samples", "2048 samples" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_ng_setup_time, DA7213_DAC_NG_SETUP_TIME, DA7213_DAC_NG_SETUP_TIME_SHIFT, da7213_dac_ng_setup_time_txt); /* DAC noise gate rampup rate value */ static const char * const da7213_dac_ng_rampup_txt[] = { "0.02 ms/dB", "0.16 ms/dB" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_ng_rampup_rate, DA7213_DAC_NG_SETUP_TIME, DA7213_DAC_NG_RAMPUP_RATE_SHIFT, da7213_dac_ng_rampup_txt); /* DAC noise gate rampdown rate value */ static const char * const da7213_dac_ng_rampdown_txt[] = { "0.64 ms/dB", "20.48 ms/dB" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_ng_rampdown_rate, DA7213_DAC_NG_SETUP_TIME, DA7213_DAC_NG_RAMPDN_RATE_SHIFT, da7213_dac_ng_rampdown_txt); /* DAC soft mute rate value */ static const char * const da7213_dac_soft_mute_rate_txt[] = { "1", "2", "4", "8", "16", "32", "64" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_soft_mute_rate, DA7213_DAC_FILTERS5, DA7213_DAC_SOFTMUTE_RATE_SHIFT, da7213_dac_soft_mute_rate_txt); /* ALC Attack Rate select */ static const char * const da7213_alc_attack_rate_txt[] = { "44/fs", "88/fs", "176/fs", "352/fs", "704/fs", "1408/fs", "2816/fs", "5632/fs", "11264/fs", "22528/fs", "45056/fs", "90112/fs", "180224/fs" }; static SOC_ENUM_SINGLE_DECL(da7213_alc_attack_rate, DA7213_ALC_CTRL2, DA7213_ALC_ATTACK_SHIFT, da7213_alc_attack_rate_txt); /* ALC Release Rate select */ static const char * const da7213_alc_release_rate_txt[] = { "176/fs", "352/fs", "704/fs", "1408/fs", "2816/fs", "5632/fs", "11264/fs", "22528/fs", "45056/fs", "90112/fs", "180224/fs" }; static SOC_ENUM_SINGLE_DECL(da7213_alc_release_rate, DA7213_ALC_CTRL2, DA7213_ALC_RELEASE_SHIFT, da7213_alc_release_rate_txt); /* ALC Hold Time select */ static const char * const da7213_alc_hold_time_txt[] = { "62/fs", "124/fs", "248/fs", "496/fs", "992/fs", "1984/fs", "3968/fs", "7936/fs", "15872/fs", "31744/fs", "63488/fs", "126976/fs", "253952/fs", "507904/fs", "1015808/fs", "2031616/fs" }; static SOC_ENUM_SINGLE_DECL(da7213_alc_hold_time, DA7213_ALC_CTRL3, DA7213_ALC_HOLD_SHIFT, da7213_alc_hold_time_txt); /* ALC Input Signal Tracking rate select */ static const char * const da7213_alc_integ_rate_txt[] = { "1/4", "1/16", "1/256", "1/65536" }; static SOC_ENUM_SINGLE_DECL(da7213_alc_integ_attack_rate, DA7213_ALC_CTRL3, DA7213_ALC_INTEG_ATTACK_SHIFT, da7213_alc_integ_rate_txt); static SOC_ENUM_SINGLE_DECL(da7213_alc_integ_release_rate, DA7213_ALC_CTRL3, DA7213_ALC_INTEG_RELEASE_SHIFT, da7213_alc_integ_rate_txt); /* * Control Functions */ /* Locked Kcontrol calls */ static int da7213_volsw_locked_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int ret; mutex_lock(&da7213->ctrl_lock); ret = snd_soc_get_volsw(kcontrol, ucontrol); mutex_unlock(&da7213->ctrl_lock); return ret; } static int da7213_volsw_locked_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int ret; mutex_lock(&da7213->ctrl_lock); ret = snd_soc_put_volsw(kcontrol, ucontrol); mutex_unlock(&da7213->ctrl_lock); return ret; } static int da7213_enum_locked_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = 
snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int ret; mutex_lock(&da7213->ctrl_lock); ret = snd_soc_get_enum_double(kcontrol, ucontrol); mutex_unlock(&da7213->ctrl_lock); return ret; } static int da7213_enum_locked_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int ret; mutex_lock(&da7213->ctrl_lock); ret = snd_soc_put_enum_double(kcontrol, ucontrol); mutex_unlock(&da7213->ctrl_lock); return ret; } /* ALC */ static int da7213_get_alc_data(struct snd_soc_component *component, u8 reg_val) { int mid_data, top_data; int sum = 0; u8 iteration; for (iteration = 0; iteration < DA7213_ALC_AVG_ITERATIONS; iteration++) { /* Select the left or right channel and capture data */ snd_soc_component_write(component, DA7213_ALC_CIC_OP_LVL_CTRL, reg_val); /* Select middle 8 bits for read back from data register */ snd_soc_component_write(component, DA7213_ALC_CIC_OP_LVL_CTRL, reg_val | DA7213_ALC_DATA_MIDDLE); mid_data = snd_soc_component_read(component, DA7213_ALC_CIC_OP_LVL_DATA); /* Select top 8 bits for read back from data register */ snd_soc_component_write(component, DA7213_ALC_CIC_OP_LVL_CTRL, reg_val | DA7213_ALC_DATA_TOP); top_data = snd_soc_component_read(component, DA7213_ALC_CIC_OP_LVL_DATA); sum += ((mid_data << 8) | (top_data << 16)); } return sum / DA7213_ALC_AVG_ITERATIONS; } static void da7213_alc_calib_man(struct snd_soc_component *component) { u8 reg_val; int avg_left_data, avg_right_data, offset_l, offset_r; /* Calculate average for Left and Right data */ /* Left Data */ avg_left_data = da7213_get_alc_data(component, DA7213_ALC_CIC_OP_CHANNEL_LEFT); /* Right Data */ avg_right_data = da7213_get_alc_data(component, DA7213_ALC_CIC_OP_CHANNEL_RIGHT); /* Calculate DC offset */ offset_l = -avg_left_data; offset_r = -avg_right_data; reg_val = (offset_l & DA7213_ALC_OFFSET_15_8) >> 8; snd_soc_component_write(component, DA7213_ALC_OFFSET_MAN_M_L, reg_val); reg_val = (offset_l & DA7213_ALC_OFFSET_19_16) >> 16; snd_soc_component_write(component, DA7213_ALC_OFFSET_MAN_U_L, reg_val); reg_val = (offset_r & DA7213_ALC_OFFSET_15_8) >> 8; snd_soc_component_write(component, DA7213_ALC_OFFSET_MAN_M_R, reg_val); reg_val = (offset_r & DA7213_ALC_OFFSET_19_16) >> 16; snd_soc_component_write(component, DA7213_ALC_OFFSET_MAN_U_R, reg_val); /* Enable analog/digital gain mode & offset cancellation */ snd_soc_component_update_bits(component, DA7213_ALC_CTRL1, DA7213_ALC_OFFSET_EN | DA7213_ALC_SYNC_MODE, DA7213_ALC_OFFSET_EN | DA7213_ALC_SYNC_MODE); } static void da7213_alc_calib_auto(struct snd_soc_component *component) { u8 alc_ctrl1; /* Begin auto calibration and wait for completion */ snd_soc_component_update_bits(component, DA7213_ALC_CTRL1, DA7213_ALC_AUTO_CALIB_EN, DA7213_ALC_AUTO_CALIB_EN); do { alc_ctrl1 = snd_soc_component_read(component, DA7213_ALC_CTRL1); } while (alc_ctrl1 & DA7213_ALC_AUTO_CALIB_EN); /* If auto calibration fails, fall back to digital gain only mode */ if (alc_ctrl1 & DA7213_ALC_CALIB_OVERFLOW) { dev_warn(component->dev, "ALC auto calibration failed with overflow\n"); snd_soc_component_update_bits(component, DA7213_ALC_CTRL1, DA7213_ALC_OFFSET_EN | DA7213_ALC_SYNC_MODE, 0); } else { /* Enable analog/digital gain mode & offset cancellation */ snd_soc_component_update_bits(component, DA7213_ALC_CTRL1, DA7213_ALC_OFFSET_EN | DA7213_ALC_SYNC_MODE, 
DA7213_ALC_OFFSET_EN | DA7213_ALC_SYNC_MODE); } } static void da7213_alc_calib(struct snd_soc_component *component) { struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); u8 adc_l_ctrl, adc_r_ctrl; u8 mixin_l_sel, mixin_r_sel; u8 mic_1_ctrl, mic_2_ctrl; /* Save current values from ADC control registers */ adc_l_ctrl = snd_soc_component_read(component, DA7213_ADC_L_CTRL); adc_r_ctrl = snd_soc_component_read(component, DA7213_ADC_R_CTRL); /* Save current values from MIXIN_L/R_SELECT registers */ mixin_l_sel = snd_soc_component_read(component, DA7213_MIXIN_L_SELECT); mixin_r_sel = snd_soc_component_read(component, DA7213_MIXIN_R_SELECT); /* Save current values from MIC control registers */ mic_1_ctrl = snd_soc_component_read(component, DA7213_MIC_1_CTRL); mic_2_ctrl = snd_soc_component_read(component, DA7213_MIC_2_CTRL); /* Enable ADC Left and Right */ snd_soc_component_update_bits(component, DA7213_ADC_L_CTRL, DA7213_ADC_EN, DA7213_ADC_EN); snd_soc_component_update_bits(component, DA7213_ADC_R_CTRL, DA7213_ADC_EN, DA7213_ADC_EN); /* Enable MIC paths */ snd_soc_component_update_bits(component, DA7213_MIXIN_L_SELECT, DA7213_MIXIN_L_MIX_SELECT_MIC_1 | DA7213_MIXIN_L_MIX_SELECT_MIC_2, DA7213_MIXIN_L_MIX_SELECT_MIC_1 | DA7213_MIXIN_L_MIX_SELECT_MIC_2); snd_soc_component_update_bits(component, DA7213_MIXIN_R_SELECT, DA7213_MIXIN_R_MIX_SELECT_MIC_2 | DA7213_MIXIN_R_MIX_SELECT_MIC_1, DA7213_MIXIN_R_MIX_SELECT_MIC_2 | DA7213_MIXIN_R_MIX_SELECT_MIC_1); /* Mute MIC PGAs */ snd_soc_component_update_bits(component, DA7213_MIC_1_CTRL, DA7213_MUTE_EN, DA7213_MUTE_EN); snd_soc_component_update_bits(component, DA7213_MIC_2_CTRL, DA7213_MUTE_EN, DA7213_MUTE_EN); /* Perform calibration */ if (da7213->alc_calib_auto) da7213_alc_calib_auto(component); else da7213_alc_calib_man(component); /* Restore MIXIN_L/R_SELECT registers to their original states */ snd_soc_component_write(component, DA7213_MIXIN_L_SELECT, mixin_l_sel); snd_soc_component_write(component, DA7213_MIXIN_R_SELECT, mixin_r_sel); /* Restore ADC control registers to their original states */ snd_soc_component_write(component, DA7213_ADC_L_CTRL, adc_l_ctrl); snd_soc_component_write(component, DA7213_ADC_R_CTRL, adc_r_ctrl); /* Restore original values of MIC control registers */ snd_soc_component_write(component, DA7213_MIC_1_CTRL, mic_1_ctrl); snd_soc_component_write(component, DA7213_MIC_2_CTRL, mic_2_ctrl); } static int da7213_put_mixin_gain(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int ret; ret = snd_soc_put_volsw_2r(kcontrol, ucontrol); /* If ALC in operation, make sure calibrated offsets are updated */ if ((!ret) && (da7213->alc_en)) da7213_alc_calib(component); return ret; } static int da7213_put_alc_sw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); /* Force ALC offset calibration if enabling ALC */ if (ucontrol->value.integer.value[0] || ucontrol->value.integer.value[1]) { if (!da7213->alc_en) { da7213_alc_calib(component); da7213->alc_en = true; } } else { da7213->alc_en = false; } return snd_soc_put_volsw(kcontrol, ucontrol); } /* ToneGen */ static int da7213_tonegen_freq_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = 
snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); struct soc_mixer_control *mixer_ctrl = (struct soc_mixer_control *) kcontrol->private_value; unsigned int reg = mixer_ctrl->reg; __le16 val; int ret; mutex_lock(&da7213->ctrl_lock); ret = regmap_raw_read(da7213->regmap, reg, &val, sizeof(val)); mutex_unlock(&da7213->ctrl_lock); if (ret) return ret; /* * Frequency value spans two 8-bit registers, lower then upper byte. * Therefore we need to convert to host endianness here. */ ucontrol->value.integer.value[0] = le16_to_cpu(val); return 0; } static int da7213_tonegen_freq_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_component *component = snd_soc_kcontrol_component(kcontrol); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); struct soc_mixer_control *mixer_ctrl = (struct soc_mixer_control *) kcontrol->private_value; unsigned int reg = mixer_ctrl->reg; __le16 val_new, val_old; int ret; /* * Frequency value spans two 8-bit registers, lower then upper byte. * Therefore we need to convert to little endian here to align with * HW registers. */ val_new = cpu_to_le16(ucontrol->value.integer.value[0]); mutex_lock(&da7213->ctrl_lock); ret = regmap_raw_read(da7213->regmap, reg, &val_old, sizeof(val_old)); if (ret == 0 && (val_old != val_new)) ret = regmap_raw_write(da7213->regmap, reg, &val_new, sizeof(val_new)); mutex_unlock(&da7213->ctrl_lock); if (ret < 0) return ret; return val_old != val_new; } /* * KControls */ static const struct snd_kcontrol_new da7213_snd_controls[] = { /* Volume controls */ SOC_SINGLE_TLV("Mic 1 Volume", DA7213_MIC_1_GAIN, DA7213_MIC_AMP_GAIN_SHIFT, DA7213_MIC_AMP_GAIN_MAX, DA7213_NO_INVERT, mic_vol_tlv), SOC_SINGLE_TLV("Mic 2 Volume", DA7213_MIC_2_GAIN, DA7213_MIC_AMP_GAIN_SHIFT, DA7213_MIC_AMP_GAIN_MAX, DA7213_NO_INVERT, mic_vol_tlv), SOC_DOUBLE_R_TLV("Aux Volume", DA7213_AUX_L_GAIN, DA7213_AUX_R_GAIN, DA7213_AUX_AMP_GAIN_SHIFT, DA7213_AUX_AMP_GAIN_MAX, DA7213_NO_INVERT, aux_vol_tlv), SOC_DOUBLE_R_EXT_TLV("Mixin PGA Volume", DA7213_MIXIN_L_GAIN, DA7213_MIXIN_R_GAIN, DA7213_MIXIN_AMP_GAIN_SHIFT, DA7213_MIXIN_AMP_GAIN_MAX, DA7213_NO_INVERT, snd_soc_get_volsw_2r, da7213_put_mixin_gain, mixin_gain_tlv), SOC_DOUBLE_R_TLV("ADC Volume", DA7213_ADC_L_GAIN, DA7213_ADC_R_GAIN, DA7213_ADC_AMP_GAIN_SHIFT, DA7213_ADC_AMP_GAIN_MAX, DA7213_NO_INVERT, digital_gain_tlv), SOC_DOUBLE_R_TLV("DAC Volume", DA7213_DAC_L_GAIN, DA7213_DAC_R_GAIN, DA7213_DAC_AMP_GAIN_SHIFT, DA7213_DAC_AMP_GAIN_MAX, DA7213_NO_INVERT, digital_gain_tlv), SOC_DOUBLE_R_TLV("Headphone Volume", DA7213_HP_L_GAIN, DA7213_HP_R_GAIN, DA7213_HP_AMP_GAIN_SHIFT, DA7213_HP_AMP_GAIN_MAX, DA7213_NO_INVERT, hp_vol_tlv), SOC_SINGLE_TLV("Lineout Volume", DA7213_LINE_GAIN, DA7213_LINE_AMP_GAIN_SHIFT, DA7213_LINE_AMP_GAIN_MAX, DA7213_NO_INVERT, lineout_vol_tlv), /* DAC Equalizer controls */ SOC_SINGLE("DAC EQ Switch", DA7213_DAC_FILTERS4, DA7213_DAC_EQ_EN_SHIFT, DA7213_DAC_EQ_EN_MAX, DA7213_NO_INVERT), SOC_SINGLE_TLV("DAC EQ1 Volume", DA7213_DAC_FILTERS2, DA7213_DAC_EQ_BAND1_SHIFT, DA7213_DAC_EQ_BAND_MAX, DA7213_NO_INVERT, eq_gain_tlv), SOC_SINGLE_TLV("DAC EQ2 Volume", DA7213_DAC_FILTERS2, DA7213_DAC_EQ_BAND2_SHIFT, DA7213_DAC_EQ_BAND_MAX, DA7213_NO_INVERT, eq_gain_tlv), SOC_SINGLE_TLV("DAC EQ3 Volume", DA7213_DAC_FILTERS3, DA7213_DAC_EQ_BAND3_SHIFT, DA7213_DAC_EQ_BAND_MAX, DA7213_NO_INVERT, eq_gain_tlv), SOC_SINGLE_TLV("DAC EQ4 Volume", DA7213_DAC_FILTERS3, DA7213_DAC_EQ_BAND4_SHIFT, DA7213_DAC_EQ_BAND_MAX, 
DA7213_NO_INVERT, eq_gain_tlv), SOC_SINGLE_TLV("DAC EQ5 Volume", DA7213_DAC_FILTERS4, DA7213_DAC_EQ_BAND5_SHIFT, DA7213_DAC_EQ_BAND_MAX, DA7213_NO_INVERT, eq_gain_tlv), /* High Pass Filter and Voice Mode controls */ SOC_SINGLE("ADC HPF Switch", DA7213_ADC_FILTERS1, DA7213_HPF_EN_SHIFT, DA7213_HPF_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("ADC HPF Cutoff", da7213_adc_audio_hpf_corner), SOC_SINGLE("ADC Voice Mode Switch", DA7213_ADC_FILTERS1, DA7213_VOICE_EN_SHIFT, DA7213_VOICE_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("ADC Voice Cutoff", da7213_adc_voice_hpf_corner), SOC_SINGLE("DAC HPF Switch", DA7213_DAC_FILTERS1, DA7213_HPF_EN_SHIFT, DA7213_HPF_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("DAC HPF Cutoff", da7213_dac_audio_hpf_corner), SOC_SINGLE("DAC Voice Mode Switch", DA7213_DAC_FILTERS1, DA7213_VOICE_EN_SHIFT, DA7213_VOICE_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("DAC Voice Cutoff", da7213_dac_voice_hpf_corner), /* Mute controls */ SOC_SINGLE("Mic 1 Switch", DA7213_MIC_1_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_SINGLE("Mic 2 Switch", DA7213_MIC_2_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_DOUBLE_R("Aux Switch", DA7213_AUX_L_CTRL, DA7213_AUX_R_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_DOUBLE_R("Mixin PGA Switch", DA7213_MIXIN_L_CTRL, DA7213_MIXIN_R_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_DOUBLE_R("ADC Switch", DA7213_ADC_L_CTRL, DA7213_ADC_R_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_DOUBLE_R("Headphone Switch", DA7213_HP_L_CTRL, DA7213_HP_R_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_SINGLE("Lineout Switch", DA7213_LINE_CTRL, DA7213_MUTE_EN_SHIFT, DA7213_MUTE_EN_MAX, DA7213_INVERT), SOC_SINGLE("DAC Soft Mute Switch", DA7213_DAC_FILTERS5, DA7213_DAC_SOFTMUTE_EN_SHIFT, DA7213_DAC_SOFTMUTE_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("DAC Soft Mute Rate", da7213_dac_soft_mute_rate), /* Zero Cross controls */ SOC_DOUBLE_R("Aux ZC Switch", DA7213_AUX_L_CTRL, DA7213_AUX_R_CTRL, DA7213_ZC_EN_SHIFT, DA7213_ZC_EN_MAX, DA7213_NO_INVERT), SOC_DOUBLE_R("Mixin PGA ZC Switch", DA7213_MIXIN_L_CTRL, DA7213_MIXIN_R_CTRL, DA7213_ZC_EN_SHIFT, DA7213_ZC_EN_MAX, DA7213_NO_INVERT), SOC_DOUBLE_R("Headphone ZC Switch", DA7213_HP_L_CTRL, DA7213_HP_R_CTRL, DA7213_ZC_EN_SHIFT, DA7213_ZC_EN_MAX, DA7213_NO_INVERT), /* Tone Generator */ SOC_SINGLE_EXT_TLV("ToneGen Volume", DA7213_TONE_GEN_CFG2, DA7213_TONE_GEN_GAIN_SHIFT, DA7213_TONE_GEN_GAIN_MAX, DA7213_NO_INVERT, da7213_volsw_locked_get, da7213_volsw_locked_put, da7213_tonegen_gain_tlv), SOC_ENUM_EXT("ToneGen DTMF Key", da7213_tonegen_dtmf_key, da7213_enum_locked_get, da7213_enum_locked_put), SOC_SINGLE_EXT("ToneGen DTMF Switch", DA7213_TONE_GEN_CFG1, DA7213_DTMF_EN_SHIFT, DA7213_SWITCH_EN_MAX, DA7213_NO_INVERT, da7213_volsw_locked_get, da7213_volsw_locked_put), SOC_SINGLE_EXT("ToneGen Start", DA7213_TONE_GEN_CFG1, DA7213_START_STOPN_SHIFT, DA7213_SWITCH_EN_MAX, DA7213_NO_INVERT, da7213_volsw_locked_get, da7213_volsw_locked_put), SOC_ENUM_EXT("ToneGen Sinewave Gen Type", da7213_tonegen_swg_sel, da7213_enum_locked_get, da7213_enum_locked_put), SOC_SINGLE_EXT("ToneGen Sinewave1 Freq", DA7213_TONE_GEN_FREQ1_L, DA7213_FREQ1_L_SHIFT, DA7213_FREQ_MAX, DA7213_NO_INVERT, da7213_tonegen_freq_get, da7213_tonegen_freq_put), SOC_SINGLE_EXT("ToneGen Sinewave2 Freq", DA7213_TONE_GEN_FREQ2_L, DA7213_FREQ2_L_SHIFT, DA7213_FREQ_MAX, DA7213_NO_INVERT, da7213_tonegen_freq_get, da7213_tonegen_freq_put), SOC_SINGLE_EXT("ToneGen On Time", DA7213_TONE_GEN_ON_PER, 
DA7213_BEEP_ON_PER_SHIFT, DA7213_BEEP_ON_OFF_MAX, DA7213_NO_INVERT, da7213_volsw_locked_get, da7213_volsw_locked_put), SOC_SINGLE("ToneGen Off Time", DA7213_TONE_GEN_OFF_PER, DA7213_BEEP_OFF_PER_SHIFT, DA7213_BEEP_ON_OFF_MAX, DA7213_NO_INVERT), /* Gain Ramping controls */ SOC_DOUBLE_R("Aux Gain Ramping Switch", DA7213_AUX_L_CTRL, DA7213_AUX_R_CTRL, DA7213_GAIN_RAMP_EN_SHIFT, DA7213_GAIN_RAMP_EN_MAX, DA7213_NO_INVERT), SOC_DOUBLE_R("Mixin Gain Ramping Switch", DA7213_MIXIN_L_CTRL, DA7213_MIXIN_R_CTRL, DA7213_GAIN_RAMP_EN_SHIFT, DA7213_GAIN_RAMP_EN_MAX, DA7213_NO_INVERT), SOC_DOUBLE_R("ADC Gain Ramping Switch", DA7213_ADC_L_CTRL, DA7213_ADC_R_CTRL, DA7213_GAIN_RAMP_EN_SHIFT, DA7213_GAIN_RAMP_EN_MAX, DA7213_NO_INVERT), SOC_DOUBLE_R("DAC Gain Ramping Switch", DA7213_DAC_L_CTRL, DA7213_DAC_R_CTRL, DA7213_GAIN_RAMP_EN_SHIFT, DA7213_GAIN_RAMP_EN_MAX, DA7213_NO_INVERT), SOC_DOUBLE_R("Headphone Gain Ramping Switch", DA7213_HP_L_CTRL, DA7213_HP_R_CTRL, DA7213_GAIN_RAMP_EN_SHIFT, DA7213_GAIN_RAMP_EN_MAX, DA7213_NO_INVERT), SOC_SINGLE("Lineout Gain Ramping Switch", DA7213_LINE_CTRL, DA7213_GAIN_RAMP_EN_SHIFT, DA7213_GAIN_RAMP_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("Gain Ramping Rate", da7213_gain_ramp_rate), /* DAC Noise Gate controls */ SOC_SINGLE("DAC NG Switch", DA7213_DAC_NG_CTRL, DA7213_DAC_NG_EN_SHIFT, DA7213_DAC_NG_EN_MAX, DA7213_NO_INVERT), SOC_ENUM("DAC NG Setup Time", da7213_dac_ng_setup_time), SOC_ENUM("DAC NG Rampup Rate", da7213_dac_ng_rampup_rate), SOC_ENUM("DAC NG Rampdown Rate", da7213_dac_ng_rampdown_rate), SOC_SINGLE("DAC NG OFF Threshold", DA7213_DAC_NG_OFF_THRESHOLD, DA7213_DAC_NG_THRESHOLD_SHIFT, DA7213_DAC_NG_THRESHOLD_MAX, DA7213_NO_INVERT), SOC_SINGLE("DAC NG ON Threshold", DA7213_DAC_NG_ON_THRESHOLD, DA7213_DAC_NG_THRESHOLD_SHIFT, DA7213_DAC_NG_THRESHOLD_MAX, DA7213_NO_INVERT), /* DAC Routing & Inversion */ SOC_DOUBLE("DAC Mono Switch", DA7213_DIG_ROUTING_DAC, DA7213_DAC_L_MONO_SHIFT, DA7213_DAC_R_MONO_SHIFT, DA7213_DAC_MONO_MAX, DA7213_NO_INVERT), SOC_DOUBLE("DAC Invert Switch", DA7213_DIG_CTRL, DA7213_DAC_L_INV_SHIFT, DA7213_DAC_R_INV_SHIFT, DA7213_DAC_INV_MAX, DA7213_NO_INVERT), /* DMIC controls */ SOC_DOUBLE_R("DMIC Switch", DA7213_MIXIN_L_SELECT, DA7213_MIXIN_R_SELECT, DA7213_DMIC_EN_SHIFT, DA7213_DMIC_EN_MAX, DA7213_NO_INVERT), /* ALC Controls */ SOC_DOUBLE_EXT("ALC Switch", DA7213_ALC_CTRL1, DA7213_ALC_L_EN_SHIFT, DA7213_ALC_R_EN_SHIFT, DA7213_ALC_EN_MAX, DA7213_NO_INVERT, snd_soc_get_volsw, da7213_put_alc_sw), SOC_ENUM("ALC Attack Rate", da7213_alc_attack_rate), SOC_ENUM("ALC Release Rate", da7213_alc_release_rate), SOC_ENUM("ALC Hold Time", da7213_alc_hold_time), /* * Rate at which input signal envelope is tracked as the signal gets * larger */ SOC_ENUM("ALC Integ Attack Rate", da7213_alc_integ_attack_rate), /* * Rate at which input signal envelope is tracked as the signal gets * smaller */ SOC_ENUM("ALC Integ Release Rate", da7213_alc_integ_release_rate), SOC_SINGLE_TLV("ALC Noise Threshold Volume", DA7213_ALC_NOISE, DA7213_ALC_THRESHOLD_SHIFT, DA7213_ALC_THRESHOLD_MAX, DA7213_INVERT, alc_threshold_tlv), SOC_SINGLE_TLV("ALC Min Threshold Volume", DA7213_ALC_TARGET_MIN, DA7213_ALC_THRESHOLD_SHIFT, DA7213_ALC_THRESHOLD_MAX, DA7213_INVERT, alc_threshold_tlv), SOC_SINGLE_TLV("ALC Max Threshold Volume", DA7213_ALC_TARGET_MAX, DA7213_ALC_THRESHOLD_SHIFT, DA7213_ALC_THRESHOLD_MAX, DA7213_INVERT, alc_threshold_tlv), SOC_SINGLE_TLV("ALC Max Attenuation Volume", DA7213_ALC_GAIN_LIMITS, DA7213_ALC_ATTEN_MAX_SHIFT, DA7213_ALC_ATTEN_GAIN_MAX_MAX, DA7213_NO_INVERT, alc_gain_tlv), 
SOC_SINGLE_TLV("ALC Max Gain Volume", DA7213_ALC_GAIN_LIMITS, DA7213_ALC_GAIN_MAX_SHIFT, DA7213_ALC_ATTEN_GAIN_MAX_MAX, DA7213_NO_INVERT, alc_gain_tlv), SOC_SINGLE_TLV("ALC Min Analog Gain Volume", DA7213_ALC_ANA_GAIN_LIMITS, DA7213_ALC_ANA_GAIN_MIN_SHIFT, DA7213_ALC_ANA_GAIN_MAX, DA7213_NO_INVERT, alc_analog_gain_tlv), SOC_SINGLE_TLV("ALC Max Analog Gain Volume", DA7213_ALC_ANA_GAIN_LIMITS, DA7213_ALC_ANA_GAIN_MAX_SHIFT, DA7213_ALC_ANA_GAIN_MAX, DA7213_NO_INVERT, alc_analog_gain_tlv), SOC_SINGLE("ALC Anticlip Mode Switch", DA7213_ALC_ANTICLIP_CTRL, DA7213_ALC_ANTICLIP_EN_SHIFT, DA7213_ALC_ANTICLIP_EN_MAX, DA7213_NO_INVERT), SOC_SINGLE("ALC Anticlip Level", DA7213_ALC_ANTICLIP_LEVEL, DA7213_ALC_ANTICLIP_LEVEL_SHIFT, DA7213_ALC_ANTICLIP_LEVEL_MAX, DA7213_NO_INVERT), }; /* * DAPM */ /* * Enums */ /* MIC PGA source select */ static const char * const da7213_mic_amp_in_sel_txt[] = { "Differential", "MIC_P", "MIC_N" }; static SOC_ENUM_SINGLE_DECL(da7213_mic_1_amp_in_sel, DA7213_MIC_1_CTRL, DA7213_MIC_AMP_IN_SEL_SHIFT, da7213_mic_amp_in_sel_txt); static const struct snd_kcontrol_new da7213_mic_1_amp_in_sel_mux = SOC_DAPM_ENUM("Mic 1 Amp Source MUX", da7213_mic_1_amp_in_sel); static SOC_ENUM_SINGLE_DECL(da7213_mic_2_amp_in_sel, DA7213_MIC_2_CTRL, DA7213_MIC_AMP_IN_SEL_SHIFT, da7213_mic_amp_in_sel_txt); static const struct snd_kcontrol_new da7213_mic_2_amp_in_sel_mux = SOC_DAPM_ENUM("Mic 2 Amp Source MUX", da7213_mic_2_amp_in_sel); /* DAI routing select */ static const char * const da7213_dai_src_txt[] = { "ADC Left", "ADC Right", "DAI Input Left", "DAI Input Right" }; static SOC_ENUM_SINGLE_DECL(da7213_dai_l_src, DA7213_DIG_ROUTING_DAI, DA7213_DAI_L_SRC_SHIFT, da7213_dai_src_txt); static const struct snd_kcontrol_new da7213_dai_l_src_mux = SOC_DAPM_ENUM("DAI Left Source MUX", da7213_dai_l_src); static SOC_ENUM_SINGLE_DECL(da7213_dai_r_src, DA7213_DIG_ROUTING_DAI, DA7213_DAI_R_SRC_SHIFT, da7213_dai_src_txt); static const struct snd_kcontrol_new da7213_dai_r_src_mux = SOC_DAPM_ENUM("DAI Right Source MUX", da7213_dai_r_src); /* DAC routing select */ static const char * const da7213_dac_src_txt[] = { "ADC Output Left", "ADC Output Right", "DAI Input Left", "DAI Input Right" }; static SOC_ENUM_SINGLE_DECL(da7213_dac_l_src, DA7213_DIG_ROUTING_DAC, DA7213_DAC_L_SRC_SHIFT, da7213_dac_src_txt); static const struct snd_kcontrol_new da7213_dac_l_src_mux = SOC_DAPM_ENUM("DAC Left Source MUX", da7213_dac_l_src); static SOC_ENUM_SINGLE_DECL(da7213_dac_r_src, DA7213_DIG_ROUTING_DAC, DA7213_DAC_R_SRC_SHIFT, da7213_dac_src_txt); static const struct snd_kcontrol_new da7213_dac_r_src_mux = SOC_DAPM_ENUM("DAC Right Source MUX", da7213_dac_r_src); /* * Mixer Controls */ /* Mixin Left */ static const struct snd_kcontrol_new da7213_dapm_mixinl_controls[] = { SOC_DAPM_SINGLE("Aux Left Switch", DA7213_MIXIN_L_SELECT, DA7213_MIXIN_L_MIX_SELECT_AUX_L_SHIFT, DA7213_MIXIN_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mic 1 Switch", DA7213_MIXIN_L_SELECT, DA7213_MIXIN_L_MIX_SELECT_MIC_1_SHIFT, DA7213_MIXIN_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mic 2 Switch", DA7213_MIXIN_L_SELECT, DA7213_MIXIN_L_MIX_SELECT_MIC_2_SHIFT, DA7213_MIXIN_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Right Switch", DA7213_MIXIN_L_SELECT, DA7213_MIXIN_L_MIX_SELECT_MIXIN_R_SHIFT, DA7213_MIXIN_L_MIX_SELECT_MAX, DA7213_NO_INVERT), }; /* Mixin Right */ static const struct snd_kcontrol_new da7213_dapm_mixinr_controls[] = { SOC_DAPM_SINGLE("Aux Right Switch", DA7213_MIXIN_R_SELECT, 
DA7213_MIXIN_R_MIX_SELECT_AUX_R_SHIFT, DA7213_MIXIN_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mic 2 Switch", DA7213_MIXIN_R_SELECT, DA7213_MIXIN_R_MIX_SELECT_MIC_2_SHIFT, DA7213_MIXIN_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mic 1 Switch", DA7213_MIXIN_R_SELECT, DA7213_MIXIN_R_MIX_SELECT_MIC_1_SHIFT, DA7213_MIXIN_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Left Switch", DA7213_MIXIN_R_SELECT, DA7213_MIXIN_R_MIX_SELECT_MIXIN_L_SHIFT, DA7213_MIXIN_R_MIX_SELECT_MAX, DA7213_NO_INVERT), }; /* Mixout Left */ static const struct snd_kcontrol_new da7213_dapm_mixoutl_controls[] = { SOC_DAPM_SINGLE("Aux Left Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_AUX_L_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Left Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_MIXIN_L_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Right Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_MIXIN_R_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("DAC Left Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_DAC_L_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Aux Left Invert Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_AUX_L_INVERTED_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Left Invert Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_MIXIN_L_INVERTED_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Right Invert Switch", DA7213_MIXOUT_L_SELECT, DA7213_MIXOUT_L_MIX_SELECT_MIXIN_R_INVERTED_SHIFT, DA7213_MIXOUT_L_MIX_SELECT_MAX, DA7213_NO_INVERT), }; /* Mixout Right */ static const struct snd_kcontrol_new da7213_dapm_mixoutr_controls[] = { SOC_DAPM_SINGLE("Aux Right Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_AUX_R_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Right Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_MIXIN_R_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Left Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_MIXIN_L_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("DAC Right Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_DAC_R_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Aux Right Invert Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_AUX_R_INVERTED_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Right Invert Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_MIXIN_R_INVERTED_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), SOC_DAPM_SINGLE("Mixin Left Invert Switch", DA7213_MIXOUT_R_SELECT, DA7213_MIXOUT_R_MIX_SELECT_MIXIN_L_INVERTED_SHIFT, DA7213_MIXOUT_R_MIX_SELECT_MAX, DA7213_NO_INVERT), }; /* * DAPM Events */ static int da7213_dai_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); u8 pll_ctrl, pll_status; int i = 0; bool srm_lock = false; switch (event) { case SND_SOC_DAPM_PRE_PMU: /* Enable DAI clks for master mode */ if (da7213->master) snd_soc_component_update_bits(component, DA7213_DAI_CLK_MODE, DA7213_DAI_CLK_EN_MASK, DA7213_DAI_CLK_EN_MASK); /* PC synchronised to DAI */ 
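		/*
		 * Editorial summary of the SRM handling below: once the PC
		 * is synced to the DAI, DA7213_PLL_STATUS is polled for the
		 * SRM lock bit with msleep(50) between reads, so the
		 * worst-case wait is DA7213_SRM_CHECK_RETRIES * 50 ms
		 * before the driver merely warns and carries on.
		 */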
		snd_soc_component_update_bits(component, DA7213_PC_COUNT,
					      DA7213_PC_FREERUN_MASK, 0);

		/* If SRM not enabled then nothing more to do */
		pll_ctrl = snd_soc_component_read(component, DA7213_PLL_CTRL);
		if (!(pll_ctrl & DA7213_PLL_SRM_EN))
			return 0;

		/* Assist 32KHz mode PLL lock */
		if (pll_ctrl & DA7213_PLL_32K_MODE) {
			snd_soc_component_write(component, 0xF0, 0x8B);
			snd_soc_component_write(component, 0xF2, 0x03);
			snd_soc_component_write(component, 0xF0, 0x00);
		}

		/* Check SRM has locked */
		do {
			pll_status = snd_soc_component_read(component,
							    DA7213_PLL_STATUS);
			if (pll_status & DA7213_PLL_SRM_LOCK) {
				srm_lock = true;
			} else {
				++i;
				msleep(50);
			}
		} while ((i < DA7213_SRM_CHECK_RETRIES) && (!srm_lock));

		if (!srm_lock)
			dev_warn(component->dev, "SRM failed to lock\n");

		return 0;
	case SND_SOC_DAPM_POST_PMD:
		/* Revert 32KHz PLL lock updates if applied previously */
		pll_ctrl = snd_soc_component_read(component, DA7213_PLL_CTRL);
		if (pll_ctrl & DA7213_PLL_32K_MODE) {
			snd_soc_component_write(component, 0xF0, 0x8B);
			snd_soc_component_write(component, 0xF2, 0x01);
			snd_soc_component_write(component, 0xF0, 0x00);
		}

		/* PC free-running */
		snd_soc_component_update_bits(component, DA7213_PC_COUNT,
					      DA7213_PC_FREERUN_MASK,
					      DA7213_PC_FREERUN_MASK);

		/* Disable DAI clks if in master mode */
		if (da7213->master)
			snd_soc_component_update_bits(component,
						      DA7213_DAI_CLK_MODE,
						      DA7213_DAI_CLK_EN_MASK,
						      0);
		return 0;
	default:
		return -EINVAL;
	}
}

/*
 * DAPM widgets
 */
static const struct snd_soc_dapm_widget da7213_dapm_widgets[] = {
	/*
	 * Power Supply
	 */
	SND_SOC_DAPM_REGULATOR_SUPPLY("VDDMIC", 0, 0),

	/*
	 * Input & Output
	 */
	/* Use a supply here as this controls both input & output DAIs */
	SND_SOC_DAPM_SUPPLY("DAI", DA7213_DAI_CTRL, DA7213_DAI_EN_SHIFT,
			    DA7213_NO_INVERT, da7213_dai_event,
			    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	/*
	 * Input
	 */
	/* Input Lines */
	SND_SOC_DAPM_INPUT("MIC1"),
	SND_SOC_DAPM_INPUT("MIC2"),
	SND_SOC_DAPM_INPUT("AUXL"),
	SND_SOC_DAPM_INPUT("AUXR"),

	/* MUXes for Mic PGA source selection */
	SND_SOC_DAPM_MUX("Mic 1 Amp Source MUX", SND_SOC_NOPM, 0, 0,
			 &da7213_mic_1_amp_in_sel_mux),
	SND_SOC_DAPM_MUX("Mic 2 Amp Source MUX", SND_SOC_NOPM, 0, 0,
			 &da7213_mic_2_amp_in_sel_mux),

	/* Input PGAs */
	SND_SOC_DAPM_PGA("Mic 1 PGA", DA7213_MIC_1_CTRL, DA7213_AMP_EN_SHIFT,
			 DA7213_NO_INVERT, NULL, 0),
	SND_SOC_DAPM_PGA("Mic 2 PGA", DA7213_MIC_2_CTRL, DA7213_AMP_EN_SHIFT,
			 DA7213_NO_INVERT, NULL, 0),
	SND_SOC_DAPM_PGA("Aux Left PGA", DA7213_AUX_L_CTRL,
			 DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0),
	SND_SOC_DAPM_PGA("Aux Right PGA", DA7213_AUX_R_CTRL,
			 DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0),
	SND_SOC_DAPM_PGA("Mixin Left PGA", DA7213_MIXIN_L_CTRL,
			 DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0),
	SND_SOC_DAPM_PGA("Mixin Right PGA", DA7213_MIXIN_R_CTRL,
			 DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0),

	/* Mic Biases */
	SND_SOC_DAPM_SUPPLY("Mic Bias 1", DA7213_MICBIAS_CTRL,
			    DA7213_MICBIAS1_EN_SHIFT, DA7213_NO_INVERT,
			    NULL, 0),
	SND_SOC_DAPM_SUPPLY("Mic Bias 2", DA7213_MICBIAS_CTRL,
			    DA7213_MICBIAS2_EN_SHIFT, DA7213_NO_INVERT,
			    NULL, 0),

	/* Input Mixers */
	SND_SOC_DAPM_MIXER("Mixin Left", SND_SOC_NOPM, 0, 0,
			   &da7213_dapm_mixinl_controls[0],
			   ARRAY_SIZE(da7213_dapm_mixinl_controls)),
	SND_SOC_DAPM_MIXER("Mixin Right", SND_SOC_NOPM, 0, 0,
			   &da7213_dapm_mixinr_controls[0],
			   ARRAY_SIZE(da7213_dapm_mixinr_controls)),

	/* ADCs */
	SND_SOC_DAPM_ADC("ADC Left", NULL, DA7213_ADC_L_CTRL,
			 DA7213_ADC_EN_SHIFT, DA7213_NO_INVERT),
	SND_SOC_DAPM_ADC("ADC Right", NULL, DA7213_ADC_R_CTRL,
			 DA7213_ADC_EN_SHIFT, DA7213_NO_INVERT),

	/* DAI */
SND_SOC_DAPM_MUX("DAI Left Source MUX", SND_SOC_NOPM, 0, 0, &da7213_dai_l_src_mux), SND_SOC_DAPM_MUX("DAI Right Source MUX", SND_SOC_NOPM, 0, 0, &da7213_dai_r_src_mux), SND_SOC_DAPM_AIF_OUT("DAIOUTL", "Capture", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_OUT("DAIOUTR", "Capture", 1, SND_SOC_NOPM, 0, 0), /* * Output */ /* DAI */ SND_SOC_DAPM_AIF_IN("DAIINL", "Playback", 0, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_AIF_IN("DAIINR", "Playback", 1, SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MUX("DAC Left Source MUX", SND_SOC_NOPM, 0, 0, &da7213_dac_l_src_mux), SND_SOC_DAPM_MUX("DAC Right Source MUX", SND_SOC_NOPM, 0, 0, &da7213_dac_r_src_mux), /* DACs */ SND_SOC_DAPM_DAC("DAC Left", NULL, DA7213_DAC_L_CTRL, DA7213_DAC_EN_SHIFT, DA7213_NO_INVERT), SND_SOC_DAPM_DAC("DAC Right", NULL, DA7213_DAC_R_CTRL, DA7213_DAC_EN_SHIFT, DA7213_NO_INVERT), /* Output Mixers */ SND_SOC_DAPM_MIXER("Mixout Left", SND_SOC_NOPM, 0, 0, &da7213_dapm_mixoutl_controls[0], ARRAY_SIZE(da7213_dapm_mixoutl_controls)), SND_SOC_DAPM_MIXER("Mixout Right", SND_SOC_NOPM, 0, 0, &da7213_dapm_mixoutr_controls[0], ARRAY_SIZE(da7213_dapm_mixoutr_controls)), /* Output PGAs */ SND_SOC_DAPM_PGA("Mixout Left PGA", DA7213_MIXOUT_L_CTRL, DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0), SND_SOC_DAPM_PGA("Mixout Right PGA", DA7213_MIXOUT_R_CTRL, DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0), SND_SOC_DAPM_PGA("Lineout PGA", DA7213_LINE_CTRL, DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0), SND_SOC_DAPM_PGA("Headphone Left PGA", DA7213_HP_L_CTRL, DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0), SND_SOC_DAPM_PGA("Headphone Right PGA", DA7213_HP_R_CTRL, DA7213_AMP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0), /* Charge Pump */ SND_SOC_DAPM_SUPPLY("Charge Pump", DA7213_CP_CTRL, DA7213_CP_EN_SHIFT, DA7213_NO_INVERT, NULL, 0), /* Output Lines */ SND_SOC_DAPM_OUTPUT("HPL"), SND_SOC_DAPM_OUTPUT("HPR"), SND_SOC_DAPM_OUTPUT("LINE"), }; /* * DAPM audio route definition */ static const struct snd_soc_dapm_route da7213_audio_map[] = { /* Dest Connecting Widget source */ /* Input path */ {"Mic Bias 1", NULL, "VDDMIC"}, {"Mic Bias 2", NULL, "VDDMIC"}, {"MIC1", NULL, "Mic Bias 1"}, {"MIC2", NULL, "Mic Bias 2"}, {"Mic 1 Amp Source MUX", "Differential", "MIC1"}, {"Mic 1 Amp Source MUX", "MIC_P", "MIC1"}, {"Mic 1 Amp Source MUX", "MIC_N", "MIC1"}, {"Mic 2 Amp Source MUX", "Differential", "MIC2"}, {"Mic 2 Amp Source MUX", "MIC_P", "MIC2"}, {"Mic 2 Amp Source MUX", "MIC_N", "MIC2"}, {"Mic 1 PGA", NULL, "Mic 1 Amp Source MUX"}, {"Mic 2 PGA", NULL, "Mic 2 Amp Source MUX"}, {"Aux Left PGA", NULL, "AUXL"}, {"Aux Right PGA", NULL, "AUXR"}, {"Mixin Left", "Aux Left Switch", "Aux Left PGA"}, {"Mixin Left", "Mic 1 Switch", "Mic 1 PGA"}, {"Mixin Left", "Mic 2 Switch", "Mic 2 PGA"}, {"Mixin Left", "Mixin Right Switch", "Mixin Right PGA"}, {"Mixin Right", "Aux Right Switch", "Aux Right PGA"}, {"Mixin Right", "Mic 2 Switch", "Mic 2 PGA"}, {"Mixin Right", "Mic 1 Switch", "Mic 1 PGA"}, {"Mixin Right", "Mixin Left Switch", "Mixin Left PGA"}, {"Mixin Left PGA", NULL, "Mixin Left"}, {"ADC Left", NULL, "Mixin Left PGA"}, {"Mixin Right PGA", NULL, "Mixin Right"}, {"ADC Right", NULL, "Mixin Right PGA"}, {"DAI Left Source MUX", "ADC Left", "ADC Left"}, {"DAI Left Source MUX", "ADC Right", "ADC Right"}, {"DAI Left Source MUX", "DAI Input Left", "DAIINL"}, {"DAI Left Source MUX", "DAI Input Right", "DAIINR"}, {"DAI Right Source MUX", "ADC Left", "ADC Left"}, {"DAI Right Source MUX", "ADC Right", "ADC Right"}, {"DAI Right Source MUX", "DAI Input Left", "DAIINL"}, {"DAI Right Source MUX", "DAI Input Right", 
"DAIINR"}, {"DAIOUTL", NULL, "DAI Left Source MUX"}, {"DAIOUTR", NULL, "DAI Right Source MUX"}, {"DAIOUTL", NULL, "DAI"}, {"DAIOUTR", NULL, "DAI"}, /* Output path */ {"DAIINL", NULL, "DAI"}, {"DAIINR", NULL, "DAI"}, {"DAC Left Source MUX", "ADC Output Left", "ADC Left"}, {"DAC Left Source MUX", "ADC Output Right", "ADC Right"}, {"DAC Left Source MUX", "DAI Input Left", "DAIINL"}, {"DAC Left Source MUX", "DAI Input Right", "DAIINR"}, {"DAC Right Source MUX", "ADC Output Left", "ADC Left"}, {"DAC Right Source MUX", "ADC Output Right", "ADC Right"}, {"DAC Right Source MUX", "DAI Input Left", "DAIINL"}, {"DAC Right Source MUX", "DAI Input Right", "DAIINR"}, {"DAC Left", NULL, "DAC Left Source MUX"}, {"DAC Right", NULL, "DAC Right Source MUX"}, {"Mixout Left", "Aux Left Switch", "Aux Left PGA"}, {"Mixout Left", "Mixin Left Switch", "Mixin Left PGA"}, {"Mixout Left", "Mixin Right Switch", "Mixin Right PGA"}, {"Mixout Left", "DAC Left Switch", "DAC Left"}, {"Mixout Left", "Aux Left Invert Switch", "Aux Left PGA"}, {"Mixout Left", "Mixin Left Invert Switch", "Mixin Left PGA"}, {"Mixout Left", "Mixin Right Invert Switch", "Mixin Right PGA"}, {"Mixout Right", "Aux Right Switch", "Aux Right PGA"}, {"Mixout Right", "Mixin Right Switch", "Mixin Right PGA"}, {"Mixout Right", "Mixin Left Switch", "Mixin Left PGA"}, {"Mixout Right", "DAC Right Switch", "DAC Right"}, {"Mixout Right", "Aux Right Invert Switch", "Aux Right PGA"}, {"Mixout Right", "Mixin Right Invert Switch", "Mixin Right PGA"}, {"Mixout Right", "Mixin Left Invert Switch", "Mixin Left PGA"}, {"Mixout Left PGA", NULL, "Mixout Left"}, {"Mixout Right PGA", NULL, "Mixout Right"}, {"Headphone Left PGA", NULL, "Mixout Left PGA"}, {"Headphone Left PGA", NULL, "Charge Pump"}, {"HPL", NULL, "Headphone Left PGA"}, {"Headphone Right PGA", NULL, "Mixout Right PGA"}, {"Headphone Right PGA", NULL, "Charge Pump"}, {"HPR", NULL, "Headphone Right PGA"}, {"Lineout PGA", NULL, "Mixout Right PGA"}, {"LINE", NULL, "Lineout PGA"}, }; static const struct reg_default da7213_reg_defaults[] = { { DA7213_DIG_ROUTING_DAI, 0x10 }, { DA7213_SR, 0x0A }, { DA7213_REFERENCES, 0x80 }, { DA7213_PLL_FRAC_TOP, 0x00 }, { DA7213_PLL_FRAC_BOT, 0x00 }, { DA7213_PLL_INTEGER, 0x20 }, { DA7213_PLL_CTRL, 0x0C }, { DA7213_DAI_CLK_MODE, 0x01 }, { DA7213_DAI_CTRL, 0x08 }, { DA7213_DIG_ROUTING_DAC, 0x32 }, { DA7213_AUX_L_GAIN, 0x35 }, { DA7213_AUX_R_GAIN, 0x35 }, { DA7213_MIXIN_L_SELECT, 0x00 }, { DA7213_MIXIN_R_SELECT, 0x00 }, { DA7213_MIXIN_L_GAIN, 0x03 }, { DA7213_MIXIN_R_GAIN, 0x03 }, { DA7213_ADC_L_GAIN, 0x6F }, { DA7213_ADC_R_GAIN, 0x6F }, { DA7213_ADC_FILTERS1, 0x80 }, { DA7213_MIC_1_GAIN, 0x01 }, { DA7213_MIC_2_GAIN, 0x01 }, { DA7213_DAC_FILTERS5, 0x00 }, { DA7213_DAC_FILTERS2, 0x88 }, { DA7213_DAC_FILTERS3, 0x88 }, { DA7213_DAC_FILTERS4, 0x08 }, { DA7213_DAC_FILTERS1, 0x80 }, { DA7213_DAC_L_GAIN, 0x6F }, { DA7213_DAC_R_GAIN, 0x6F }, { DA7213_CP_CTRL, 0x61 }, { DA7213_HP_L_GAIN, 0x39 }, { DA7213_HP_R_GAIN, 0x39 }, { DA7213_LINE_GAIN, 0x30 }, { DA7213_MIXOUT_L_SELECT, 0x00 }, { DA7213_MIXOUT_R_SELECT, 0x00 }, { DA7213_SYSTEM_MODES_INPUT, 0x00 }, { DA7213_SYSTEM_MODES_OUTPUT, 0x00 }, { DA7213_AUX_L_CTRL, 0x44 }, { DA7213_AUX_R_CTRL, 0x44 }, { DA7213_MICBIAS_CTRL, 0x11 }, { DA7213_MIC_1_CTRL, 0x40 }, { DA7213_MIC_2_CTRL, 0x40 }, { DA7213_MIXIN_L_CTRL, 0x40 }, { DA7213_MIXIN_R_CTRL, 0x40 }, { DA7213_ADC_L_CTRL, 0x40 }, { DA7213_ADC_R_CTRL, 0x40 }, { DA7213_DAC_L_CTRL, 0x48 }, { DA7213_DAC_R_CTRL, 0x40 }, { DA7213_HP_L_CTRL, 0x41 }, { DA7213_HP_R_CTRL, 0x40 }, { DA7213_LINE_CTRL, 0x40 }, 
{ DA7213_MIXOUT_L_CTRL, 0x10 }, { DA7213_MIXOUT_R_CTRL, 0x10 }, { DA7213_LDO_CTRL, 0x00 }, { DA7213_IO_CTRL, 0x00 }, { DA7213_GAIN_RAMP_CTRL, 0x00}, { DA7213_MIC_CONFIG, 0x00 }, { DA7213_PC_COUNT, 0x00 }, { DA7213_CP_VOL_THRESHOLD1, 0x32 }, { DA7213_CP_DELAY, 0x95 }, { DA7213_CP_DETECTOR, 0x00 }, { DA7213_DAI_OFFSET, 0x00 }, { DA7213_DIG_CTRL, 0x00 }, { DA7213_ALC_CTRL2, 0x00 }, { DA7213_ALC_CTRL3, 0x00 }, { DA7213_ALC_NOISE, 0x3F }, { DA7213_ALC_TARGET_MIN, 0x3F }, { DA7213_ALC_TARGET_MAX, 0x00 }, { DA7213_ALC_GAIN_LIMITS, 0xFF }, { DA7213_ALC_ANA_GAIN_LIMITS, 0x71 }, { DA7213_ALC_ANTICLIP_CTRL, 0x00 }, { DA7213_ALC_ANTICLIP_LEVEL, 0x00 }, { DA7213_ALC_OFFSET_MAN_M_L, 0x00 }, { DA7213_ALC_OFFSET_MAN_U_L, 0x00 }, { DA7213_ALC_OFFSET_MAN_M_R, 0x00 }, { DA7213_ALC_OFFSET_MAN_U_R, 0x00 }, { DA7213_ALC_CIC_OP_LVL_CTRL, 0x00 }, { DA7213_DAC_NG_SETUP_TIME, 0x00 }, { DA7213_DAC_NG_OFF_THRESHOLD, 0x00 }, { DA7213_DAC_NG_ON_THRESHOLD, 0x00 }, { DA7213_DAC_NG_CTRL, 0x00 }, }; static bool da7213_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { case DA7213_STATUS1: case DA7213_PLL_STATUS: case DA7213_AUX_L_GAIN_STATUS: case DA7213_AUX_R_GAIN_STATUS: case DA7213_MIC_1_GAIN_STATUS: case DA7213_MIC_2_GAIN_STATUS: case DA7213_MIXIN_L_GAIN_STATUS: case DA7213_MIXIN_R_GAIN_STATUS: case DA7213_ADC_L_GAIN_STATUS: case DA7213_ADC_R_GAIN_STATUS: case DA7213_DAC_L_GAIN_STATUS: case DA7213_DAC_R_GAIN_STATUS: case DA7213_HP_L_GAIN_STATUS: case DA7213_HP_R_GAIN_STATUS: case DA7213_LINE_GAIN_STATUS: case DA7213_ALC_CTRL1: case DA7213_ALC_OFFSET_AUTO_M_L: case DA7213_ALC_OFFSET_AUTO_U_L: case DA7213_ALC_OFFSET_AUTO_M_R: case DA7213_ALC_OFFSET_AUTO_U_R: case DA7213_ALC_CIC_OP_LVL_DATA: return true; default: return false; } } static int da7213_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); u8 dai_clk_mode = DA7213_DAI_BCLKS_PER_WCLK_64; u8 dai_ctrl = 0; u8 fs; /* Set channels */ switch (params_channels(params)) { case 1: if (da7213->fmt != DA7213_DAI_FORMAT_DSP) { dev_err(component->dev, "Mono supported only in DSP mode\n"); return -EINVAL; } dai_ctrl |= DA7213_DAI_MONO_MODE_EN; break; case 2: dai_ctrl &= ~(DA7213_DAI_MONO_MODE_EN); break; default: return -EINVAL; } /* Set DAI format */ switch (params_width(params)) { case 16: dai_ctrl |= DA7213_DAI_WORD_LENGTH_S16_LE; dai_clk_mode = DA7213_DAI_BCLKS_PER_WCLK_32; /* 32bit for 1ch and 2ch */ break; case 20: dai_ctrl |= DA7213_DAI_WORD_LENGTH_S20_LE; break; case 24: dai_ctrl |= DA7213_DAI_WORD_LENGTH_S24_LE; break; case 32: dai_ctrl |= DA7213_DAI_WORD_LENGTH_S32_LE; break; default: return -EINVAL; } /* Set sampling rate */ switch (params_rate(params)) { case 8000: fs = DA7213_SR_8000; da7213->out_rate = DA7213_PLL_FREQ_OUT_98304000; break; case 11025: fs = DA7213_SR_11025; da7213->out_rate = DA7213_PLL_FREQ_OUT_90316800; break; case 12000: fs = DA7213_SR_12000; da7213->out_rate = DA7213_PLL_FREQ_OUT_98304000; break; case 16000: fs = DA7213_SR_16000; da7213->out_rate = DA7213_PLL_FREQ_OUT_98304000; break; case 22050: fs = DA7213_SR_22050; da7213->out_rate = DA7213_PLL_FREQ_OUT_90316800; break; case 32000: fs = DA7213_SR_32000; da7213->out_rate = DA7213_PLL_FREQ_OUT_98304000; break; case 44100: fs = DA7213_SR_44100; da7213->out_rate = DA7213_PLL_FREQ_OUT_90316800; break; case 48000: fs = DA7213_SR_48000; da7213->out_rate = DA7213_PLL_FREQ_OUT_98304000; break; case 
88200:
		fs = DA7213_SR_88200;
		da7213->out_rate = DA7213_PLL_FREQ_OUT_90316800;
		break;
	case 96000:
		fs = DA7213_SR_96000;
		da7213->out_rate = DA7213_PLL_FREQ_OUT_98304000;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_component_update_bits(component, DA7213_DAI_CLK_MODE,
				      DA7213_DAI_BCLKS_PER_WCLK_MASK,
				      dai_clk_mode);
	snd_soc_component_update_bits(component, DA7213_DAI_CTRL,
				      DA7213_DAI_WORD_LENGTH_MASK |
				      DA7213_DAI_MONO_MODE_MASK, dai_ctrl);
	snd_soc_component_write(component, DA7213_SR, fs);

	return 0;
}

static int da7213_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
{
	struct snd_soc_component *component = codec_dai->component;
	struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
	u8 dai_clk_mode = 0, dai_ctrl = 0;
	u8 dai_offset = 0;

	/* Set master/slave mode */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBP_CFP:
		da7213->master = true;
		break;
	case SND_SOC_DAIFMT_CBC_CFC:
		da7213->master = false;
		break;
	default:
		return -EINVAL;
	}

	/* Set clock normal/inverted */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_LEFT_J:
	case SND_SOC_DAIFMT_RIGHT_J:
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			break;
		case SND_SOC_DAIFMT_NB_IF:
			dai_clk_mode |= DA7213_DAI_WCLK_POL_INV;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			dai_clk_mode |= DA7213_DAI_CLK_POL_INV;
			break;
		case SND_SOC_DAIFMT_IB_IF:
			dai_clk_mode |= DA7213_DAI_WCLK_POL_INV |
					DA7213_DAI_CLK_POL_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		/* The bclk is inverted wrt ASoC conventions */
		switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
		case SND_SOC_DAIFMT_NB_NF:
			dai_clk_mode |= DA7213_DAI_CLK_POL_INV;
			break;
		case SND_SOC_DAIFMT_NB_IF:
			dai_clk_mode |= DA7213_DAI_WCLK_POL_INV |
					DA7213_DAI_CLK_POL_INV;
			break;
		case SND_SOC_DAIFMT_IB_NF:
			break;
		case SND_SOC_DAIFMT_IB_IF:
			dai_clk_mode |= DA7213_DAI_WCLK_POL_INV;
			break;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	/* Set DAI format (I2S, left/right justified or DSP mode) */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		dai_ctrl |= DA7213_DAI_FORMAT_I2S_MODE;
		da7213->fmt = DA7213_DAI_FORMAT_I2S_MODE;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		dai_ctrl |= DA7213_DAI_FORMAT_LEFT_J;
		da7213->fmt = DA7213_DAI_FORMAT_LEFT_J;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		dai_ctrl |= DA7213_DAI_FORMAT_RIGHT_J;
		da7213->fmt = DA7213_DAI_FORMAT_RIGHT_J;
		break;
	case SND_SOC_DAIFMT_DSP_A: /* L data MSB after FRM LRC */
		dai_ctrl |= DA7213_DAI_FORMAT_DSP;
		dai_offset = 1;
		da7213->fmt = DA7213_DAI_FORMAT_DSP;
		break;
	case SND_SOC_DAIFMT_DSP_B: /* L data MSB during FRM LRC */
		dai_ctrl |= DA7213_DAI_FORMAT_DSP;
		da7213->fmt = DA7213_DAI_FORMAT_DSP;
		break;
	default:
		return -EINVAL;
	}

	/* By default only 64 BCLK per WCLK is supported */
	dai_clk_mode |= DA7213_DAI_BCLKS_PER_WCLK_64;

	snd_soc_component_update_bits(component, DA7213_DAI_CLK_MODE,
				      DA7213_DAI_BCLKS_PER_WCLK_MASK |
				      DA7213_DAI_CLK_POL_MASK |
				      DA7213_DAI_WCLK_POL_MASK, dai_clk_mode);
	snd_soc_component_update_bits(component, DA7213_DAI_CTRL,
				      DA7213_DAI_FORMAT_MASK, dai_ctrl);
	snd_soc_component_write(component, DA7213_DAI_OFFSET, dai_offset);

	return 0;
}

static int da7213_mute(struct snd_soc_dai *dai, int mute, int direction)
{
	struct snd_soc_component *component = dai->component;

	if (mute) {
		snd_soc_component_update_bits(component, DA7213_DAC_L_CTRL,
					      DA7213_MUTE_EN, DA7213_MUTE_EN);
		snd_soc_component_update_bits(component, DA7213_DAC_R_CTRL,
					      DA7213_MUTE_EN, DA7213_MUTE_EN);
	} else {
		snd_soc_component_update_bits(component, DA7213_DAC_L_CTRL,
					      DA7213_MUTE_EN, 0);
		snd_soc_component_update_bits(component, DA7213_DAC_R_CTRL,
					      DA7213_MUTE_EN, 0);
	}

	return 0;
}

#define DA7213_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)

static int da7213_set_component_sysclk(struct snd_soc_component *component,
				       int clk_id, int source,
				       unsigned int freq, int dir)
{
	struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
	int ret = 0;

	if ((da7213->clk_src == clk_id) && (da7213->mclk_rate == freq))
		return 0;

	/* Maybe audio stream is closing. */
	if (freq == 0)
		return 0;

	if (((freq < da7213->fin_min_rate) && (freq != 32768)) ||
	    (freq > 54000000)) {
		dev_err(component->dev, "Unsupported MCLK value %d\n", freq);
		return -EINVAL;
	}

	switch (clk_id) {
	case DA7213_CLKSRC_MCLK:
		snd_soc_component_update_bits(component, DA7213_PLL_CTRL,
					      DA7213_PLL_MCLK_SQR_EN, 0);
		break;
	case DA7213_CLKSRC_MCLK_SQR:
		snd_soc_component_update_bits(component, DA7213_PLL_CTRL,
					      DA7213_PLL_MCLK_SQR_EN,
					      DA7213_PLL_MCLK_SQR_EN);
		break;
	default:
		dev_err(component->dev, "Unknown clock source %d\n", clk_id);
		return -EINVAL;
	}

	da7213->clk_src = clk_id;

	if (da7213->mclk) {
		freq = clk_round_rate(da7213->mclk, freq);
		ret = clk_set_rate(da7213->mclk, freq);
		if (ret) {
			dev_err(component->dev,
				"Failed to set clock rate %d\n", freq);
			return ret;
		}
	}

	da7213->mclk_rate = freq;

	return 0;
}

/* Supported PLL input frequencies are 32KHz, 5MHz - 54MHz. */
static int _da7213_set_component_pll(struct snd_soc_component *component,
				     int pll_id, int source,
				     unsigned int fref, unsigned int fout)
{
	struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component);
	u8 pll_ctrl, indiv_bits, indiv;
	u8 pll_frac_top, pll_frac_bot, pll_integer;
	u32 freq_ref;
	u64 frac_div;

	/* Work out input divider based on MCLK rate */
	if (da7213->mclk_rate == 32768) {
		if (!da7213->master) {
			dev_err(component->dev,
				"32KHz only valid if codec is clock master\n");
			return -EINVAL;
		}

		/* 32KHz PLL Mode */
		indiv_bits = DA7213_PLL_INDIV_9_TO_18_MHZ;
		indiv = DA7213_PLL_INDIV_9_TO_18_MHZ_VAL;
		source = DA7213_SYSCLK_PLL_32KHZ;
		freq_ref = 3750000;
	} else {
		if (da7213->mclk_rate < 5000000) {
			dev_err(component->dev,
				"PLL input clock %d below valid range\n",
				da7213->mclk_rate);
			return -EINVAL;
		} else if (da7213->mclk_rate <= 9000000) {
			indiv_bits = DA7213_PLL_INDIV_5_TO_9_MHZ;
			indiv = DA7213_PLL_INDIV_5_TO_9_MHZ_VAL;
		} else if (da7213->mclk_rate <= 18000000) {
			indiv_bits = DA7213_PLL_INDIV_9_TO_18_MHZ;
			indiv = DA7213_PLL_INDIV_9_TO_18_MHZ_VAL;
		} else if (da7213->mclk_rate <= 36000000) {
			indiv_bits = DA7213_PLL_INDIV_18_TO_36_MHZ;
			indiv = DA7213_PLL_INDIV_18_TO_36_MHZ_VAL;
		} else if (da7213->mclk_rate <= 54000000) {
			indiv_bits = DA7213_PLL_INDIV_36_TO_54_MHZ;
			indiv = DA7213_PLL_INDIV_36_TO_54_MHZ_VAL;
		} else {
			dev_err(component->dev,
				"PLL input clock %d above valid range\n",
				da7213->mclk_rate);
			return -EINVAL;
		}
		freq_ref = (da7213->mclk_rate / indiv);
	}

	pll_ctrl = indiv_bits;

	/* Configure PLL */
	switch (source) {
	case DA7213_SYSCLK_MCLK:
		snd_soc_component_update_bits(component, DA7213_PLL_CTRL,
					      DA7213_PLL_INDIV_MASK |
					      DA7213_PLL_MODE_MASK, pll_ctrl);
		return 0;
	case DA7213_SYSCLK_PLL:
		break;
	case DA7213_SYSCLK_PLL_SRM:
		pll_ctrl |= DA7213_PLL_SRM_EN;
		fout = DA7213_PLL_FREQ_OUT_94310400;
		break;
	case DA7213_SYSCLK_PLL_32KHZ:
		if (da7213->mclk_rate != 32768) {
			dev_err(component->dev,
				"32KHz mode only valid with 32KHz MCLK\n");
			return -EINVAL;
		}

		pll_ctrl |= DA7213_PLL_32K_MODE | DA7213_PLL_SRM_EN;
		fout = DA7213_PLL_FREQ_OUT_94310400;
		break;
	default:
		dev_err(component->dev,
"Invalid PLL config\n"); return -EINVAL; } /* Calculate dividers for PLL */ pll_integer = fout / freq_ref; frac_div = (u64)(fout % freq_ref) * 8192ULL; do_div(frac_div, freq_ref); pll_frac_top = (frac_div >> DA7213_BYTE_SHIFT) & DA7213_BYTE_MASK; pll_frac_bot = (frac_div) & DA7213_BYTE_MASK; /* Write PLL dividers */ snd_soc_component_write(component, DA7213_PLL_FRAC_TOP, pll_frac_top); snd_soc_component_write(component, DA7213_PLL_FRAC_BOT, pll_frac_bot); snd_soc_component_write(component, DA7213_PLL_INTEGER, pll_integer); /* Enable PLL */ pll_ctrl |= DA7213_PLL_EN; snd_soc_component_update_bits(component, DA7213_PLL_CTRL, DA7213_PLL_INDIV_MASK | DA7213_PLL_MODE_MASK, pll_ctrl); /* Assist 32KHz mode PLL lock */ if (source == DA7213_SYSCLK_PLL_32KHZ) { snd_soc_component_write(component, 0xF0, 0x8B); snd_soc_component_write(component, 0xF1, 0x03); snd_soc_component_write(component, 0xF1, 0x01); snd_soc_component_write(component, 0xF0, 0x00); } return 0; } static int da7213_set_component_pll(struct snd_soc_component *component, int pll_id, int source, unsigned int fref, unsigned int fout) { struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); da7213->fixed_clk_auto_pll = false; return _da7213_set_component_pll(component, pll_id, source, fref, fout); } /* * Select below from Sound Card, not Auto * SND_SOC_DAIFMT_CBC_CFC * SND_SOC_DAIFMT_CBP_CFP */ static const u64 da7213_dai_formats = SND_SOC_POSSIBLE_DAIFMT_I2S | SND_SOC_POSSIBLE_DAIFMT_LEFT_J | SND_SOC_POSSIBLE_DAIFMT_RIGHT_J | SND_SOC_POSSIBLE_DAIFMT_DSP_A | SND_SOC_POSSIBLE_DAIFMT_DSP_B | SND_SOC_POSSIBLE_DAIFMT_NB_NF | SND_SOC_POSSIBLE_DAIFMT_NB_IF | SND_SOC_POSSIBLE_DAIFMT_IB_NF | SND_SOC_POSSIBLE_DAIFMT_IB_IF; /* DAI operations */ static const struct snd_soc_dai_ops da7213_dai_ops = { .hw_params = da7213_hw_params, .set_fmt = da7213_set_dai_fmt, .mute_stream = da7213_mute, .no_capture_mute = 1, .auto_selectable_formats = &da7213_dai_formats, .num_auto_selectable_formats = 1, }; static struct snd_soc_dai_driver da7213_dai = { .name = "da7213-hifi", /* Playback Capabilities */ .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = DA7213_FORMATS, }, /* Capture Capabilities */ .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = SNDRV_PCM_RATE_8000_96000, .formats = DA7213_FORMATS, }, .ops = &da7213_dai_ops, .symmetric_rate = 1, }; static int da7213_set_auto_pll(struct snd_soc_component *component, bool enable) { struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int mode; if (!da7213->fixed_clk_auto_pll) return 0; da7213->mclk_rate = clk_get_rate(da7213->mclk); if (enable) { /* Slave mode needs SRM for non-harmonic frequencies */ if (da7213->master) mode = DA7213_SYSCLK_PLL; else mode = DA7213_SYSCLK_PLL_SRM; /* PLL is not required for harmonic frequencies */ switch (da7213->out_rate) { case DA7213_PLL_FREQ_OUT_90316800: if (da7213->mclk_rate == 11289600 || da7213->mclk_rate == 22579200 || da7213->mclk_rate == 45158400) mode = DA7213_SYSCLK_MCLK; break; case DA7213_PLL_FREQ_OUT_98304000: if (da7213->mclk_rate == 12288000 || da7213->mclk_rate == 24576000 || da7213->mclk_rate == 49152000) mode = DA7213_SYSCLK_MCLK; break; default: return -1; } } else { /* Disable PLL in standby */ mode = DA7213_SYSCLK_MCLK; } return _da7213_set_component_pll(component, 0, mode, da7213->mclk_rate, da7213->out_rate); } static int da7213_set_bias_level(struct snd_soc_component *component, enum snd_soc_bias_level level) { 
struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); int ret; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* Enable MCLK for transition to ON state */ if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_STANDBY) { if (da7213->mclk) { ret = clk_prepare_enable(da7213->mclk); if (ret) { dev_err(component->dev, "Failed to enable mclk\n"); return ret; } da7213_set_auto_pll(component, true); } } break; case SND_SOC_BIAS_STANDBY: if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) { /* Enable VMID reference & master bias */ snd_soc_component_update_bits(component, DA7213_REFERENCES, DA7213_VMID_EN | DA7213_BIAS_EN, DA7213_VMID_EN | DA7213_BIAS_EN); } else { /* Remove MCLK */ if (da7213->mclk) { da7213_set_auto_pll(component, false); clk_disable_unprepare(da7213->mclk); } } break; case SND_SOC_BIAS_OFF: /* Disable VMID reference & master bias */ snd_soc_component_update_bits(component, DA7213_REFERENCES, DA7213_VMID_EN | DA7213_BIAS_EN, 0); break; } return 0; } #define DA7213_FIN_MIN_RATE (5 * MEGA) #define DA7212_FIN_MIN_RATE (2 * MEGA) #if defined(CONFIG_OF) /* DT */ static const struct of_device_id da7213_of_match[] = { { .compatible = "dlg,da7212", .data = (void *)DA7212_FIN_MIN_RATE }, { .compatible = "dlg,da7213", .data = (void *)DA7213_FIN_MIN_RATE }, { } }; MODULE_DEVICE_TABLE(of, da7213_of_match); #endif #ifdef CONFIG_ACPI static const struct acpi_device_id da7213_acpi_match[] = { { "DLGS7212", DA7212_FIN_MIN_RATE }, { "DLGS7213", DA7213_FIN_MIN_RATE }, { }, }; MODULE_DEVICE_TABLE(acpi, da7213_acpi_match); #endif static enum da7213_micbias_voltage da7213_of_micbias_lvl(struct snd_soc_component *component, u32 val) { switch (val) { case 1600: return DA7213_MICBIAS_1_6V; case 2200: return DA7213_MICBIAS_2_2V; case 2500: return DA7213_MICBIAS_2_5V; case 3000: return DA7213_MICBIAS_3_0V; default: dev_warn(component->dev, "Invalid micbias level\n"); return DA7213_MICBIAS_2_2V; } } static enum da7213_dmic_data_sel da7213_of_dmic_data_sel(struct snd_soc_component *component, const char *str) { if (!strcmp(str, "lrise_rfall")) { return DA7213_DMIC_DATA_LRISE_RFALL; } else if (!strcmp(str, "lfall_rrise")) { return DA7213_DMIC_DATA_LFALL_RRISE; } else { dev_warn(component->dev, "Invalid DMIC data select type\n"); return DA7213_DMIC_DATA_LRISE_RFALL; } } static enum da7213_dmic_samplephase da7213_of_dmic_samplephase(struct snd_soc_component *component, const char *str) { if (!strcmp(str, "on_clkedge")) { return DA7213_DMIC_SAMPLE_ON_CLKEDGE; } else if (!strcmp(str, "between_clkedge")) { return DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE; } else { dev_warn(component->dev, "Invalid DMIC sample phase\n"); return DA7213_DMIC_SAMPLE_ON_CLKEDGE; } } static enum da7213_dmic_clk_rate da7213_of_dmic_clkrate(struct snd_soc_component *component, u32 val) { switch (val) { case 1500000: return DA7213_DMIC_CLK_1_5MHZ; case 3000000: return DA7213_DMIC_CLK_3_0MHZ; default: dev_warn(component->dev, "Invalid DMIC clock rate\n"); return DA7213_DMIC_CLK_1_5MHZ; } } static struct da7213_platform_data *da7213_fw_to_pdata(struct snd_soc_component *component) { struct device *dev = component->dev; struct da7213_platform_data *pdata; const char *fw_str; u32 fw_val32; pdata = devm_kzalloc(component->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; if (device_property_read_u32(dev, "dlg,micbias1-lvl", &fw_val32) >= 0) pdata->micbias1_lvl = da7213_of_micbias_lvl(component, fw_val32); else pdata->micbias1_lvl = DA7213_MICBIAS_2_2V; if 
(device_property_read_u32(dev, "dlg,micbias2-lvl", &fw_val32) >= 0) pdata->micbias2_lvl = da7213_of_micbias_lvl(component, fw_val32); else pdata->micbias2_lvl = DA7213_MICBIAS_2_2V; if (!device_property_read_string(dev, "dlg,dmic-data-sel", &fw_str)) pdata->dmic_data_sel = da7213_of_dmic_data_sel(component, fw_str); else pdata->dmic_data_sel = DA7213_DMIC_DATA_LRISE_RFALL; if (!device_property_read_string(dev, "dlg,dmic-samplephase", &fw_str)) pdata->dmic_samplephase = da7213_of_dmic_samplephase(component, fw_str); else pdata->dmic_samplephase = DA7213_DMIC_SAMPLE_ON_CLKEDGE; if (device_property_read_u32(dev, "dlg,dmic-clkrate", &fw_val32) >= 0) pdata->dmic_clk_rate = da7213_of_dmic_clkrate(component, fw_val32); else pdata->dmic_clk_rate = DA7213_DMIC_CLK_3_0MHZ; return pdata; } static int da7213_probe(struct snd_soc_component *component) { struct da7213_priv *da7213 = snd_soc_component_get_drvdata(component); pm_runtime_get_sync(component->dev); /* Default to using ALC auto offset calibration mode. */ snd_soc_component_update_bits(component, DA7213_ALC_CTRL1, DA7213_ALC_CALIB_MODE_MAN, 0); da7213->alc_calib_auto = true; /* Default PC counter to free-running */ snd_soc_component_update_bits(component, DA7213_PC_COUNT, DA7213_PC_FREERUN_MASK, DA7213_PC_FREERUN_MASK); /* Enable all Gain Ramps */ snd_soc_component_update_bits(component, DA7213_AUX_L_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_AUX_R_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_MIXIN_L_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_MIXIN_R_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_ADC_L_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_ADC_R_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_DAC_L_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_DAC_R_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_HP_L_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_HP_R_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); snd_soc_component_update_bits(component, DA7213_LINE_CTRL, DA7213_GAIN_RAMP_EN, DA7213_GAIN_RAMP_EN); /* * There are two separate control bits for input and output mixers as * well as headphone and line outs. * One to enable corresponding amplifier and other to enable its * output. 
As amplifier bits are related to power control, they are * being managed by DAPM while other (non power related) bits are * enabled here */ snd_soc_component_update_bits(component, DA7213_MIXIN_L_CTRL, DA7213_MIXIN_MIX_EN, DA7213_MIXIN_MIX_EN); snd_soc_component_update_bits(component, DA7213_MIXIN_R_CTRL, DA7213_MIXIN_MIX_EN, DA7213_MIXIN_MIX_EN); snd_soc_component_update_bits(component, DA7213_MIXOUT_L_CTRL, DA7213_MIXOUT_MIX_EN, DA7213_MIXOUT_MIX_EN); snd_soc_component_update_bits(component, DA7213_MIXOUT_R_CTRL, DA7213_MIXOUT_MIX_EN, DA7213_MIXOUT_MIX_EN); snd_soc_component_update_bits(component, DA7213_HP_L_CTRL, DA7213_HP_AMP_OE, DA7213_HP_AMP_OE); snd_soc_component_update_bits(component, DA7213_HP_R_CTRL, DA7213_HP_AMP_OE, DA7213_HP_AMP_OE); snd_soc_component_update_bits(component, DA7213_LINE_CTRL, DA7213_LINE_AMP_OE, DA7213_LINE_AMP_OE); /* Handle DT/Platform data */ da7213->pdata = dev_get_platdata(component->dev); if (!da7213->pdata) da7213->pdata = da7213_fw_to_pdata(component); /* Set platform data values */ if (da7213->pdata) { struct da7213_platform_data *pdata = da7213->pdata; u8 micbias_lvl = 0, dmic_cfg = 0; /* Set Mic Bias voltages */ switch (pdata->micbias1_lvl) { case DA7213_MICBIAS_1_6V: case DA7213_MICBIAS_2_2V: case DA7213_MICBIAS_2_5V: case DA7213_MICBIAS_3_0V: micbias_lvl |= (pdata->micbias1_lvl << DA7213_MICBIAS1_LEVEL_SHIFT); break; } switch (pdata->micbias2_lvl) { case DA7213_MICBIAS_1_6V: case DA7213_MICBIAS_2_2V: case DA7213_MICBIAS_2_5V: case DA7213_MICBIAS_3_0V: micbias_lvl |= (pdata->micbias2_lvl << DA7213_MICBIAS2_LEVEL_SHIFT); break; } snd_soc_component_update_bits(component, DA7213_MICBIAS_CTRL, DA7213_MICBIAS1_LEVEL_MASK | DA7213_MICBIAS2_LEVEL_MASK, micbias_lvl); /* Set DMIC configuration */ switch (pdata->dmic_data_sel) { case DA7213_DMIC_DATA_LFALL_RRISE: case DA7213_DMIC_DATA_LRISE_RFALL: dmic_cfg |= (pdata->dmic_data_sel << DA7213_DMIC_DATA_SEL_SHIFT); break; } switch (pdata->dmic_samplephase) { case DA7213_DMIC_SAMPLE_ON_CLKEDGE: case DA7213_DMIC_SAMPLE_BETWEEN_CLKEDGE: dmic_cfg |= (pdata->dmic_samplephase << DA7213_DMIC_SAMPLEPHASE_SHIFT); break; } switch (pdata->dmic_clk_rate) { case DA7213_DMIC_CLK_3_0MHZ: case DA7213_DMIC_CLK_1_5MHZ: dmic_cfg |= (pdata->dmic_clk_rate << DA7213_DMIC_CLK_RATE_SHIFT); break; } snd_soc_component_update_bits(component, DA7213_MIC_CONFIG, DA7213_DMIC_DATA_SEL_MASK | DA7213_DMIC_SAMPLEPHASE_MASK | DA7213_DMIC_CLK_RATE_MASK, dmic_cfg); } pm_runtime_put_sync(component->dev); /* Check if MCLK provided */ da7213->mclk = devm_clk_get_optional(component->dev, "mclk"); if (IS_ERR(da7213->mclk)) return PTR_ERR(da7213->mclk); if (da7213->mclk) /* Do automatic PLL handling assuming fixed clock until * set_pll() has been called. This makes the codec usable * with the simple-audio-card driver. 
*/ da7213->fixed_clk_auto_pll = true; /* Default infinite tone gen, start/stop by Kcontrol */ snd_soc_component_write(component, DA7213_TONE_GEN_CYCLES, DA7213_BEEP_CYCLES_MASK); return 0; } static const struct snd_soc_component_driver soc_component_dev_da7213 = { .probe = da7213_probe, .set_bias_level = da7213_set_bias_level, .controls = da7213_snd_controls, .num_controls = ARRAY_SIZE(da7213_snd_controls), .dapm_widgets = da7213_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(da7213_dapm_widgets), .dapm_routes = da7213_audio_map, .num_dapm_routes = ARRAY_SIZE(da7213_audio_map), .set_sysclk = da7213_set_component_sysclk, .set_pll = da7213_set_component_pll, .idle_bias_on = 1, .use_pmdown_time = 1, .endianness = 1, }; static const struct regmap_config da7213_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = DA7213_TONE_GEN_OFF_PER, .reg_defaults = da7213_reg_defaults, .num_reg_defaults = ARRAY_SIZE(da7213_reg_defaults), .volatile_reg = da7213_volatile_register, .cache_type = REGCACHE_RBTREE, }; static void da7213_power_off(void *data) { struct da7213_priv *da7213 = data; regulator_bulk_disable(DA7213_NUM_SUPPLIES, da7213->supplies); } static const char *da7213_supply_names[DA7213_NUM_SUPPLIES] = { [DA7213_SUPPLY_VDDA] = "VDDA", [DA7213_SUPPLY_VDDIO] = "VDDIO", }; static int da7213_i2c_probe(struct i2c_client *i2c) { struct da7213_priv *da7213; int i, ret; da7213 = devm_kzalloc(&i2c->dev, sizeof(*da7213), GFP_KERNEL); if (!da7213) return -ENOMEM; da7213->fin_min_rate = (uintptr_t)i2c_get_match_data(i2c); if (!da7213->fin_min_rate) return -EINVAL; i2c_set_clientdata(i2c, da7213); /* Get required supplies */ for (i = 0; i < DA7213_NUM_SUPPLIES; ++i) da7213->supplies[i].supply = da7213_supply_names[i]; ret = devm_regulator_bulk_get(&i2c->dev, DA7213_NUM_SUPPLIES, da7213->supplies); if (ret) { dev_err(&i2c->dev, "Failed to get supplies: %d\n", ret); return ret; } ret = regulator_bulk_enable(DA7213_NUM_SUPPLIES, da7213->supplies); if (ret < 0) return ret; ret = devm_add_action_or_reset(&i2c->dev, da7213_power_off, da7213); if (ret < 0) return ret; da7213->regmap = devm_regmap_init_i2c(i2c, &da7213_regmap_config); if (IS_ERR(da7213->regmap)) { ret = PTR_ERR(da7213->regmap); dev_err(&i2c->dev, "regmap_init() failed: %d\n", ret); return ret; } pm_runtime_set_autosuspend_delay(&i2c->dev, 100); pm_runtime_use_autosuspend(&i2c->dev); pm_runtime_set_active(&i2c->dev); pm_runtime_enable(&i2c->dev); ret = devm_snd_soc_register_component(&i2c->dev, &soc_component_dev_da7213, &da7213_dai, 1); if (ret < 0) { dev_err(&i2c->dev, "Failed to register da7213 component: %d\n", ret); } return ret; } static void da7213_i2c_remove(struct i2c_client *i2c) { pm_runtime_disable(&i2c->dev); } static int __maybe_unused da7213_runtime_suspend(struct device *dev) { struct da7213_priv *da7213 = dev_get_drvdata(dev); regcache_cache_only(da7213->regmap, true); regcache_mark_dirty(da7213->regmap); regulator_bulk_disable(DA7213_NUM_SUPPLIES, da7213->supplies); return 0; } static int __maybe_unused da7213_runtime_resume(struct device *dev) { struct da7213_priv *da7213 = dev_get_drvdata(dev); int ret; ret = regulator_bulk_enable(DA7213_NUM_SUPPLIES, da7213->supplies); if (ret < 0) return ret; regcache_cache_only(da7213->regmap, false); return regcache_sync(da7213->regmap); } static const struct dev_pm_ops da7213_pm = { SET_RUNTIME_PM_OPS(da7213_runtime_suspend, da7213_runtime_resume, NULL) SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume) }; static const struct i2c_device_id da7213_i2c_id[] = { 
{ "da7213" }, { } }; MODULE_DEVICE_TABLE(i2c, da7213_i2c_id); /* I2C codec control layer */ static struct i2c_driver da7213_i2c_driver = { .driver = { .name = "da7213", .of_match_table = of_match_ptr(da7213_of_match), .acpi_match_table = ACPI_PTR(da7213_acpi_match), .pm = &da7213_pm, }, .probe = da7213_i2c_probe, .remove = da7213_i2c_remove, .id_table = da7213_i2c_id, }; module_i2c_driver(da7213_i2c_driver); MODULE_DESCRIPTION("ASoC DA7213 Codec driver"); MODULE_AUTHOR("Adam Thomson <[email protected]>"); MODULE_AUTHOR("David Rau <[email protected]>"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ /* * Filesystem access notification for Linux * * Copyright (C) 2008 Red Hat, Inc., Eric Paris <[email protected]> */ #ifndef __LINUX_FSNOTIFY_BACKEND_H #define __LINUX_FSNOTIFY_BACKEND_H #ifdef __KERNEL__ #include <linux/idr.h> /* inotify uses this */ #include <linux/fs.h> /* struct inode */ #include <linux/list.h> #include <linux/path.h> /* struct path */ #include <linux/spinlock.h> #include <linux/types.h> #include <linux/atomic.h> #include <linux/user_namespace.h> #include <linux/refcount.h> #include <linux/mempool.h> #include <linux/sched/mm.h> /* * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily * convert between them. dnotify only needs conversion at watch creation * so no perf loss there. fanotify isn't defined yet, so it can use the * holes if it needs more events. */ #define FS_ACCESS 0x00000001 /* File was accessed */ #define FS_MODIFY 0x00000002 /* File was modified */ #define FS_ATTRIB 0x00000004 /* Metadata changed */ #define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */ #define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */ #define FS_OPEN 0x00000020 /* File was opened */ #define FS_MOVED_FROM 0x00000040 /* File was moved from X */ #define FS_MOVED_TO 0x00000080 /* File was moved to Y */ #define FS_CREATE 0x00000100 /* Subfile was created */ #define FS_DELETE 0x00000200 /* Subfile was deleted */ #define FS_DELETE_SELF 0x00000400 /* Self was deleted */ #define FS_MOVE_SELF 0x00000800 /* Self was moved */ #define FS_OPEN_EXEC 0x00001000 /* File was opened for exec */ #define FS_UNMOUNT 0x00002000 /* inode on umount fs */ #define FS_Q_OVERFLOW 0x00004000 /* Event queue overflowed */ #define FS_ERROR 0x00008000 /* Filesystem Error (fanotify) */ /* * FS_IN_IGNORED overloads FS_ERROR. It is only used internally by inotify * which does not support FS_ERROR. */ #define FS_IN_IGNORED 0x00008000 /* last inotify event here */ #define FS_OPEN_PERM 0x00010000 /* open event in a permission hook */ #define FS_ACCESS_PERM 0x00020000 /* access event in a permission hook */ #define FS_OPEN_EXEC_PERM 0x00040000 /* open/exec event in a permission hook */ /* * Set on inode mark that cares about things that happen to its children. * Always set for dnotify and inotify. * Set on inode/sb/mount marks that care about parent/name info. */ #define FS_EVENT_ON_CHILD 0x08000000 #define FS_RENAME 0x10000000 /* File was renamed */ #define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */ #define FS_ISDIR 0x40000000 /* event occurred against dir */ #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) /* * Directory entry modification events - reported only to directory * where entry is modified and not to a watching parent. * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event * when a directory entry inside a child subdir changes. */ #define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE | FS_RENAME) #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ FS_OPEN_EXEC_PERM) /* * This is a list of all events that may get sent to a parent that is watching * with flag FS_EVENT_ON_CHILD based on fs event on a child of that directory. */ #define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ FS_OPEN | FS_OPEN_EXEC) /* * This is a list of all events that may get sent with the parent inode as the * @to_tell argument of fsnotify().
* It may include events that can be sent to an inode/sb/mount mark, but cannot * be sent to a parent watching children. */ #define FS_EVENTS_POSS_TO_PARENT (FS_EVENTS_POSS_ON_CHILD) /* Events that can be reported to backends */ #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ FS_EVENTS_POSS_ON_CHILD | \ FS_DELETE_SELF | FS_MOVE_SELF | \ FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ FS_ERROR) /* Extra flags that may be reported with event or control handling of events */ #define ALL_FSNOTIFY_FLAGS (FS_ISDIR | FS_EVENT_ON_CHILD | FS_DN_MULTISHOT) #define ALL_FSNOTIFY_BITS (ALL_FSNOTIFY_EVENTS | ALL_FSNOTIFY_FLAGS) struct fsnotify_group; struct fsnotify_event; struct fsnotify_mark; struct fsnotify_event_private_data; struct fsnotify_fname; struct fsnotify_iter_info; struct mem_cgroup; /* * Each group must define these ops. The fsnotify infrastructure will call * these operations for each relevant group. * * handle_event - main call for a group to handle an fs event * @group: group to notify * @mask: event type and flags * @data: object that event happened on * @data_type: type of object for fanotify_data_XXX() accessors * @dir: optional directory associated with event - * if @file_name is not NULL, this is the directory that * @file_name is relative to * @file_name: optional file name associated with event * @cookie: inotify rename cookie * @iter_info: array of marks from this group that are interested in the event * * handle_inode_event - simple variant of handle_event() for groups that only * have inode marks and don't have ignore mask * @mark: mark to notify * @mask: event type and flags * @inode: inode that event happened on * @dir: optional directory associated with event - * if @file_name is not NULL, this is the directory that * @file_name is relative to. * Either @inode or @dir must be non-NULL. * @file_name: optional file name associated with event * @cookie: inotify rename cookie * * free_group_priv - called when a group refcnt hits 0 to clean up the private union * freeing_mark - called when a mark is being destroyed for some reason. The group * MUST be holding a reference on each mark and that reference must be * dropped in this function. inotify uses this function to send * userspace messages that marks have been removed. */ struct fsnotify_ops { int (*handle_event)(struct fsnotify_group *group, u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *file_name, u32 cookie, struct fsnotify_iter_info *iter_info); int (*handle_inode_event)(struct fsnotify_mark *mark, u32 mask, struct inode *inode, struct inode *dir, const struct qstr *file_name, u32 cookie); void (*free_group_priv)(struct fsnotify_group *group); void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group); void (*free_event)(struct fsnotify_group *group, struct fsnotify_event *event); /* called on final put+free to free memory */ void (*free_mark)(struct fsnotify_mark *mark); }; /* * all of the information about the original object we now want to send to * a group. If you want to carry more info from the accessing task to the * listener this structure is where you need to be adding fields. */ struct fsnotify_event { struct list_head list; }; /* * fsnotify group priorities. * Events are sent in order from highest priority to lowest priority.
*/ enum fsnotify_group_prio { FSNOTIFY_PRIO_NORMAL = 0, /* normal notifiers, no permissions */ FSNOTIFY_PRIO_CONTENT, /* fanotify permission events */ FSNOTIFY_PRIO_PRE_CONTENT, /* fanotify pre-content events */ __FSNOTIFY_PRIO_NUM }; /* * A group is a "thing" that wants to receive notification about filesystem * events. The mask holds the subset of event types this group cares about. * refcnt on a group is up to the implementor and at any moment if it goes to 0 * everything will be cleaned up. */ struct fsnotify_group { const struct fsnotify_ops *ops; /* how this group handles things */ /* * How the refcnt is used is up to each group. When the refcnt hits 0 * fsnotify will clean up all of the resources associated with this group. * As an example, the dnotify group will always have a refcnt=1 and that * will never change. Inotify, on the other hand, has a group per * inotify_init() and the refcnt will hit 0 only when that fd has been * closed. */ refcount_t refcnt; /* things with interest in this group */ /* needed to send notification to userspace */ spinlock_t notification_lock; /* protect the notification_list */ struct list_head notification_list; /* list of event_holder this group needs to send to userspace */ wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */ unsigned int q_len; /* events on the queue */ unsigned int max_events; /* maximum events allowed on the list */ enum fsnotify_group_prio priority; /* priority for sending events */ bool shutdown; /* group is being shut down, don't queue more events */ #define FSNOTIFY_GROUP_USER 0x01 /* user allocated group */ #define FSNOTIFY_GROUP_DUPS 0x02 /* allow multiple marks per object */ int flags; unsigned int owner_flags; /* stored flags of mark_mutex owner */ /* stores all fastpath marks assoc with this group so they can be cleaned on unregister */ struct mutex mark_mutex; /* protect marks_list */ atomic_t user_waits; /* Number of tasks waiting for user * response */ struct list_head marks_list; /* all inode marks for this group */ struct fasync_struct *fsn_fa; /* async notification */ struct fsnotify_event *overflow_event; /* Event we queue when the * notification list is too * full */ struct mem_cgroup *memcg; /* memcg to charge allocations */ /* groups can define private fields here or use the void *private */ union { void *private; #ifdef CONFIG_INOTIFY_USER struct inotify_group_private_data { spinlock_t idr_lock; struct idr idr; struct ucounts *ucounts; } inotify_data; #endif #ifdef CONFIG_FANOTIFY struct fanotify_group_private_data { /* Hash table of events for merge */ struct hlist_head *merge_hash; /* allows a group to block waiting for a userspace response */ struct list_head access_list; wait_queue_head_t access_waitq; int flags; /* flags from fanotify_init() */ int f_flags; /* event_f_flags from fanotify_init() */ struct ucounts *ucounts; mempool_t error_events_pool; } fanotify_data; #endif /* CONFIG_FANOTIFY */ }; }; /* * These helpers are used to prevent deadlock when reclaiming inodes with * evictable marks of the same group that is allocating a new mark.
*/ static inline void fsnotify_group_lock(struct fsnotify_group *group) { mutex_lock(&group->mark_mutex); group->owner_flags = memalloc_nofs_save(); } static inline void fsnotify_group_unlock(struct fsnotify_group *group) { memalloc_nofs_restore(group->owner_flags); mutex_unlock(&group->mark_mutex); } static inline void fsnotify_group_assert_locked(struct fsnotify_group *group) { WARN_ON_ONCE(!mutex_is_locked(&group->mark_mutex)); WARN_ON_ONCE(!(current->flags & PF_MEMALLOC_NOFS)); } /* When calling fsnotify tell it if the data is a path or inode */ enum fsnotify_data_type { FSNOTIFY_EVENT_NONE, FSNOTIFY_EVENT_PATH, FSNOTIFY_EVENT_INODE, FSNOTIFY_EVENT_DENTRY, FSNOTIFY_EVENT_ERROR, }; struct fs_error_report { int error; struct inode *inode; struct super_block *sb; }; static inline struct inode *fsnotify_data_inode(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_INODE: return (struct inode *)data; case FSNOTIFY_EVENT_DENTRY: return d_inode(data); case FSNOTIFY_EVENT_PATH: return d_inode(((const struct path *)data)->dentry); case FSNOTIFY_EVENT_ERROR: return ((struct fs_error_report *)data)->inode; default: return NULL; } } static inline struct dentry *fsnotify_data_dentry(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_DENTRY: /* Non const is needed for dget() */ return (struct dentry *)data; case FSNOTIFY_EVENT_PATH: return ((const struct path *)data)->dentry; default: return NULL; } } static inline const struct path *fsnotify_data_path(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_PATH: return data; default: return NULL; } } static inline struct super_block *fsnotify_data_sb(const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_INODE: return ((struct inode *)data)->i_sb; case FSNOTIFY_EVENT_DENTRY: return ((struct dentry *)data)->d_sb; case FSNOTIFY_EVENT_PATH: return ((const struct path *)data)->dentry->d_sb; case FSNOTIFY_EVENT_ERROR: return ((struct fs_error_report *) data)->sb; default: return NULL; } } static inline struct fs_error_report *fsnotify_data_error_report( const void *data, int data_type) { switch (data_type) { case FSNOTIFY_EVENT_ERROR: return (struct fs_error_report *) data; default: return NULL; } } /* * Index to merged marks iterator array that correlates to a type of watch. * The type of watched object can be deduced from the iterator type, but not * the other way around, because an event can match different watched objects * of the same object type. * For example, both parent and child are watching an object of type inode. 
*/ enum fsnotify_iter_type { FSNOTIFY_ITER_TYPE_INODE, FSNOTIFY_ITER_TYPE_VFSMOUNT, FSNOTIFY_ITER_TYPE_SB, FSNOTIFY_ITER_TYPE_PARENT, FSNOTIFY_ITER_TYPE_INODE2, FSNOTIFY_ITER_TYPE_COUNT }; /* The type of object that a mark is attached to */ enum fsnotify_obj_type { FSNOTIFY_OBJ_TYPE_ANY = -1, FSNOTIFY_OBJ_TYPE_INODE, FSNOTIFY_OBJ_TYPE_VFSMOUNT, FSNOTIFY_OBJ_TYPE_SB, FSNOTIFY_OBJ_TYPE_COUNT, FSNOTIFY_OBJ_TYPE_DETACHED = FSNOTIFY_OBJ_TYPE_COUNT }; static inline bool fsnotify_valid_obj_type(unsigned int obj_type) { return (obj_type < FSNOTIFY_OBJ_TYPE_COUNT); } struct fsnotify_iter_info { struct fsnotify_mark *marks[FSNOTIFY_ITER_TYPE_COUNT]; struct fsnotify_group *current_group; unsigned int report_mask; int srcu_idx; }; static inline bool fsnotify_iter_should_report_type( struct fsnotify_iter_info *iter_info, int iter_type) { return (iter_info->report_mask & (1U << iter_type)); } static inline void fsnotify_iter_set_report_type( struct fsnotify_iter_info *iter_info, int iter_type) { iter_info->report_mask |= (1U << iter_type); } static inline struct fsnotify_mark *fsnotify_iter_mark( struct fsnotify_iter_info *iter_info, int iter_type) { if (fsnotify_iter_should_report_type(iter_info, iter_type)) return iter_info->marks[iter_type]; return NULL; } static inline int fsnotify_iter_step(struct fsnotify_iter_info *iter, int type, struct fsnotify_mark **markp) { while (type < FSNOTIFY_ITER_TYPE_COUNT) { *markp = fsnotify_iter_mark(iter, type); if (*markp) break; type++; } return type; } #define FSNOTIFY_ITER_FUNCS(name, NAME) \ static inline struct fsnotify_mark *fsnotify_iter_##name##_mark( \ struct fsnotify_iter_info *iter_info) \ { \ return fsnotify_iter_mark(iter_info, FSNOTIFY_ITER_TYPE_##NAME); \ } FSNOTIFY_ITER_FUNCS(inode, INODE) FSNOTIFY_ITER_FUNCS(parent, PARENT) FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT) FSNOTIFY_ITER_FUNCS(sb, SB) #define fsnotify_foreach_iter_type(type) \ for (type = 0; type < FSNOTIFY_ITER_TYPE_COUNT; type++) #define fsnotify_foreach_iter_mark_type(iter, mark, type) \ for (type = 0; \ type = fsnotify_iter_step(iter, type, &mark), \ type < FSNOTIFY_ITER_TYPE_COUNT; \ type++) /* * Inode/vfsmount/sb point to this structure which tracks all marks attached to * the inode/vfsmount/sb. The reference to inode/vfsmount/sb is held by this * structure. We destroy this structure when there are no more marks attached * to it. The structure is protected by fsnotify_mark_srcu. */ struct fsnotify_mark_connector { spinlock_t lock; unsigned char type; /* Type of object [lock] */ unsigned char prio; /* Highest priority group */ #define FSNOTIFY_CONN_FLAG_IS_WATCHED 0x01 #define FSNOTIFY_CONN_FLAG_HAS_IREF 0x02 unsigned short flags; /* flags [lock] */ union { /* Object pointer [lock] */ void *obj; /* Used listing heads to free after srcu period expires */ struct fsnotify_mark_connector *destroy_next; }; struct hlist_head list; }; /* * Container for per-sb fsnotify state (sb marks and more). * Attached lazily on first marked object on the sb and freed when killing sb. */ struct fsnotify_sb_info { struct fsnotify_mark_connector __rcu *sb_marks; /* * Number of inode/mount/sb objects that are being watched in this sb. * Note that inodes objects are currently double-accounted. * * The value in watched_objects[prio] is the number of objects that are * watched by groups of priority >= prio, so watched_objects[0] is the * total number of watched objects in this sb. 
*/ atomic_long_t watched_objects[__FSNOTIFY_PRIO_NUM]; }; static inline struct fsnotify_sb_info *fsnotify_sb_info(struct super_block *sb) { #ifdef CONFIG_FSNOTIFY return READ_ONCE(sb->s_fsnotify_info); #else return NULL; #endif } static inline atomic_long_t *fsnotify_sb_watched_objects(struct super_block *sb) { return &fsnotify_sb_info(sb)->watched_objects[0]; } /* * A mark is simply an object attached to an in core inode which allows an * fsnotify listener to indicate they are either no longer interested in events * of a type matching mask or only interested in those events. * * These are flushed when an inode is evicted from core and may be flushed * when the inode is modified (as seen by fsnotify_access). Some fsnotify * users (such as dnotify) will flush these when the open fd is closed and not * at inode eviction or modification. * * Text in brackets is showing the lock(s) protecting modifications of a * particular entry. obj_lock means either inode->i_lock or * mnt->mnt_root->d_lock depending on the mark type. */ struct fsnotify_mark { /* Mask this mark is for [mark->lock, group->mark_mutex] */ __u32 mask; /* We hold one for presence in g_list. Also one ref for each 'thing' * in kernel that found and may be using this mark. */ refcount_t refcnt; /* Group this mark is for. Set on mark creation, stable until last ref * is dropped */ struct fsnotify_group *group; /* List of marks by group->marks_list. Also reused for queueing * mark into destroy_list when it's waiting for the end of SRCU period * before it can be freed. [group->mark_mutex] */ struct list_head g_list; /* Protects inode / mnt pointers, flags, masks */ spinlock_t lock; /* List of marks for inode / vfsmount [connector->lock, mark ref] */ struct hlist_node obj_list; /* Head of list of marks for an object [mark ref] */ struct fsnotify_mark_connector *connector; /* Events types and flags to ignore [mark->lock, group->mark_mutex] */ __u32 ignore_mask; /* General fsnotify mark flags */ #define FSNOTIFY_MARK_FLAG_ALIVE 0x0001 #define FSNOTIFY_MARK_FLAG_ATTACHED 0x0002 /* inotify mark flags */ #define FSNOTIFY_MARK_FLAG_EXCL_UNLINK 0x0010 #define FSNOTIFY_MARK_FLAG_IN_ONESHOT 0x0020 /* fanotify mark flags */ #define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x0100 #define FSNOTIFY_MARK_FLAG_NO_IREF 0x0200 #define FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS 0x0400 #define FSNOTIFY_MARK_FLAG_HAS_FSID 0x0800 #define FSNOTIFY_MARK_FLAG_WEAK_FSID 0x1000 unsigned int flags; /* flags [mark->lock] */ }; #ifdef CONFIG_FSNOTIFY /* called from the vfs helpers */ /* main fsnotify call to send events */ extern int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, struct inode *inode, u32 cookie); extern int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type); extern void __fsnotify_inode_delete(struct inode *inode); extern void __fsnotify_vfsmount_delete(struct vfsmount *mnt); extern void fsnotify_sb_delete(struct super_block *sb); extern void fsnotify_sb_free(struct super_block *sb); extern u32 fsnotify_get_cookie(void); static inline __u32 fsnotify_parent_needed_mask(__u32 mask) { /* FS_EVENT_ON_CHILD is set on marks that want parent/name info */ if (!(mask & FS_EVENT_ON_CHILD)) return 0; /* * This object might be watched by a mark that cares about parent/name * info, does it care about the specific set of events that can be * reported with parent/name info? 
*/ return mask & FS_EVENTS_POSS_TO_PARENT; } static inline int fsnotify_inode_watches_children(struct inode *inode) { __u32 parent_mask = READ_ONCE(inode->i_fsnotify_mask); /* FS_EVENT_ON_CHILD is set if the inode may care */ if (!(parent_mask & FS_EVENT_ON_CHILD)) return 0; /* this inode might care about child events, does it care about the * specific set of events that can happen on a child? */ return parent_mask & FS_EVENTS_POSS_ON_CHILD; } /* * Update the dentry with a flag indicating the interest of its parent to receive * filesystem events when those events happens to this dentry->d_inode. */ static inline void fsnotify_update_flags(struct dentry *dentry) { assert_spin_locked(&dentry->d_lock); /* * Serialisation of setting PARENT_WATCHED on the dentries is provided * by d_lock. If inotify_inode_watched changes after we have taken * d_lock, the following fsnotify_set_children_dentry_flags call will * find our entry, so it will spin until we complete here, and update * us with the new state. */ if (fsnotify_inode_watches_children(dentry->d_parent->d_inode)) dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED; else dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED; } /* called from fsnotify listeners, such as fanotify or dnotify */ /* create a new group */ extern struct fsnotify_group *fsnotify_alloc_group( const struct fsnotify_ops *ops, int flags); /* get reference to a group */ extern void fsnotify_get_group(struct fsnotify_group *group); /* drop reference on a group from fsnotify_alloc_group */ extern void fsnotify_put_group(struct fsnotify_group *group); /* group destruction begins, stop queuing new events */ extern void fsnotify_group_stop_queueing(struct fsnotify_group *group); /* destroy group */ extern void fsnotify_destroy_group(struct fsnotify_group *group); /* fasync handler function */ extern int fsnotify_fasync(int fd, struct file *file, int on); /* Free event from memory */ extern void fsnotify_destroy_event(struct fsnotify_group *group, struct fsnotify_event *event); /* attach the event to the group notification queue */ extern int fsnotify_insert_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct fsnotify_group *, struct fsnotify_event *), void (*insert)(struct fsnotify_group *, struct fsnotify_event *)); static inline int fsnotify_add_event(struct fsnotify_group *group, struct fsnotify_event *event, int (*merge)(struct fsnotify_group *, struct fsnotify_event *)) { return fsnotify_insert_event(group, event, merge, NULL); } /* Queue overflow event to a notification group */ static inline void fsnotify_queue_overflow(struct fsnotify_group *group) { fsnotify_add_event(group, group->overflow_event, NULL); } static inline bool fsnotify_is_overflow_event(u32 mask) { return mask & FS_Q_OVERFLOW; } static inline bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group) { assert_spin_locked(&group->notification_lock); return list_empty(&group->notification_list); } extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); /* return, but do not dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); /* return AND dequeue the first event on the notification queue */ extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); /* Remove event queued in the notification list */ extern void fsnotify_remove_queued_event(struct fsnotify_group *group, struct fsnotify_event *event); /* functions used to manipulate the 
marks attached to inodes */ /* * Canonical "ignore mask" including event flags. * * Note the subtle semantic difference from the legacy ->ignored_mask. * ->ignored_mask traditionally only meant which events should be ignored, * while ->ignore_mask also includes flags regarding the type of objects on * which events should be ignored. */ static inline __u32 fsnotify_ignore_mask(struct fsnotify_mark *mark) { __u32 ignore_mask = mark->ignore_mask; /* The event flags in ignore mask take effect */ if (mark->flags & FSNOTIFY_MARK_FLAG_HAS_IGNORE_FLAGS) return ignore_mask; /* * Legacy behavior: * - Always ignore events on dir * - Ignore events on child if parent is watching children */ ignore_mask |= FS_ISDIR; ignore_mask &= ~FS_EVENT_ON_CHILD; ignore_mask |= mark->mask & FS_EVENT_ON_CHILD; return ignore_mask; } /* Legacy ignored_mask - only event types to ignore */ static inline __u32 fsnotify_ignored_events(struct fsnotify_mark *mark) { return mark->ignore_mask & ALL_FSNOTIFY_EVENTS; } /* * Check if mask (or ignore mask) should be applied depending if victim is a * directory and whether it is reported to a watching parent. */ static inline bool fsnotify_mask_applicable(__u32 mask, bool is_dir, int iter_type) { /* Should mask be applied to a directory? */ if (is_dir && !(mask & FS_ISDIR)) return false; /* Should mask be applied to a child? */ if (iter_type == FSNOTIFY_ITER_TYPE_PARENT && !(mask & FS_EVENT_ON_CHILD)) return false; return true; } /* * Effective ignore mask taking into account if event victim is a * directory and whether it is reported to a watching parent. */ static inline __u32 fsnotify_effective_ignore_mask(struct fsnotify_mark *mark, bool is_dir, int iter_type) { __u32 ignore_mask = fsnotify_ignored_events(mark); if (!ignore_mask) return 0; /* For non-dir and non-child, no need to consult the event flags */ if (!is_dir && iter_type != FSNOTIFY_ITER_TYPE_PARENT) return ignore_mask; ignore_mask = fsnotify_ignore_mask(mark); if (!fsnotify_mask_applicable(ignore_mask, is_dir, iter_type)) return 0; return ignore_mask & ALL_FSNOTIFY_EVENTS; } /* Get mask for calculating object interest taking ignore mask into account */ static inline __u32 fsnotify_calc_mask(struct fsnotify_mark *mark) { __u32 mask = mark->mask; if (!fsnotify_ignored_events(mark)) return mask; /* Interest in FS_MODIFY may be needed for clearing ignore mask */ if (!(mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY)) mask |= FS_MODIFY; /* * If mark is interested in ignoring events on children, the object must * show interest in those events for fsnotify_parent() to notice it. 
*/ return mask | mark->ignore_mask; } /* Get mask of events for a list of marks */ extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn); /* Calculate mask of events for a list of marks */ extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn); extern void fsnotify_init_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); /* Find mark belonging to given group in the list of marks */ struct fsnotify_mark *fsnotify_find_mark(void *obj, unsigned int obj_type, struct fsnotify_group *group); /* attach the mark to the object */ int fsnotify_add_mark(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags); int fsnotify_add_mark_locked(struct fsnotify_mark *mark, void *obj, unsigned int obj_type, int add_flags); /* attach the mark to the inode */ static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { return fsnotify_add_mark(mark, inode, FSNOTIFY_OBJ_TYPE_INODE, add_flags); } static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, struct inode *inode, int add_flags) { return fsnotify_add_mark_locked(mark, inode, FSNOTIFY_OBJ_TYPE_INODE, add_flags); } static inline struct fsnotify_mark *fsnotify_find_inode_mark( struct inode *inode, struct fsnotify_group *group) { return fsnotify_find_mark(inode, FSNOTIFY_OBJ_TYPE_INODE, group); } /* given a group and a mark, flag mark to be freed when all references are dropped */ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, struct fsnotify_group *group); /* detach mark from inode / mount list, group list, drop inode reference */ extern void fsnotify_detach_mark(struct fsnotify_mark *mark); /* free mark */ extern void fsnotify_free_mark(struct fsnotify_mark *mark); /* Wait until all marks queued for destruction are destroyed */ extern void fsnotify_wait_marks_destroyed(void); /* Clear all of the marks of a group attached to a given object type */ extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int obj_type); /* run all the marks in a group, and clear all of the vfsmount marks */ static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT); } /* run all the marks in a group, and clear all of the inode marks */ static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE); } /* run all the marks in a group, and clear all of the sb marks */ static inline void fsnotify_clear_sb_marks_by_group(struct fsnotify_group *group) { fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_SB); } extern void fsnotify_get_mark(struct fsnotify_mark *mark); extern void fsnotify_put_mark(struct fsnotify_mark *mark); extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); static inline void fsnotify_init_event(struct fsnotify_event *event) { INIT_LIST_HEAD(&event->list); } #else static inline int fsnotify(__u32 mask, const void *data, int data_type, struct inode *dir, const struct qstr *name, struct inode *inode, u32 cookie) { return 0; } static inline int __fsnotify_parent(struct dentry *dentry, __u32 mask, const void *data, int data_type) { return 0; } static inline void __fsnotify_inode_delete(struct inode *inode) {} static inline void __fsnotify_vfsmount_delete(struct vfsmount *mnt) {} static inline void
fsnotify_sb_delete(struct super_block *sb) {} static inline void fsnotify_sb_free(struct super_block *sb) {} static inline void fsnotify_update_flags(struct dentry *dentry) {} static inline u32 fsnotify_get_cookie(void) { return 0; } static inline void fsnotify_unmount_inodes(struct super_block *sb) {} #endif /* CONFIG_FSNOTIFY */ #endif /* __KERNEL__ */ #endif /* __LINUX_FSNOTIFY_BACKEND_H */
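/*
 * Sketch: a minimal in-kernel consumer of the API above, using the
 * simpler handle_inode_event() variant (inode marks only, no ignore
 * mask). Illustrative only; the "demo_" names are made up, teardown
 * (fsnotify_destroy_group()) is omitted, and <linux/slab.h> plus
 * <linux/err.h> are assumed for kzalloc()/kfree() and IS_ERR().
 */
static int demo_handle_inode_event(struct fsnotify_mark *mark, u32 mask,
				   struct inode *inode, struct inode *dir,
				   const struct qstr *file_name, u32 cookie)
{
	pr_info("demo: event mask 0x%x\n", mask);
	return 0;
}

static void demo_free_mark(struct fsnotify_mark *mark)
{
	kfree(mark);
}

static const struct fsnotify_ops demo_ops = {
	.handle_inode_event	= demo_handle_inode_event,
	.free_mark		= demo_free_mark,
};

static struct fsnotify_group *demo_group;

static int demo_watch_inode(struct inode *inode)
{
	struct fsnotify_mark *mark;
	int err;

	demo_group = fsnotify_alloc_group(&demo_ops, 0);
	if (IS_ERR(demo_group))
		return PTR_ERR(demo_group);

	mark = kzalloc(sizeof(*mark), GFP_KERNEL);
	if (!mark)
		return -ENOMEM;

	fsnotify_init_mark(mark, demo_group);	/* mark now references the group */
	mark->mask = FS_MODIFY | FS_CLOSE_WRITE;
	err = fsnotify_add_inode_mark(mark, inode, 0);
	fsnotify_put_mark(mark);	/* the object's mark list keeps its own ref */
	return err;
}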
// SPDX-License-Identifier: GPL-2.0-only /* * am335x-sbc-t335.dts - Device Tree file for Compulab SBC-T335 * * Copyright (C) 2014 - 2015 CompuLab Ltd. - https://www.compulab.co.il/ */ #include "am335x-cm-t335.dts" / { model = "CompuLab CM-T335 on SB-T335"; compatible = "compulab,sbc-t335", "compulab,cm-t335", "ti,am33xx"; /* DRM display driver */ panel { compatible = "ti,tilcdc,panel"; status = "okay"; pinctrl-names = "default", "sleep"; pinctrl-0 = <&lcd_pins_default>; pinctrl-1 = <&lcd_pins_sleep>; panel-info { ac-bias = <255>; ac-bias-intrpt = <0>; dma-burst-sz = <16>; bpp = <32>; fdd = <0x80>; sync-edge = <0>; sync-ctrl = <1>; raster-order = <0>; fifo-th = <0>; }; display-timings { /* Timing selection performed by U-Boot */ timing0: lcd {/* 800x480p62 */ clock-frequency = <30000000>; hactive = <800>; vactive = <480>; hfront-porch = <39>; hback-porch = <39>; hsync-len = <47>; vback-porch = <29>; vfront-porch = <13>; vsync-len = <2>; hsync-active = <1>; vsync-active = <1>; }; timing1: dvi { /* 1024x768p60 */ clock-frequency = <65000000>; hactive = <1024>; hfront-porch = <24>; hback-porch = <160>; hsync-len = <136>; vactive = <768>; vfront-porch = <3>; vback-porch = <29>; vsync-len = <6>; hsync-active = <0>; vsync-active = <0>; }; }; }; }; &am33xx_pinmux { /* Display */ lcd_pins_default: lcd-default-pins { pinctrl-single,pins = < /* gpmc_ad8.lcd_data23 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad9.lcd_data22 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad10.lcd_data21 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad11.lcd_data20 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad12.lcd_data19 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad13.lcd_data18 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad14.lcd_data17 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_OUTPUT, MUX_MODE1) /* gpmc_ad15.lcd_data16 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_OUTPUT, MUX_MODE1) AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_OUTPUT, MUX_MODE0) AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_OUTPUT, MUX_MODE0) >; }; lcd_pins_sleep: lcd-sleep-pins { pinctrl-single,pins = < /* gpmc_ad8.lcd_data23 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD8, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad9.lcd_data22 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD9, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad10.lcd_data21 */ 
AM33XX_PADCONF(AM335X_PIN_GPMC_AD10, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad11.lcd_data20 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD11, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad12.lcd_data19 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD12, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad13.lcd_data18 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD13, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad14.lcd_data17 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD14, PIN_INPUT_PULLDOWN, MUX_MODE7) /* gpmc_ad15.lcd_data16 */ AM33XX_PADCONF(AM335X_PIN_GPMC_AD15, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA0, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA1, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA2, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA3, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA4, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA5, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA6, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA7, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA8, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA9, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA10, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA11, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA12, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA13, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA14, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_DATA15, PULL_DISABLE, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_VSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_HSYNC, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_PCLK, PIN_INPUT_PULLDOWN, MUX_MODE7) AM33XX_PADCONF(AM335X_PIN_LCD_AC_BIAS_EN, PIN_INPUT_PULLDOWN, MUX_MODE7) >; }; }; &i2c0 { /* GPIO extender */ gpio_ext: pca9555@26 { compatible = "nxp,pca9555"; pinctrl-names = "default"; gpio-controller; #gpio-cells = <2>; reg = <0x26>; dvi-ena-hog { gpio-hog; gpios = <13 GPIO_ACTIVE_HIGH>; output-high; line-name = "dvi-enable"; }; lcd-ena-hog { gpio-hog; gpios = <11 GPIO_ACTIVE_HIGH>; output-high; line-name = "lcd-enable"; }; }; }; /* Display */ &lcdc { status = "okay"; };
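/*
 * Sanity check for the display-timings above: refresh rate is
 * pixel-clock / (htotal * vtotal), which confirms the "p62"/"p60"
 * suffixes in the timing names. A stand-alone sketch, not board code.
 */
#include <stdio.h>

int main(void)
{
	/* timing0 "800x480p62": active + front porch + back porch + sync */
	unsigned long htotal = 800 + 39 + 39 + 47;
	unsigned long vtotal = 480 + 13 + 29 + 2;
	printf("lcd: %.1f Hz\n", 30000000.0 / (htotal * vtotal)); /* ~61.9 */

	/* timing1 "1024x768p60" */
	htotal = 1024 + 24 + 160 + 136;
	vtotal = 768 + 3 + 29 + 6;
	printf("dvi: %.1f Hz\n", 65000000.0 / (htotal * vtotal)); /* ~60.0 */
	return 0;
}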
// SPDX-License-Identifier: GPL-2.0-only OR MIT /* Copyright (c) 2023 Imagination Technologies Ltd. */ #include "pvr_device.h" #include "pvr_fw.h" #include "pvr_fw_info.h" #include "pvr_fw_meta.h" #include "pvr_gem.h" #include "pvr_rogue_cr_defs.h" #include "pvr_rogue_meta.h" #include "pvr_vm.h" #include <linux/compiler.h> #include <linux/delay.h> #include <linux/firmware.h> #include <linux/ktime.h> #include <linux/types.h> #define ROGUE_FW_HEAP_META_SHIFT 25 /* 32 MB */ #define POLL_TIMEOUT_USEC 1000000 /** * pvr_meta_cr_read32() - Read a META register via the Slave Port * @pvr_dev: Device pointer. * @reg_addr: Address of register to read. * @reg_value_out: Pointer to location to store register value. * * Returns: * * 0 on success, or * * Any error returned by pvr_cr_poll_reg32(). */ int pvr_meta_cr_read32(struct pvr_device *pvr_dev, u32 reg_addr, u32 *reg_value_out) { int err; /* Wait for Slave Port to be Ready. */ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1, ROGUE_CR_META_SP_MSLVCTRL1_READY_EN | ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ROGUE_CR_META_SP_MSLVCTRL1_READY_EN | ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, POLL_TIMEOUT_USEC); if (err) return err; /* Issue a Read. */ pvr_cr_write32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0, reg_addr | ROGUE_CR_META_SP_MSLVCTRL0_RD_EN); (void)pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL0); /* Fence write. */ /* Wait for Slave Port to be Ready. */ err = pvr_cr_poll_reg32(pvr_dev, ROGUE_CR_META_SP_MSLVCTRL1, ROGUE_CR_META_SP_MSLVCTRL1_READY_EN | ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, ROGUE_CR_META_SP_MSLVCTRL1_READY_EN | ROGUE_CR_META_SP_MSLVCTRL1_GBLPORT_IDLE_EN, POLL_TIMEOUT_USEC); if (err) return err; *reg_value_out = pvr_cr_read32(pvr_dev, ROGUE_CR_META_SP_MSLVDATAX); return 0; } static int pvr_meta_wrapper_init(struct pvr_device *pvr_dev) { u64 garten_config; /* Configure META to Master boot. */ pvr_cr_write64(pvr_dev, ROGUE_CR_META_BOOT, ROGUE_CR_META_BOOT_MODE_EN); /* Set Garten IDLE to META idle and Set the Garten Wrapper BIF Fence address. */ /* Garten IDLE bit controlled by META. */ garten_config = ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_IDLE_CTRL_META; /* The fence addr is set during the fw init sequence. */ /* Set PC = 0 for fences. */ garten_config &= ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_CLRMSK; garten_config |= (u64)MMU_CONTEXT_MAPPING_FWPRIV << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_PC_BASE_SHIFT; /* Set SLC DM=META. */ garten_config |= ((u64)ROGUE_FW_SEGMMU_META_BIFDM_ID) << ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG_FENCE_DM_SHIFT; pvr_cr_write64(pvr_dev, ROGUE_CR_MTS_GARTEN_WRAPPER_CONFIG, garten_config); return 0; } static __always_inline void add_boot_arg(u32 **boot_conf, u32 param, u32 data) { *(*boot_conf)++ = param; *(*boot_conf)++ = data; } static int meta_ldr_cmd_loadmem(struct drm_device *drm_dev, const u8 *fw, struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, const u32 fw_size) { struct rogue_meta_ldr_l2_data_blk *l2_block = (struct rogue_meta_ldr_l2_data_blk *)(fw + l1_data->cmd_data[1]); struct pvr_device *pvr_dev = to_pvr_device(drm_dev); u32 offset = l1_data->cmd_data[0]; u32 data_size; void *write_addr; int err; /* Verify header is within bounds. */ if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size) return -EINVAL; data_size = l2_block->length - 6 /* L2 Tag length and checksum */; /* Verify data is within bounds. 
*/ if (((u8 *)l2_block->block_data - fw) >= fw_size || ((((u8 *)l2_block->block_data) + data_size) - fw) >= fw_size) return -EINVAL; if (!ROGUE_META_IS_COREMEM_CODE(offset, coremem_size) && !ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) { /* Global range is aliased to local range */ offset &= ~META_MEM_GLOBAL_RANGE_BIT; } err = pvr_fw_find_mmu_segment(pvr_dev, offset, data_size, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, fw_core_data_ptr, &write_addr); if (err) { drm_err(drm_dev, "Addr 0x%x (size: %d) not found in any firmware segment", offset, data_size); return err; } memcpy(write_addr, l2_block->block_data, data_size); return 0; } static int meta_ldr_cmd_zeromem(struct drm_device *drm_dev, struct rogue_meta_ldr_l1_data_blk *l1_data, u32 coremem_size, u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr) { struct pvr_device *pvr_dev = to_pvr_device(drm_dev); u32 offset = l1_data->cmd_data[0]; u32 byte_count = l1_data->cmd_data[1]; void *write_addr; int err; if (ROGUE_META_IS_COREMEM_DATA(offset, coremem_size)) { /* cannot zero coremem directly */ return 0; } /* Global range is aliased to local range */ offset &= ~META_MEM_GLOBAL_RANGE_BIT; err = pvr_fw_find_mmu_segment(pvr_dev, offset, byte_count, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, fw_core_data_ptr, &write_addr); if (err) { drm_err(drm_dev, "Addr 0x%x (size: %d) not found in any firmware segment", offset, byte_count); return err; } memset(write_addr, 0, byte_count); return 0; } static int meta_ldr_cmd_config(struct drm_device *drm_dev, const u8 *fw, struct rogue_meta_ldr_l1_data_blk *l1_data, const u32 fw_size, u32 **boot_conf_ptr) { struct rogue_meta_ldr_l2_data_blk *l2_block = (struct rogue_meta_ldr_l2_data_blk *)(fw + l1_data->cmd_data[0]); struct rogue_meta_ldr_cfg_blk *config_command; u32 l2_block_size; u32 curr_block_size = 0; u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL; /* Verify block header is within bounds. */ if (((u8 *)l2_block - fw) >= fw_size || ((u8 *)(l2_block + 1) - fw) >= fw_size) return -EINVAL; l2_block_size = l2_block->length - 6 /* L2 Tag length and checksum */; config_command = (struct rogue_meta_ldr_cfg_blk *)l2_block->block_data; if (((u8 *)config_command - fw) >= fw_size || ((((u8 *)config_command) + l2_block_size) - fw) >= fw_size) return -EINVAL; while (l2_block_size >= 12) { if (config_command->type != ROGUE_META_LDR_CFG_WRITE) return -EINVAL; /* * Only write to bootloader if we got a valid pointer to the FW * code allocation. */ if (boot_conf) { u32 register_offset = config_command->block_data[0]; u32 register_value = config_command->block_data[1]; /* Do register write */ add_boot_arg(&boot_conf, register_offset, register_value); } curr_block_size = 12; l2_block_size -= curr_block_size; config_command = (struct rogue_meta_ldr_cfg_blk *)((uintptr_t)config_command + curr_block_size); } if (boot_conf_ptr) *boot_conf_ptr = boot_conf; return 0; } /** * process_ldr_command_stream() - Process LDR firmware image and populate * firmware sections * @pvr_dev: Device pointer. * @fw: Pointer to firmware image. * @fw_code_ptr: Pointer to FW code section. * @fw_data_ptr: Pointer to FW data section. * @fw_core_code_ptr: Pointer to FW coremem code section. * @fw_core_data_ptr: Pointer to FW coremem data section. * @boot_conf_ptr: Pointer to boot config argument pointer. * * Returns : * * 0 on success, or * * -EINVAL on any error in LDR command stream. 
*/ static int process_ldr_command_stream(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, u32 **boot_conf_ptr) { struct drm_device *drm_dev = from_pvr_device(pvr_dev); struct rogue_meta_ldr_block_hdr *ldr_header = (struct rogue_meta_ldr_block_hdr *)fw; struct rogue_meta_ldr_l1_data_blk *l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw + ldr_header->sl_data); const u32 fw_size = pvr_dev->fw_dev.firmware->size; int err; u32 *boot_conf = boot_conf_ptr ? *boot_conf_ptr : NULL; u32 coremem_size; err = PVR_FEATURE_VALUE(pvr_dev, meta_coremem_size, &coremem_size); if (err) return err; coremem_size *= SZ_1K; while (l1_data) { /* Verify block header is within bounds. */ if (((u8 *)l1_data - fw) >= fw_size || ((u8 *)(l1_data + 1) - fw) >= fw_size) return -EINVAL; if (ROGUE_META_LDR_BLK_IS_COMMENT(l1_data->cmd)) { /* Don't process comment blocks */ goto next_block; } switch (l1_data->cmd & ROGUE_META_LDR_CMD_MASK) { case ROGUE_META_LDR_CMD_LOADMEM: err = meta_ldr_cmd_loadmem(drm_dev, fw, l1_data, coremem_size, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, fw_core_data_ptr, fw_size); if (err) return err; break; case ROGUE_META_LDR_CMD_START_THREADS: /* Don't process this block */ break; case ROGUE_META_LDR_CMD_ZEROMEM: err = meta_ldr_cmd_zeromem(drm_dev, l1_data, coremem_size, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, fw_core_data_ptr); if (err) return err; break; case ROGUE_META_LDR_CMD_CONFIG: err = meta_ldr_cmd_config(drm_dev, fw, l1_data, fw_size, &boot_conf); if (err) return err; break; default: return -EINVAL; } next_block: if (l1_data->next == 0xFFFFFFFF) break; l1_data = (struct rogue_meta_ldr_l1_data_blk *)(fw + l1_data->next); } if (boot_conf_ptr) *boot_conf_ptr = boot_conf; return 0; } static void configure_seg_id(u64 seg_out_addr, u32 seg_base, u32 seg_limit, u32 seg_id, u32 **boot_conf_ptr) { u32 seg_out_addr0 = seg_out_addr & 0x00000000FFFFFFFFUL; u32 seg_out_addr1 = (seg_out_addr >> 32) & 0x00000000FFFFFFFFUL; u32 *boot_conf = *boot_conf_ptr; /* META segments have a minimum size. */ u32 limit_off = max(seg_limit, ROGUE_FW_SEGMMU_ALIGN); /* The limit is an offset, therefore off = size - 1. */ limit_off -= 1; seg_base |= ROGUE_FW_SEGMMU_ALLTHRS_WRITEABLE; add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_BASE(seg_id), seg_base); add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_LIMIT(seg_id), limit_off); add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA0(seg_id), seg_out_addr0); add_boot_arg(&boot_conf, META_CR_MMCU_SEGMENT_N_OUTA1(seg_id), seg_out_addr1); *boot_conf_ptr = boot_conf; } static u64 get_fw_obj_gpu_addr(struct pvr_fw_object *fw_obj) { struct pvr_device *pvr_dev = to_pvr_device(gem_from_pvr_gem(fw_obj->gem)->dev); struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; return fw_obj->fw_addr_offset + fw_dev->fw_heap_info.gpu_addr; } static void configure_seg_mmu(struct pvr_device *pvr_dev, u32 **boot_conf_ptr) { const struct pvr_fw_layout_entry *layout_entries = pvr_dev->fw_dev.layout_entries; u32 num_layout_entries = pvr_dev->fw_dev.header->layout_entry_num; u64 seg_out_addr_top; u32 i; seg_out_addr_top = ROGUE_FW_SEGMMU_OUTADDR_TOP_SLC(MMU_CONTEXT_MAPPING_FWPRIV, ROGUE_FW_SEGMMU_META_BIFDM_ID); for (i = 0; i < num_layout_entries; i++) { /* * FW code is using the bootloader segment which is already * configured on boot. FW coremem code and data don't use the * segment MMU. Only the FW data segment needs to be configured.
*/ if (layout_entries[i].type == FW_DATA) { u32 seg_id = ROGUE_FW_SEGMMU_DATA_ID; u64 seg_out_addr = get_fw_obj_gpu_addr(pvr_dev->fw_dev.mem.data_obj); seg_out_addr += layout_entries[i].alloc_offset; seg_out_addr |= seg_out_addr_top; /* Write the sequence to the bootldr. */ configure_seg_id(seg_out_addr, layout_entries[i].base_addr, layout_entries[i].alloc_size, seg_id, boot_conf_ptr); break; } } } static void configure_meta_caches(u32 **boot_conf_ptr) { u32 *boot_conf = *boot_conf_ptr; u32 d_cache_t0, i_cache_t0; u32 d_cache_t1, i_cache_t1; u32 d_cache_t2, i_cache_t2; u32 d_cache_t3, i_cache_t3; /* Initialise I/Dcache settings */ d_cache_t0 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; d_cache_t1 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; d_cache_t2 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; d_cache_t3 = META_CR_SYSC_DCPARTX_CACHED_WRITE_ENABLE; i_cache_t0 = 0; i_cache_t1 = 0; i_cache_t2 = 0; i_cache_t3 = 0; d_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; i_cache_t0 |= META_CR_SYSC_XCPARTX_LOCAL_ADDR_FULL_CACHE; /* Local region MMU enhanced bypass: WIN-3 mode for code and data caches */ add_boot_arg(&boot_conf, META_CR_MMCU_LOCAL_EBCTRL, META_CR_MMCU_LOCAL_EBCTRL_ICWIN | META_CR_MMCU_LOCAL_EBCTRL_DCWIN); /* Data cache partitioning thread 0 to 3 */ add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(0), d_cache_t0); add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(1), d_cache_t1); add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(2), d_cache_t2); add_boot_arg(&boot_conf, META_CR_SYSC_DCPART(3), d_cache_t3); /* Enable data cache hits */ add_boot_arg(&boot_conf, META_CR_MMCU_DCACHE_CTRL, META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); /* Instruction cache partitioning thread 0 to 3 */ add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(0), i_cache_t0); add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(1), i_cache_t1); add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(2), i_cache_t2); add_boot_arg(&boot_conf, META_CR_SYSC_ICPART(3), i_cache_t3); /* Enable instruction cache hits */ add_boot_arg(&boot_conf, META_CR_MMCU_ICACHE_CTRL, META_CR_MMCU_XCACHE_CTRL_CACHE_HITS_EN); add_boot_arg(&boot_conf, 0x040000C0, 0); *boot_conf_ptr = boot_conf; } static int pvr_meta_fw_process(struct pvr_device *pvr_dev, const u8 *fw, u8 *fw_code_ptr, u8 *fw_data_ptr, u8 *fw_core_code_ptr, u8 *fw_core_data_ptr, u32 core_code_alloc_size) { struct pvr_fw_device *fw_dev = &pvr_dev->fw_dev; u32 *boot_conf; int err; boot_conf = ((u32 *)fw_code_ptr) + ROGUE_FW_BOOTLDR_CONF_OFFSET; /* Slave port and JTAG accesses are privileged. */ add_boot_arg(&boot_conf, META_CR_SYSC_JTAG_THREAD, META_CR_SYSC_JTAG_THREAD_PRIV_EN); configure_seg_mmu(pvr_dev, &boot_conf); /* Populate FW sections from LDR image. */ err = process_ldr_command_stream(pvr_dev, fw, fw_code_ptr, fw_data_ptr, fw_core_code_ptr, fw_core_data_ptr, &boot_conf); if (err) return err; configure_meta_caches(&boot_conf); /* End argument list. */ add_boot_arg(&boot_conf, 0, 0); if (fw_dev->mem.core_code_obj) { u32 core_code_fw_addr; pvr_fw_object_get_fw_addr(fw_dev->mem.core_code_obj, &core_code_fw_addr); add_boot_arg(&boot_conf, core_code_fw_addr, core_code_alloc_size); } else { add_boot_arg(&boot_conf, 0, 0); } /* None of the cores supported by this driver have META DMA. 
*/ add_boot_arg(&boot_conf, 0, 0); return 0; } static int pvr_meta_init(struct pvr_device *pvr_dev) { pvr_fw_heap_info_init(pvr_dev, ROGUE_FW_HEAP_META_SHIFT, 0); return 0; } static u32 pvr_meta_get_fw_addr_with_offset(struct pvr_fw_object *fw_obj, u32 offset) { u32 fw_addr = fw_obj->fw_addr_offset + offset + ROGUE_FW_SEGMMU_DATA_BASE_ADDRESS; /* META cacheability is determined by address. */ if (fw_obj->gem->flags & PVR_BO_FW_FLAGS_DEVICE_UNCACHED) fw_addr |= ROGUE_FW_SEGMMU_DATA_META_UNCACHED | ROGUE_FW_SEGMMU_DATA_VIVT_SLC_UNCACHED; return fw_addr; } static int pvr_meta_vm_map(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) { struct pvr_gem_object *pvr_obj = fw_obj->gem; return pvr_vm_map(pvr_dev->kernel_vm_ctx, pvr_obj, 0, fw_obj->fw_mm_node.start, pvr_gem_object_size(pvr_obj)); } static void pvr_meta_vm_unmap(struct pvr_device *pvr_dev, struct pvr_fw_object *fw_obj) { pvr_vm_unmap(pvr_dev->kernel_vm_ctx, fw_obj->fw_mm_node.start, fw_obj->fw_mm_node.size); } static bool pvr_meta_has_fixed_data_addr(void) { return false; } const struct pvr_fw_defs pvr_fw_defs_meta = { .init = pvr_meta_init, .fw_process = pvr_meta_fw_process, .vm_map = pvr_meta_vm_map, .vm_unmap = pvr_meta_vm_unmap, .get_fw_addr_with_offset = pvr_meta_get_fw_addr_with_offset, .wrapper_init = pvr_meta_wrapper_init, .has_fixed_data_addr = pvr_meta_has_fixed_data_addr, .irq = { .enable_reg = ROGUE_CR_META_SP_MSLVIRQENABLE, .status_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS, .clear_reg = ROGUE_CR_META_SP_MSLVIRQSTATUS, .event_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_EN, .clear_mask = ROGUE_CR_META_SP_MSLVIRQSTATUS_TRIGVECT2_CLRMSK, }, };
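/*
 * A minimal sketch, not part of the original file, of what add_boot_arg()
 * (defined earlier in pvr_fw_meta.c) is assumed to do: append one
 * {register, value} pair to the boot configuration stream and advance the
 * caller's cursor. configure_seg_id(), configure_meta_caches() and
 * pvr_meta_fw_process() above all build the stream this way, and the
 * bootloader is assumed to consume pairs until the 0/0 terminator.
 */
static void add_boot_arg_sketch(u32 **boot_conf, u32 reg, u32 value)
{
	u32 *cursor = *boot_conf;

	*cursor++ = reg;	/* register address (or tag) word */
	*cursor++ = value;	/* value word */

	*boot_conf = cursor;
}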
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * */ #ifndef _VEGA12_SMUMANAGER_H_ #define _VEGA12_SMUMANAGER_H_ #include "hwmgr.h" #include "vega12/smu9_driver_if.h" #include "vega12_hwmgr.h" struct smu_table_entry { uint32_t version; uint32_t size; uint64_t mc_addr; void *table; struct amdgpu_bo *handle; }; struct smu_table_array { struct smu_table_entry entry[TABLE_COUNT]; }; struct vega12_smumgr { struct smu_table_array smu_tables; }; #define SMU_FEATURES_LOW_MASK 0x00000000FFFFFFFF #define SMU_FEATURES_LOW_SHIFT 0 #define SMU_FEATURES_HIGH_MASK 0xFFFFFFFF00000000 #define SMU_FEATURES_HIGH_SHIFT 32 int vega12_enable_smc_features(struct pp_hwmgr *hwmgr, bool enable, uint64_t feature_mask); int vega12_get_enabled_smc_features(struct pp_hwmgr *hwmgr, uint64_t *features_enabled); #endif
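/*
 * A minimal sketch, not part of the original header: the SMC mailbox takes
 * the 64-bit feature mask as two 32-bit words, which is what the
 * SMU_FEATURES_* masks and shifts above exist for. The helper name is
 * hypothetical; vega12_enable_smc_features() is assumed to perform an
 * equivalent split internally.
 */
static inline void vega12_split_feature_mask(uint64_t feature_mask,
					     uint32_t *smu_features_low,
					     uint32_t *smu_features_high)
{
	*smu_features_low = (uint32_t)((feature_mask & SMU_FEATURES_LOW_MASK) >>
				       SMU_FEATURES_LOW_SHIFT);
	*smu_features_high = (uint32_t)((feature_mask & SMU_FEATURES_HIGH_MASK) >>
					SMU_FEATURES_HIGH_SHIFT);
}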
/* SPDX-License-Identifier: GPL-2.0+ */ /************************************************************************ * * 16654.H Definitions for 16C654 UART used on EdgePorts * * Copyright (C) 1998 Inside Out Networks, Inc. * ************************************************************************/ #if !defined(_16654_H) #define _16654_H /************************************************************************ * * D e f i n e s / T y p e d e f s * ************************************************************************/ // // UART register numbers // Numbers 0-7 are passed to the Edgeport directly. Numbers 8 and // above are used internally to indicate that we must enable access // to them via LCR bit 0x80 or LCR = 0xBF. // The register number sent to the Edgeport is then (x & 0x7). // // Driver must not access registers that affect operation of the // EdgePort firmware -- that includes THR, RHR, IER, FCR. #define THR 0 // ! Transmit Holding Register (Write) #define RDR 0 // ! Receive Holding Register (Read) #define IER 1 // ! Interrupt Enable Register #define FCR 2 // ! Fifo Control Register (Write) #define ISR 2 // Interrupt Status Register (Read) #define LCR 3 // Line Control Register #define MCR 4 // Modem Control Register #define LSR 5 // Line Status Register #define MSR 6 // Modem Status Register #define SPR 7 // ScratchPad Register #define DLL 8 // Bank2[ 0 ] Divisor Latch LSB #define DLM 9 // Bank2[ 1 ] Divisor Latch MSB #define EFR 10 // Bank2[ 2 ] Extended Function Register //efine unused 11 // Bank2[ 3 ] #define XON1 12 // Bank2[ 4 ] Xon-1 #define XON2 13 // Bank2[ 5 ] Xon-2 #define XOFF1 14 // Bank2[ 6 ] Xoff-1 #define XOFF2 15 // Bank2[ 7 ] Xoff-2 #define NUM_16654_REGS 16 #define IS_REG_2ND_BANK(x) ((x) >= 8) // // Bit definitions for each register // #define IER_RX 0x01 // Enable receive interrupt #define IER_TX 0x02 // Enable transmit interrupt #define IER_RXS 0x04 // Enable receive status interrupt #define IER_MDM 0x08 // Enable modem status interrupt #define IER_SLEEP 0x10 // Enable sleep mode #define IER_XOFF 0x20 // Enable s/w flow control (XOFF) interrupt #define IER_RTS 0x40 // Enable RTS interrupt #define IER_CTS 0x80 // Enable CTS interrupt #define IER_ENABLE_ALL 0xFF // Enable all ints #define FCR_FIFO_EN 0x01 // Enable FIFOs #define FCR_RXCLR 0x02 // Reset Rx FIFO #define FCR_TXCLR 0x04 // Reset Tx FIFO #define FCR_DMA_BLK 0x08 // Enable DMA block mode #define FCR_TX_LEVEL_MASK 0x30 // Mask for Tx FIFO Level #define FCR_TX_LEVEL_8 0x00 // Tx FIFO Level = 8 bytes #define FCR_TX_LEVEL_16 0x10 // Tx FIFO Level = 16 bytes #define FCR_TX_LEVEL_32 0x20 // Tx FIFO Level = 32 bytes #define FCR_TX_LEVEL_56 0x30 // Tx FIFO Level = 56 bytes #define FCR_RX_LEVEL_MASK 0xC0 // Mask for Rx FIFO Level #define FCR_RX_LEVEL_8 0x00 // Rx FIFO Level = 8 bytes #define FCR_RX_LEVEL_16 0x40 // Rx FIFO Level = 16 bytes #define FCR_RX_LEVEL_56 0x80 // Rx FIFO Level = 56 bytes #define FCR_RX_LEVEL_60 0xC0 // Rx FIFO Level = 60 bytes #define ISR_INT_MDM_STATUS 0x00 // Modem status int pending #define ISR_INT_NONE 0x01 // No interrupt pending #define ISR_INT_TXRDY 0x02 // Tx ready int pending #define ISR_INT_RXRDY 0x04 // Rx ready int pending #define ISR_INT_LINE_STATUS 0x06 // Line status int pending #define ISR_INT_RX_TIMEOUT 0x0C // Rx timeout int pending #define ISR_INT_RX_XOFF 0x10 // Rx Xoff int pending #define ISR_INT_RTS_CTS 0x20 // RTS/CTS change int pending #define ISR_FIFO_ENABLED 0xC0 // Bits set if FIFOs enabled #define ISR_INT_BITS_MASK 0x3E // Mask to isolate valid int causes
#define LCR_BITS_5 0x00 // 5 bits/char #define LCR_BITS_6 0x01 // 6 bits/char #define LCR_BITS_7 0x02 // 7 bits/char #define LCR_BITS_8 0x03 // 8 bits/char #define LCR_BITS_MASK 0x03 // Mask for bits/char field #define LCR_STOP_1 0x00 // 1 stop bit #define LCR_STOP_1_5 0x04 // 1.5 stop bits (if 5 bits/char) #define LCR_STOP_2 0x04 // 2 stop bits (if 6-8 bits/char) #define LCR_STOP_MASK 0x04 // Mask for stop bits field #define LCR_PAR_NONE 0x00 // No parity #define LCR_PAR_ODD 0x08 // Odd parity #define LCR_PAR_EVEN 0x18 // Even parity #define LCR_PAR_MARK 0x28 // Force parity bit to 1 #define LCR_PAR_SPACE 0x38 // Force parity bit to 0 #define LCR_PAR_MASK 0x38 // Mask for parity field #define LCR_SET_BREAK 0x40 // Set Break condition #define LCR_DL_ENABLE 0x80 // Enable access to divisor latch #define LCR_ACCESS_EFR 0xBF // Load this value to access DLL,DLM, // and also the '654-only registers // EFR, XON1, XON2, XOFF1, XOFF2 #define MCR_DTR 0x01 // Assert DTR #define MCR_RTS 0x02 // Assert RTS #define MCR_OUT1 0x04 // Loopback only: Sets state of RI #define MCR_MASTER_IE 0x08 // Enable interrupt outputs #define MCR_LOOPBACK 0x10 // Set internal (digital) loopback mode #define MCR_XON_ANY 0x20 // Enable any char to exit XOFF mode #define MCR_IR_ENABLE 0x40 // Enable IrDA functions #define MCR_BRG_DIV_4 0x80 // Divide baud rate clk by /4 instead of /1 #define LSR_RX_AVAIL 0x01 // Rx data available #define LSR_OVER_ERR 0x02 // Rx overrun #define LSR_PAR_ERR 0x04 // Rx parity error #define LSR_FRM_ERR 0x08 // Rx framing error #define LSR_BREAK 0x10 // Rx break condition detected #define LSR_TX_EMPTY 0x20 // Tx Fifo empty #define LSR_TX_ALL_EMPTY 0x40 // Tx Fifo and shift register empty #define LSR_FIFO_ERR 0x80 // Rx Fifo contains at least 1 erred char #define EDGEPORT_MSR_DELTA_CTS 0x01 // CTS changed from last read #define EDGEPORT_MSR_DELTA_DSR 0x02 // DSR changed from last read #define EDGEPORT_MSR_DELTA_RI 0x04 // RI changed from 0 -> 1 #define EDGEPORT_MSR_DELTA_CD 0x08 // CD changed from last read #define EDGEPORT_MSR_CTS 0x10 // Current state of CTS #define EDGEPORT_MSR_DSR 0x20 // Current state of DSR #define EDGEPORT_MSR_RI 0x40 // Current state of RI #define EDGEPORT_MSR_CD 0x80 // Current state of CD // Tx Rx //------------------------------- #define EFR_SWFC_NONE 0x00 // None None #define EFR_SWFC_RX1 0x02 // None XOFF1 #define EFR_SWFC_RX2 0x01 // None XOFF2 #define EFR_SWFC_RX12 0x03 // None XOFF1 & XOFF2 #define EFR_SWFC_TX1 0x08 // XOFF1 None #define EFR_SWFC_TX1_RX1 0x0a // XOFF1 XOFF1 #define EFR_SWFC_TX1_RX2 0x09 // XOFF1 XOFF2 #define EFR_SWFC_TX1_RX12 0x0b // XOFF1 XOFF1 & XOFF2 #define EFR_SWFC_TX2 0x04 // XOFF2 None #define EFR_SWFC_TX2_RX1 0x06 // XOFF2 XOFF1 #define EFR_SWFC_TX2_RX2 0x05 // XOFF2 XOFF2 #define EFR_SWFC_TX2_RX12 0x07 // XOFF2 XOFF1 & XOFF2 #define EFR_SWFC_TX12 0x0c // XOFF1 & XOFF2 None #define EFR_SWFC_TX12_RX1 0x0e // XOFF1 & XOFF2 XOFF1 #define EFR_SWFC_TX12_RX2 0x0d // XOFF1 & XOFF2 XOFF2 #define EFR_SWFC_TX12_RX12 0x0f // XOFF1 & XOFF2 XOFF1 & XOFF2 #define EFR_TX_FC_MASK 0x0c // Mask to isolate Tx flow control #define EFR_TX_FC_NONE 0x00 // No Tx Xon/Xoff flow control #define EFR_TX_FC_X1 0x08 // Transmit Xon1/Xoff1 #define EFR_TX_FC_X2 0x04 // Transmit Xon2/Xoff2 #define EFR_TX_FC_X1_2 0x0c // Transmit Xon1&2/Xoff1&2 #define EFR_RX_FC_MASK 0x03 // Mask to isolate Rx flow control #define EFR_RX_FC_NONE 0x00 // No Rx Xon/Xoff flow control #define EFR_RX_FC_X1 0x02 // Receiver compares Xon1/Xoff1 #define EFR_RX_FC_X2 0x01 // Receiver compares Xon2/Xoff2 #define EFR_RX_FC_X1_2 0x03 // Receiver compares Xon1&2/Xoff1&2 #define EFR_SWFC_MASK 0x0F // Mask for software flow control field #define EFR_ENABLE_16654 0x10 // Enable 16C654 features #define EFR_SPEC_DETECT 0x20 // Enable special character detect interrupt #define EFR_AUTO_RTS 0x40 // Use RTS for Rx flow control #define EFR_AUTO_CTS 0x80 // Use CTS for Tx flow control #endif // if !defined(_16654_H)
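// A sketch, not part of the original header, of the access rule described
// in the comment block at the top of this file: register numbers 8 and
// above sit in the second bank, reached by loading LCR with 0xBF
// (LCR_ACCESS_EFR), and only the low three bits of the register number are
// sent to the Edgeport. edgeport_write_reg() is a hypothetical transport
// helper, not a function of the real driver.
static void write_16654_reg_sketch(int reg, unsigned char value)
{
	if (IS_REG_2ND_BANK(reg))
		edgeport_write_reg(LCR, LCR_ACCESS_EFR);	// unlock bank 2

	edgeport_write_reg(reg & 0x7, value);

	if (IS_REG_2ND_BANK(reg))
		edgeport_write_reg(LCR, LCR_BITS_8);	// back to a normal LCR value, e.g. 8N1
}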
// SPDX-License-Identifier: GPL-2.0-or-later /* * Board info for Asus X86 tablets which ship with Android as the factory image * and which have broken DSDT tables. The factory kernels shipped on these * devices typically have a bunch of things hardcoded, rather than specified * in their DSDT. * * Copyright (C) 2021-2023 Hans de Goede <hdegoede@redhat.com> */ #include <linux/gpio/machine.h> #include <linux/input.h> #include <linux/platform_device.h> #include "shared-psy-info.h" #include "x86-android-tablets.h" /* Asus ME176C and TF103C tablets shared data */ static struct gpiod_lookup_table int3496_gpo2_pin22_gpios = { .dev_id = "intel-int3496", .table = { GPIO_LOOKUP("INT33FC:02", 22, "id", GPIO_ACTIVE_HIGH), { } }, }; static const struct x86_gpio_button asus_me176c_tf103c_lid __initconst = { .button = { .code = SW_LID, .active_low = true, .desc = "lid_sw", .type = EV_SW, .wakeup = true, .debounce_interval = 50, }, .chip = "INT33FC:02", .pin = 12, }; /* Asus ME176C tablets have an Android factory image with everything hardcoded */ static const char * const asus_me176c_accel_mount_matrix[] = { "-1", "0", "0", "0", "1", "0", "0", "0", "1" }; static const struct property_entry asus_me176c_accel_props[] = { PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", asus_me176c_accel_mount_matrix), { } }; static const struct software_node asus_me176c_accel_node = { .properties = asus_me176c_accel_props, }; static const struct property_entry asus_me176c_bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", tusb1211_chg_det_psy, 1), PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node), PROPERTY_ENTRY_U32("ti,system-minimum-microvolt", 3600000), PROPERTY_ENTRY_BOOL("omit-battery-class"), PROPERTY_ENTRY_BOOL("disable-reset"), { } }; static const struct software_node asus_me176c_bq24190_node = { .properties = asus_me176c_bq24190_props, }; static const struct property_entry asus_me176c_ug3105_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", bq24190_psy, 1), PROPERTY_ENTRY_REF("monitored-battery", &generic_lipo_hv_4v35_battery_node), PROPERTY_ENTRY_U32("upisemi,rsns-microohm", 10000), { } }; static const struct software_node asus_me176c_ug3105_node = { .properties = asus_me176c_ug3105_props, }; static const struct x86_i2c_client_info asus_me176c_i2c_clients[] __initconst = { { /* bq24297 battery charger */ .board_info = { .type = "bq24190", .addr = 0x6b, .dev_name = "bq24297", .swnode = &asus_me176c_bq24190_node, .platform_data = &bq24190_pdata, }, .adapter_path = "\\_SB_.I2C1", .irq_data = { .type = X86_ACPI_IRQ_TYPE_PMIC, .chip = "\\_SB_.I2C7.PMIC", .domain = DOMAIN_BUS_WAKEUP, .index = 0, }, }, { /* ug3105 battery monitor */ .board_info = { .type = "ug3105", .addr = 0x70, .dev_name = "ug3105", .swnode = &asus_me176c_ug3105_node, }, .adapter_path = "\\_SB_.I2C1", }, { /* ak09911 compass */ .board_info = { .type = "ak09911", .addr = 0x0c, .dev_name = "ak09911", }, .adapter_path = "\\_SB_.I2C5", }, { /* kxtj21009 accelerometer */ .board_info = { .type = "kxtj21009", .addr = 0x0f, .dev_name = "kxtj21009", .swnode = &asus_me176c_accel_node, }, .adapter_path = "\\_SB_.I2C5", .irq_data = { .type = X86_ACPI_IRQ_TYPE_APIC, .index = 0x44, .trigger = ACPI_EDGE_SENSITIVE, .polarity = ACPI_ACTIVE_LOW, }, }, { /* goodix touchscreen */ .board_info = { .type = "GDIX1001:00", .addr = 0x14, .dev_name = "goodix_ts", }, .adapter_path = "\\_SB_.I2C6", .irq_data = { .type = X86_ACPI_IRQ_TYPE_APIC, .index = 0x45, .trigger = ACPI_EDGE_SENSITIVE, .polarity = ACPI_ACTIVE_LOW, }, }, }; static
const struct x86_serdev_info asus_me176c_serdevs[] __initconst = { { .ctrl_hid = "80860F0A", .ctrl_uid = "2", .ctrl_devname = "serial0", .serdev_hid = "BCM2E3A", }, }; static struct gpiod_lookup_table asus_me176c_goodix_gpios = { .dev_id = "i2c-goodix_ts", .table = { GPIO_LOOKUP("INT33FC:00", 60, "reset", GPIO_ACTIVE_HIGH), GPIO_LOOKUP("INT33FC:02", 28, "irq", GPIO_ACTIVE_HIGH), { } }, }; static struct gpiod_lookup_table * const asus_me176c_gpios[] = { &int3496_gpo2_pin22_gpios, &asus_me176c_goodix_gpios, NULL }; const struct x86_dev_info asus_me176c_info __initconst = { .i2c_client_info = asus_me176c_i2c_clients, .i2c_client_count = ARRAY_SIZE(asus_me176c_i2c_clients), .pdev_info = int3496_pdevs, .pdev_count = 1, .serdev_info = asus_me176c_serdevs, .serdev_count = ARRAY_SIZE(asus_me176c_serdevs), .gpio_button = &asus_me176c_tf103c_lid, .gpio_button_count = 1, .gpiod_lookup_tables = asus_me176c_gpios, .bat_swnode = &generic_lipo_hv_4v35_battery_node, .modules = bq24190_modules, }; /* Asus TF103C tablets have an Android factory image with everything hardcoded */ static const char * const asus_tf103c_accel_mount_matrix[] = { "0", "-1", "0", "-1", "0", "0", "0", "0", "1" }; static const struct property_entry asus_tf103c_accel_props[] = { PROPERTY_ENTRY_STRING_ARRAY("mount-matrix", asus_tf103c_accel_mount_matrix), { } }; static const struct software_node asus_tf103c_accel_node = { .properties = asus_tf103c_accel_props, }; static const struct property_entry asus_tf103c_touchscreen_props[] = { PROPERTY_ENTRY_STRING("compatible", "atmel,atmel_mxt_ts"), { } }; static const struct software_node asus_tf103c_touchscreen_node = { .properties = asus_tf103c_touchscreen_props, }; static const struct property_entry asus_tf103c_battery_props[] = { PROPERTY_ENTRY_STRING("compatible", "simple-battery"), PROPERTY_ENTRY_STRING("device-chemistry", "lithium-ion-polymer"), PROPERTY_ENTRY_U32("precharge-current-microamp", 256000), PROPERTY_ENTRY_U32("charge-term-current-microamp", 128000), PROPERTY_ENTRY_U32("constant-charge-current-max-microamp", 2048000), PROPERTY_ENTRY_U32("constant-charge-voltage-max-microvolt", 4208000), PROPERTY_ENTRY_U32("factory-internal-resistance-micro-ohms", 150000), { } }; static const struct software_node asus_tf103c_battery_node = { .properties = asus_tf103c_battery_props, }; static const struct property_entry asus_tf103c_bq24190_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", tusb1211_chg_det_psy, 1), PROPERTY_ENTRY_REF("monitored-battery", &asus_tf103c_battery_node), PROPERTY_ENTRY_U32("ti,system-minimum-microvolt", 3600000), PROPERTY_ENTRY_BOOL("omit-battery-class"), PROPERTY_ENTRY_BOOL("disable-reset"), { } }; static const struct software_node asus_tf103c_bq24190_node = { .properties = asus_tf103c_bq24190_props, }; static const struct property_entry asus_tf103c_ug3105_props[] = { PROPERTY_ENTRY_STRING_ARRAY_LEN("supplied-from", bq24190_psy, 1), PROPERTY_ENTRY_REF("monitored-battery", &asus_tf103c_battery_node), PROPERTY_ENTRY_U32("upisemi,rsns-microohm", 5000), { } }; static const struct software_node asus_tf103c_ug3105_node = { .properties = asus_tf103c_ug3105_props, }; static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst = { { /* bq24297 battery charger */ .board_info = { .type = "bq24190", .addr = 0x6b, .dev_name = "bq24297", .swnode = &asus_tf103c_bq24190_node, .platform_data = &bq24190_pdata, }, .adapter_path = "\\_SB_.I2C1", .irq_data = { .type = X86_ACPI_IRQ_TYPE_PMIC, .chip = "\\_SB_.I2C7.PMIC", .domain = DOMAIN_BUS_WAKEUP, .index = 
0, }, }, { /* ug3105 battery monitor */ .board_info = { .type = "ug3105", .addr = 0x70, .dev_name = "ug3105", .swnode = &asus_tf103c_ug3105_node, }, .adapter_path = "\\_SB_.I2C1", }, { /* ak09911 compass */ .board_info = { .type = "ak09911", .addr = 0x0c, .dev_name = "ak09911", }, .adapter_path = "\\_SB_.I2C5", }, { /* kxtj21009 accelerometer */ .board_info = { .type = "kxtj21009", .addr = 0x0f, .dev_name = "kxtj21009", .swnode = &asus_tf103c_accel_node, }, .adapter_path = "\\_SB_.I2C5", }, { /* atmel touchscreen */ .board_info = { .type = "atmel_mxt_ts", .addr = 0x4a, .dev_name = "atmel_mxt_ts", .swnode = &asus_tf103c_touchscreen_node, }, .adapter_path = "\\_SB_.I2C6", .irq_data = { .type = X86_ACPI_IRQ_TYPE_GPIOINT, .chip = "INT33FC:02", .index = 28, .trigger = ACPI_EDGE_SENSITIVE, .polarity = ACPI_ACTIVE_LOW, .con_id = "atmel_mxt_ts_irq", }, }, }; static struct gpiod_lookup_table * const asus_tf103c_gpios[] = { &int3496_gpo2_pin22_gpios, NULL }; const struct x86_dev_info asus_tf103c_info __initconst = { .i2c_client_info = asus_tf103c_i2c_clients, .i2c_client_count = ARRAY_SIZE(asus_tf103c_i2c_clients), .pdev_info = int3496_pdevs, .pdev_count = 1, .gpio_button = &asus_me176c_tf103c_lid, .gpio_button_count = 1, .gpiod_lookup_tables = asus_tf103c_gpios, .bat_swnode = &asus_tf103c_battery_node, .modules = bq24190_modules, };
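/*
 * A sketch, not part of the original file, of how the NULL-terminated
 * gpiod_lookup_tables arrays above (asus_me176c_gpios, asus_tf103c_gpios)
 * are assumed to be consumed by the common x86-android-tablets probe code:
 * each table is registered with gpiolib so that consumers such as the
 * goodix touchscreen can resolve their "reset" and "irq" GPIOs by name.
 * The function name is hypothetical.
 */
static void register_board_gpio_lookups_sketch(struct gpiod_lookup_table * const *tables)
{
	for (; *tables; tables++)
		gpiod_add_lookup_table(*tables);
}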
// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* * Copyright (C) 2024 Kontron Europe GmbH * * Author: Michael Walle <[email protected]> */ /dts-v1/; #include "mt8195.dtsi" #include "mt6359.dtsi" #include <dt-bindings/gpio/gpio.h> #include <dt-bindings/input/input.h> #include <dt-bindings/leds/common.h> #include <dt-bindings/pinctrl/mt8195-pinfunc.h> #include <dt-bindings/regulator/mediatek,mt6360-regulator.h> #include <dt-bindings/spmi/spmi.h> / { model = "Kontron 3.5\"-SBC-i1200"; compatible = "kontron,3-5-sbc-i1200", "mediatek,mt8395", "mediatek,mt8195"; aliases { mmc0 = &mmc0; mmc1 = &mmc1; serial0 = &uart1; serial1 = &uart2; serial2 = &uart3; serial3 = &uart4; serial4 = &uart0; }; chosen { stdout-path = "serial0:115200n8"; }; firmware { optee { compatible = "linaro,optee-tz"; method = "smc"; }; }; gpio-keys { compatible = "gpio-keys"; pinctrl-names = "default"; pinctrl-0 = <&gpio_keys_pins>; key-0 { gpios = <&pio 106 GPIO_ACTIVE_LOW>; label = "volume_up"; linux,code = <KEY_VOLUMEUP>; wakeup-source; debounce-interval = <15>; }; }; leds { compatible = "gpio-leds"; pinctrl-names = "default"; pinctrl-0 = <&led_pins>; led-0 { gpios = <&pio 107 GPIO_ACTIVE_HIGH>; default-state = "keep"; function = LED_FUNCTION_POWER; color = <LED_COLOR_ID_GREEN>; }; }; memory@40000000 { device_type = "memory"; reg = <0 0x40000000 0x0 0x80000000>; }; vsys: regulator-vsys { compatible = "regulator-fixed"; regulator-name = "vsys"; regulator-always-on; regulator-boot-on; regulator-min-microvolt = <5000000>; regulator-max-microvolt = <5000000>; }; reserved-memory { #address-cells = <2>; #size-cells = <2>; ranges; /* * 12 MiB reserved for OP-TEE (BL32) * +-----------------------+ 0x43e0_0000 * | SHMEM 2MiB | * +-----------------------+ 0x43c0_0000 * | | TA_RAM 8MiB | * + TZDRAM +--------------+ 0x4340_0000 * | | TEE_RAM 2MiB | * +-----------------------+ 0x4320_0000 */ optee_reserved: optee@43200000 { no-map; reg = <0 0x43200000 0 0x00c00000>; }; scp_mem: memory@50000000 { compatible = "shared-dma-pool"; reg = <0 0x50000000 0 0x2900000>; no-map; }; vpu_mem: memory@53000000 { compatible = "shared-dma-pool"; reg = <0 0x53000000 0 0x1400000>; /* 20 MB */ }; /* 2 MiB reserved for ARM Trusted Firmware (BL31) */ bl31_secmon_mem: memory@54600000 { no-map; reg = <0 0x54600000 0x0 0x200000>; }; snd_dma_mem: memory@60000000 { compatible = "shared-dma-pool"; reg = <0 0x60000000 0 0x1100000>; no-map; }; apu_mem: memory@62000000 { compatible = "shared-dma-pool"; reg = <0 0x62000000 0 0x1400000>; /* 20 MB */ }; }; thermal_sensor0: thermal-sensor-0 { compatible = "generic-adc-thermal"; #thermal-sensor-cells = <0>; io-channels = <&auxadc 0>; io-channel-names = "sensor-channel"; temperature-lookup-table = <(-25000) 1474 (-20000) 1374 (-15000) 1260 (-10000) 1134 (-5000) 1004 0 874 5000 750 10000 635 15000 532 20000 443 25000 367 30000 303 35000 250 40000 206 45000 170 50000 141 55000 117 60000 97 65000 81 70000 68 75000 57 80000 48 85000 41 90000 35 95000 30 100000 25 105000 22 110000 19 115000 16 120000 14 125000 12 130000 10 135000 9 140000 8 145000 7 150000 6>; }; thermal_sensor1: thermal-sensor-1 { compatible = "generic-adc-thermal"; #thermal-sensor-cells = <0>; io-channels = <&auxadc 1>; io-channel-names = "sensor-channel"; temperature-lookup-table = <(-25000) 1474 (-20000) 1374 (-15000) 1260 (-10000) 1134 (-5000) 1004 0 874 5000 750 10000 635 15000 532 20000 443 25000 367 30000 303 35000 250 40000 206 45000 170 50000 141 55000 117 60000 97 65000 81 70000 68 75000 57 80000 48 85000 41 90000 35 95000 30 100000 25 105000 22 
110000 19 115000 16 120000 14 125000 12 130000 10 135000 9 140000 8 145000 7 150000 6>; }; thermal_sensor2: thermal-sensor-2 { compatible = "generic-adc-thermal"; #thermal-sensor-cells = <0>; io-channels = <&auxadc 2>; io-channel-names = "sensor-channel"; temperature-lookup-table = <(-25000) 1474 (-20000) 1374 (-15000) 1260 (-10000) 1134 (-5000) 1004 0 874 5000 750 10000 635 15000 532 20000 443 25000 367 30000 303 35000 250 40000 206 45000 170 50000 141 55000 117 60000 97 65000 81 70000 68 75000 57 80000 48 85000 41 90000 35 95000 30 100000 25 105000 22 110000 19 115000 16 120000 14 125000 12 130000 10 135000 9 140000 8 145000 7 150000 6>; }; }; &auxadc { status = "okay"; }; &eth { phy-mode = "rgmii-id"; phy-handle = <&ethernet_phy0>; pinctrl-names = "default", "sleep"; pinctrl-0 = <&eth_default_pins>; pinctrl-1 = <&eth_sleep_pins>; status = "okay"; mdio { ethernet_phy0: ethernet-phy@1 { compatible = "ethernet-phy-id001c.c916"; reg = <0x1>; interrupts-extended = <&pio 94 IRQ_TYPE_LEVEL_LOW>; reset-assert-us = <10000>; reset-deassert-us = <80000>; reset-gpios = <&pio 93 GPIO_ACTIVE_HIGH>; }; }; }; &gpu { status = "okay"; mali-supply = <&mt6315_7_vbuck1>; }; /* CSI1/CSI2 connector */ &i2c0 { pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins>; clock-frequency = <100000>; status = "okay"; }; /* CSI3 connector */ &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&i2c1_pins>; clock-frequency = <100000>; status = "okay"; }; &i2c2 { pinctrl-names = "default"; pinctrl-0 = <&i2c2_pins>; clock-frequency = <400000>; status = "okay"; /* LVDS bridge @f */ }; /* Touch panel connector */ &i2c3 { pinctrl-names = "default"; pinctrl-0 = <&i2c3_pins>; clock-frequency = <100000>; status = "okay"; }; /* B2B connector */ &i2c4 { clock-frequency = <100000>; pinctrl-0 = <&i2c4_pins>; pinctrl-names = "default"; status = "okay"; }; &i2c6 { clock-frequency = <400000>; pinctrl-0 = <&i2c6_pins>; pinctrl-names = "default"; status = "okay"; mt6360: pmic@34 { compatible = "mediatek,mt6360"; reg = <0x34>; interrupt-controller; interrupts-extended = <&pio 101 IRQ_TYPE_EDGE_FALLING>; interrupt-names = "IRQB"; #interrupt-cells = <1>; regulator { compatible = "mediatek,mt6360-regulator"; LDO_VIN1-supply = <&vsys>; LDO_VIN2-supply = <&vsys>; LDO_VIN3-supply = <&vsys>; mt6360_buck1: BUCK1 { regulator-name = "emi_vdd2"; regulator-min-microvolt = <600000>; regulator-max-microvolt = <1800000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP MT6360_OPMODE_ULP>; regulator-always-on; }; mt6360_buck2: BUCK2 { regulator-name = "emi_vddq"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1300000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP MT6360_OPMODE_ULP>; regulator-always-on; }; mt6360_ldo1: LDO1 { regulator-name = "mt6360_ldo1"; /* Test point */ regulator-min-microvolt = <1200000>; regulator-max-microvolt = <3600000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP>; }; mt6360_ldo2: LDO2 { regulator-name = "panel1_p1v8"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP>; }; mt6360_ldo3: LDO3 { regulator-name = "vmc_pmu"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <3300000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP>; }; mt6360_ldo5: LDO5 { regulator-name = "vmch_pmu"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP>; }; mt6360_ldo6: LDO6 {
regulator-name = "mt6360_ldo6"; /* Test point */ regulator-min-microvolt = <500000>; regulator-max-microvolt = <2100000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP>; }; mt6360_ldo7: LDO7 { regulator-name = "emi_vmddr_en"; regulator-min-microvolt = <1800000>; regulator-max-microvolt = <1800000>; regulator-allowed-modes = <MT6360_OPMODE_NORMAL MT6360_OPMODE_LP>; regulator-always-on; }; }; }; }; &mmc0 { pinctrl-names = "default", "state_uhs"; pinctrl-0 = <&mmc0_default_pins>; pinctrl-1 = <&mmc0_uhs_pins>; bus-width = <8>; max-frequency = <200000000>; hs400-ds-delay = <0x14c11>; cap-mmc-highspeed; cap-mmc-hw-reset; mmc-hs200-1_8v; mmc-hs400-1_8v; no-sdio; no-sd; non-removable; vmmc-supply = <&mt6359_vemc_1_ldo_reg>; vqmmc-supply = <&mt6359_vufs_ldo_reg>; status = "okay"; }; &mmc1 { pinctrl-names = "default", "state_uhs"; pinctrl-0 = <&mmc1_default_pins>, <&mmc1_detect_pins>; pinctrl-1 = <&mmc1_default_pins>; cd-gpios = <&pio 129 GPIO_ACTIVE_LOW>; bus-width = <4>; max-frequency = <200000000>; cap-sd-highspeed; sd-uhs-sdr50; sd-uhs-sdr104; no-mmc; vmmc-supply = <&mt6360_ldo5>; vqmmc-supply = <&mt6360_ldo3>; status = "okay"; }; &mt6359_vbbck_ldo_reg { regulator-always-on; }; &mt6359_vcore_buck_reg { regulator-always-on; }; &mt6359_vgpu11_buck_reg { regulator-always-on; }; &mt6359_vproc1_buck_reg { regulator-always-on; }; &mt6359_vproc2_buck_reg { regulator-always-on; }; &mt6359_vpu_buck_reg { regulator-always-on; }; &mt6359_vrf12_ldo_reg { regulator-always-on; }; &mt6359_vsram_md_ldo_reg { regulator-always-on; }; &mt6359_vsram_others_ldo_reg { regulator-always-on; }; &nor_flash { pinctrl-names = "default"; pinctrl-0 = <&nor_pins_default>; status = "okay"; flash@0 { compatible = "jedec,spi-nor"; reg = <0>; spi-max-frequency = <52000000>; spi-rx-bus-width = <2>; spi-tx-bus-width = <2>; }; }; &pcie0 { pinctrl-names = "default"; pinctrl-0 = <&pcie0_pins_default>; status = "okay"; }; &pcie1 { pinctrl-names = "default"; pinctrl-0 = <&pcie1_pins_default>; status = "okay"; }; &pciephy { status = "okay"; }; &pio { eth_default_pins: eth-default-pins { pins-txd { pinmux = <PINMUX_GPIO77__FUNC_GBE_TXD3>, <PINMUX_GPIO78__FUNC_GBE_TXD2>, <PINMUX_GPIO79__FUNC_GBE_TXD1>, <PINMUX_GPIO80__FUNC_GBE_TXD0>; drive-strength = <8>; }; pins-rxd { pinmux = <PINMUX_GPIO81__FUNC_GBE_RXD3>, <PINMUX_GPIO82__FUNC_GBE_RXD2>, <PINMUX_GPIO83__FUNC_GBE_RXD1>, <PINMUX_GPIO84__FUNC_GBE_RXD0>; }; pins-cc { pinmux = <PINMUX_GPIO85__FUNC_GBE_TXC>, <PINMUX_GPIO86__FUNC_GBE_RXC>, <PINMUX_GPIO87__FUNC_GBE_RXDV>, <PINMUX_GPIO88__FUNC_GBE_TXEN>; drive-strength = <8>; }; pins-mdio { pinmux = <PINMUX_GPIO89__FUNC_GBE_MDC>, <PINMUX_GPIO90__FUNC_GBE_MDIO>; input-enable; }; pins-power { pinmux = <PINMUX_GPIO91__FUNC_GPIO91>, <PINMUX_GPIO92__FUNC_GPIO92>; output-high; }; pins-reset { pinmux = <PINMUX_GPIO93__FUNC_GPIO93>; output-high; }; pins-interrupt { pinmux = <PINMUX_GPIO94__FUNC_GPIO94>; input-enable; }; }; eth_sleep_pins: eth-sleep-pins { pins-txd { pinmux = <PINMUX_GPIO77__FUNC_GPIO77>, <PINMUX_GPIO78__FUNC_GPIO78>, <PINMUX_GPIO79__FUNC_GPIO79>, <PINMUX_GPIO80__FUNC_GPIO80>; }; pins-cc { pinmux = <PINMUX_GPIO85__FUNC_GPIO85>, <PINMUX_GPIO88__FUNC_GPIO88>, <PINMUX_GPIO87__FUNC_GPIO87>, <PINMUX_GPIO86__FUNC_GPIO86>; }; pins-rxd { pinmux = <PINMUX_GPIO81__FUNC_GPIO81>, <PINMUX_GPIO82__FUNC_GPIO82>, <PINMUX_GPIO83__FUNC_GPIO83>, <PINMUX_GPIO84__FUNC_GPIO84>; }; pins-mdio { pinmux = <PINMUX_GPIO89__FUNC_GPIO89>, <PINMUX_GPIO90__FUNC_GPIO90>; input-disable; bias-disable; }; }; gpio_keys_pins: gpio-keys-pins { pins { pinmux 
= <PINMUX_GPIO106__FUNC_GPIO106>; input-enable; }; }; i2c0_pins: i2c0-pins { pins { pinmux = <PINMUX_GPIO8__FUNC_SDA0>, <PINMUX_GPIO9__FUNC_SCL0>; bias-pull-up = <MTK_PULL_SET_RSEL_111>; drive-strength-microamp = <1000>; }; }; i2c1_pins: i2c1-pins { pins { pinmux = <PINMUX_GPIO10__FUNC_SDA1>, <PINMUX_GPIO11__FUNC_SCL1>; bias-pull-up = <MTK_PULL_SET_RSEL_111>; drive-strength-microamp = <1000>; }; }; i2c2_pins: i2c2-default-pins { pins-bus { pinmux = <PINMUX_GPIO12__FUNC_SDA2>, <PINMUX_GPIO13__FUNC_SCL2>; bias-pull-up = <MTK_PULL_SET_RSEL_111>; drive-strength-microamp = <1000>; }; }; i2c3_pins: i2c3-pins { pins { pinmux = <PINMUX_GPIO14__FUNC_SDA3>, <PINMUX_GPIO15__FUNC_SCL3>; bias-pull-up = <MTK_PULL_SET_RSEL_111>; drive-strength-microamp = <1000>; }; }; i2c4_pins: i2c4-pins { pins { pinmux = <PINMUX_GPIO16__FUNC_SDA4>, <PINMUX_GPIO17__FUNC_SCL4>; bias-pull-up = <MTK_PULL_SET_RSEL_111>; drive-strength-microamp = <1000>; }; }; i2c6_pins: i2c6-pins { pins { pinmux = <PINMUX_GPIO25__FUNC_SDA6>, <PINMUX_GPIO26__FUNC_SCL6>; bias-pull-up; drive-strength-microamp = <1000>; }; }; mmc0_default_pins: mmc0-default-pins { pins-clk { pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>; drive-strength = <6>; bias-pull-down = <MTK_PUPD_SET_R1R0_10>; }; pins-cmd-dat { pinmux = <PINMUX_GPIO126__FUNC_MSDC0_DAT0>, <PINMUX_GPIO125__FUNC_MSDC0_DAT1>, <PINMUX_GPIO124__FUNC_MSDC0_DAT2>, <PINMUX_GPIO123__FUNC_MSDC0_DAT3>, <PINMUX_GPIO119__FUNC_MSDC0_DAT4>, <PINMUX_GPIO118__FUNC_MSDC0_DAT5>, <PINMUX_GPIO117__FUNC_MSDC0_DAT6>, <PINMUX_GPIO116__FUNC_MSDC0_DAT7>, <PINMUX_GPIO121__FUNC_MSDC0_CMD>; input-enable; drive-strength = <6>; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; pins-rst { pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>; drive-strength = <6>; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; }; mmc0_uhs_pins: mmc0-uhs-pins { pins-clk { pinmux = <PINMUX_GPIO122__FUNC_MSDC0_CLK>; drive-strength = <8>; bias-pull-down = <MTK_PUPD_SET_R1R0_10>; }; pins-cmd-dat { pinmux = <PINMUX_GPIO126__FUNC_MSDC0_DAT0>, <PINMUX_GPIO125__FUNC_MSDC0_DAT1>, <PINMUX_GPIO124__FUNC_MSDC0_DAT2>, <PINMUX_GPIO123__FUNC_MSDC0_DAT3>, <PINMUX_GPIO119__FUNC_MSDC0_DAT4>, <PINMUX_GPIO118__FUNC_MSDC0_DAT5>, <PINMUX_GPIO117__FUNC_MSDC0_DAT6>, <PINMUX_GPIO116__FUNC_MSDC0_DAT7>, <PINMUX_GPIO121__FUNC_MSDC0_CMD>; input-enable; drive-strength = <8>; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; pins-ds { pinmux = <PINMUX_GPIO127__FUNC_MSDC0_DSL>; drive-strength = <8>; bias-pull-down = <MTK_PUPD_SET_R1R0_10>; }; pins-rst { pinmux = <PINMUX_GPIO120__FUNC_MSDC0_RSTB>; drive-strength = <8>; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; }; mmc1_default_pins: mmc1-default-pins { pins-clk { pinmux = <PINMUX_GPIO111__FUNC_MSDC1_CLK>; drive-strength = <8>; bias-pull-down = <MTK_PUPD_SET_R1R0_10>; }; pins-cmd-dat { pinmux = <PINMUX_GPIO110__FUNC_MSDC1_CMD>, <PINMUX_GPIO112__FUNC_MSDC1_DAT0>, <PINMUX_GPIO113__FUNC_MSDC1_DAT1>, <PINMUX_GPIO114__FUNC_MSDC1_DAT2>, <PINMUX_GPIO115__FUNC_MSDC1_DAT3>; input-enable; drive-strength = <8>; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; }; mmc1_detect_pins: mmc1-detect-pins { pins-insert { pinmux = <PINMUX_GPIO129__FUNC_GPIO129>; bias-pull-up; }; }; nor_pins_default: nor-default-pins { pins-ck-io { pinmux = <PINMUX_GPIO142__FUNC_SPINOR_IO0>, <PINMUX_GPIO141__FUNC_SPINOR_CK>, <PINMUX_GPIO143__FUNC_SPINOR_IO1>; drive-strength = <6>; bias-pull-down; }; pins-cs { pinmux = <PINMUX_GPIO140__FUNC_SPINOR_CS>; drive-strength = <6>; bias-pull-up; }; }; pcie0_pins_default: pcie0-default-pins { pins-bus { pinmux = <PINMUX_GPIO19__FUNC_WAKEN>, 
<PINMUX_GPIO20__FUNC_PERSTN>, <PINMUX_GPIO21__FUNC_CLKREQN>; bias-pull-up; }; }; pcie1_pins_default: pcie1-default-pins { pins-bus { pinmux = <PINMUX_GPIO0__FUNC_PERSTN_1>, <PINMUX_GPIO1__FUNC_CLKREQN_1>, <PINMUX_GPIO2__FUNC_WAKEN_1>; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; }; led_pins: led-pins { pins-power-en { pinmux = <PINMUX_GPIO107__FUNC_GPIO107>; output-high; }; }; spi0_pins: spi0-default-pins { pins-cs-mosi-clk { pinmux = <PINMUX_GPIO132__FUNC_SPIM0_CSB>, <PINMUX_GPIO134__FUNC_SPIM0_MO>, <PINMUX_GPIO133__FUNC_SPIM0_CLK>; bias-disable; }; pins-miso { pinmux = <PINMUX_GPIO135__FUNC_SPIM0_MI>; bias-pull-down; }; }; spi1_pins: spi1-default-pins { pins-cs-mosi-clk { pinmux = <PINMUX_GPIO136__FUNC_SPIM1_CSB>, <PINMUX_GPIO138__FUNC_SPIM1_MO>, <PINMUX_GPIO137__FUNC_SPIM1_CLK>; bias-disable; }; pins-miso { pinmux = <PINMUX_GPIO139__FUNC_SPIM1_MI>; bias-pull-down; }; }; uart0_pins: uart0-pins { pins-rx { pinmux = <PINMUX_GPIO99__FUNC_URXD0>; input-enable; bias-pull-up; }; pins-tx { pinmux = <PINMUX_GPIO98__FUNC_UTXD0>; }; }; uart1_pins: uart1-pins { pins-rx { pinmux = <PINMUX_GPIO103__FUNC_URXD1>; input-enable; bias-pull-up; }; pins-tx { pinmux = <PINMUX_GPIO102__FUNC_UTXD1>; }; pins-rts { pinmux = <PINMUX_GPIO100__FUNC_URTS1>; }; pins-cts { pinmux = <PINMUX_GPIO101__FUNC_UCTS1>; input-enable; }; }; uart2_pins: uart2-pins { pins-rx { pinmux = <PINMUX_GPIO68__FUNC_URXD2>; input-enable; bias-pull-up; }; pins-tx { pinmux = <PINMUX_GPIO67__FUNC_UTXD2>; }; pins-rts { pinmux = <PINMUX_GPIO66__FUNC_URTS2>; }; pins-cts { pinmux = <PINMUX_GPIO65__FUNC_UCTS2>; input-enable; }; }; uart3_pins: uart3-pins { pins-rx { pinmux = <PINMUX_GPIO5__FUNC_URXD3>; input-enable; bias-pull-up = <MTK_PUPD_SET_R1R0_01>; }; pins-tx { pinmux = <PINMUX_GPIO4__FUNC_UTXD3>; }; }; uart4_pins: uart4-pins { pins-rx { pinmux = <PINMUX_GPIO7__FUNC_URXD4>; input-enable; bias-pull-up; }; pins-tx { pinmux = <PINMUX_GPIO6__FUNC_UTXD4>; }; }; }; &pmic { interrupts-extended = <&pio 222 IRQ_TYPE_LEVEL_HIGH>; }; &scp { memory-region = <&scp_mem>; firmware-name = "mediatek/mt8195/scp.img"; status = "okay"; }; &spmi { #address-cells = <2>; #size-cells = <0>; mt6315@6 { compatible = "mediatek,mt6315-regulator"; reg = <0x6 SPMI_USID>; regulators { mt6315_6_vbuck1: vbuck1 { regulator-name = "Vbcpu"; regulator-min-microvolt = <300000>; regulator-max-microvolt = <1193750>; regulator-enable-ramp-delay = <256>; regulator-ramp-delay = <6250>; regulator-allowed-modes = <0 1 2>; regulator-always-on; }; }; }; mt6315@7 { compatible = "mediatek,mt6315-regulator"; reg = <0x7 SPMI_USID>; regulators { mt6315_7_vbuck1: vbuck1 { regulator-name = "Vgpu"; regulator-min-microvolt = <625000>; regulator-max-microvolt = <1193750>; regulator-enable-ramp-delay = <256>; regulator-ramp-delay = <6250>; regulator-allowed-modes = <0 1 2>; regulator-always-on; }; }; }; }; /* USB3.2 front port */ &ssusb0 { dr_mode = "host"; vusb33-supply = <&mt6359_vusb_ldo_reg>; status = "okay"; }; /* USB2.0 M.2 Key-E */ &ssusb2 { vusb33-supply = <&mt6359_vusb_ldo_reg>; status = "okay"; }; /* USB2.0 to on-board usb hub */ &ssusb3 { vusb33-supply = <&mt6359_vusb_ldo_reg>; status = "okay"; }; &spi0 { pinctrl-names = "default"; pinctrl-0 = <&spi0_pins>; mediatek,pad-select = <0>; status = "okay"; tpm: tpm@0 { compatible = "infineon,slb9670", "tcg,tpm_tis-spi"; reg = <0>; spi-max-frequency = <18500000>; }; }; /* B2B connector */ &spi1 { pinctrl-names = "default"; pinctrl-0 = <&spi1_pins>; mediatek,pad-select = <0>; status = "okay"; }; &thermal_zones { cpu-thermal { polling-delay = <1000>; 
/* milliseconds */ polling-delay-passive = <0>; /* milliseconds */ thermal-sensors = <&thermal_sensor0>; trips { trip-alert { temperature = <85000>; hysteresis = <2000>; type = "passive"; }; trip-crit { temperature = <95000>; hysteresis = <2000>; type = "critical"; }; }; }; pcb-top-thermal { polling-delay = <1000>; /* milliseconds */ polling-delay-passive = <0>; /* milliseconds */ thermal-sensors = <&thermal_sensor1>; trips { trip-alert { temperature = <75000>; hysteresis = <2000>; type = "passive"; }; trip-crit { temperature = <85000>; hysteresis = <2000>; type = "critical"; }; }; }; pcb-bottom-thermal { polling-delay = <1000>; /* milliseconds */ polling-delay-passive = <0>; /* milliseconds */ thermal-sensors = <&thermal_sensor2>; trips { trip-alert { temperature = <75000>; hysteresis = <2000>; type = "passive"; }; trip-crit { temperature = <85000>; hysteresis = <2000>; type = "critical"; }; }; }; }; &uart0 { pinctrl-names = "default"; pinctrl-0 = <&uart0_pins>; status = "okay"; }; &uart1 { pinctrl-names = "default"; pinctrl-0 = <&uart1_pins>; uart-has-rtscts; status = "okay"; }; &uart2 { pinctrl-names = "default"; pinctrl-0 = <&uart2_pins>; uart-has-rtscts; status = "okay"; }; &uart3 { pinctrl-names = "default"; pinctrl-0 = <&uart3_pins>; status = "okay"; }; &uart4 { pinctrl-names = "default"; pinctrl-0 = <&uart4_pins>; status = "okay"; }; /* USB3 */ &u3phy0 { status = "okay"; }; /* PCIe1/USB2 */ &u3phy1 { status = "okay"; }; /* USB2 */ &u3phy2 { status = "okay"; }; /* USB2 */ &u3phy3 { status = "okay"; }; /* USB3.2 front port */ &xhci0 { status = "okay"; }; /* USB2.0 M.2 Key-B */ &xhci1 { phys = <&u2port1 PHY_TYPE_USB2>; vusb33-supply = <&mt6359_vusb_ldo_reg>; mediatek,u3p-dis-msk = <0x01>; status = "okay"; }; /* USB2.0 M.2 Key-E */ &xhci2 { status = "okay"; }; /* USB2.0 to on-board usb hub */ &xhci3 { status = "okay"; };
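/*
 * A minimal C sketch, not part of the device tree above, of how a
 * generic-adc-thermal temperature-lookup-table such as the ones in the
 * thermal-sensor-* nodes is assumed to be evaluated: entries are
 * {temperature in millidegrees C, ADC value} pairs with ADC values falling
 * as temperature rises, and the driver interpolates linearly between the
 * two neighbouring entries. All names here are illustrative only.
 */
struct adc_temp_pair { int temp_mc; int adc; };

static int adc_to_temp_sketch(const struct adc_temp_pair *tbl, int nent, int adc)
{
	int i;

	if (adc >= tbl[0].adc)	/* colder than the table's first entry */
		return tbl[0].temp_mc;

	for (i = 1; i < nent; i++) {
		if (adc >= tbl[i].adc)	/* between entries i-1 and i */
			return tbl[i - 1].temp_mc +
			       (tbl[i].temp_mc - tbl[i - 1].temp_mc) *
			       (tbl[i - 1].adc - adc) /
			       (tbl[i - 1].adc - tbl[i].adc);
	}

	return tbl[nent - 1].temp_mc;	/* hotter than the last entry */
}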
// SPDX-License-Identifier: GPL-2.0+ /* Copyright (c) 2015-2016 Quantenna Communications. All rights reserved. */ #include <linux/types.h> #include <linux/io.h> #include "shm_ipc.h" #undef pr_fmt #define pr_fmt(fmt) "qtnfmac shm_ipc: %s: " fmt, __func__ static bool qtnf_shm_ipc_has_new_data(struct qtnf_shm_ipc *ipc) { const u32 flags = readl(&ipc->shm_region->headroom.hdr.flags); return (flags & QTNF_SHM_IPC_NEW_DATA); } static void qtnf_shm_handle_new_data(struct qtnf_shm_ipc *ipc) { size_t size; bool rx_buff_ok = true; struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr; shm_reg_hdr = &ipc->shm_region->headroom.hdr; size = readw(&shm_reg_hdr->data_len); if (unlikely(size == 0 || size > QTN_IPC_MAX_DATA_SZ)) { pr_err("wrong rx packet size: %zu\n", size); rx_buff_ok = false; } if (likely(rx_buff_ok)) { ipc->rx_packet_count++; ipc->rx_callback.fn(ipc->rx_callback.arg, ipc->shm_region->data, size); } writel(QTNF_SHM_IPC_ACK, &shm_reg_hdr->flags); readl(&shm_reg_hdr->flags); /* flush PCIe write */ ipc->interrupt.fn(ipc->interrupt.arg); } static void qtnf_shm_ipc_irq_work(struct work_struct *work) { struct qtnf_shm_ipc *ipc = container_of(work, struct qtnf_shm_ipc, irq_work); while (qtnf_shm_ipc_has_new_data(ipc)) qtnf_shm_handle_new_data(ipc); } static void qtnf_shm_ipc_irq_inbound_handler(struct qtnf_shm_ipc *ipc) { u32 flags; flags = readl(&ipc->shm_region->headroom.hdr.flags); if (flags & QTNF_SHM_IPC_NEW_DATA) queue_work(ipc->workqueue, &ipc->irq_work); } static void qtnf_shm_ipc_irq_outbound_handler(struct qtnf_shm_ipc *ipc) { u32 flags; if (!READ_ONCE(ipc->waiting_for_ack)) return; flags = readl(&ipc->shm_region->headroom.hdr.flags); if (flags & QTNF_SHM_IPC_ACK) { WRITE_ONCE(ipc->waiting_for_ack, 0); complete(&ipc->tx_completion); } } int qtnf_shm_ipc_init(struct qtnf_shm_ipc *ipc, enum qtnf_shm_ipc_direction direction, struct qtnf_shm_ipc_region __iomem *shm_region, struct workqueue_struct *workqueue, const struct qtnf_shm_ipc_int *interrupt, const struct qtnf_shm_ipc_rx_callback *rx_callback) { BUILD_BUG_ON(offsetof(struct qtnf_shm_ipc_region, data) != QTN_IPC_REG_HDR_SZ); BUILD_BUG_ON(sizeof(struct qtnf_shm_ipc_region) > QTN_IPC_REG_SZ); ipc->shm_region = shm_region; ipc->direction = direction; ipc->interrupt = *interrupt; ipc->rx_callback = *rx_callback; ipc->tx_packet_count = 0; ipc->rx_packet_count = 0; ipc->workqueue = workqueue; ipc->waiting_for_ack = 0; ipc->tx_timeout_count = 0; switch (direction) { case QTNF_SHM_IPC_OUTBOUND: ipc->irq_handler = qtnf_shm_ipc_irq_outbound_handler; break; case QTNF_SHM_IPC_INBOUND: ipc->irq_handler = qtnf_shm_ipc_irq_inbound_handler; break; default: return -EINVAL; } INIT_WORK(&ipc->irq_work, qtnf_shm_ipc_irq_work); init_completion(&ipc->tx_completion); return 0; } void qtnf_shm_ipc_free(struct qtnf_shm_ipc *ipc) { complete_all(&ipc->tx_completion); } int qtnf_shm_ipc_send(struct qtnf_shm_ipc *ipc, const u8 *buf, size_t size) { int ret = 0; struct qtnf_shm_ipc_region_header __iomem *shm_reg_hdr; shm_reg_hdr = &ipc->shm_region->headroom.hdr; if (unlikely(size > QTN_IPC_MAX_DATA_SZ)) return -E2BIG; ipc->tx_packet_count++; writew(size, &shm_reg_hdr->data_len); memcpy_toio(ipc->shm_region->data, buf, size); /* sync previous writes before proceeding */ dma_wmb(); WRITE_ONCE(ipc->waiting_for_ack, 1); /* sync previous memory write before announcing new data ready */ wmb(); writel(QTNF_SHM_IPC_NEW_DATA, &shm_reg_hdr->flags); readl(&shm_reg_hdr->flags); /* flush PCIe write */ ipc->interrupt.fn(ipc->interrupt.arg); if 
(!wait_for_completion_timeout(&ipc->tx_completion, QTN_SHM_IPC_ACK_TIMEOUT)) { ret = -ETIMEDOUT; ipc->tx_timeout_count++; pr_err("TX ACK timeout\n"); } /* now we're not waiting for ACK even in case of timeout */ WRITE_ONCE(ipc->waiting_for_ack, 0); return ret; }
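/*
 * A usage sketch, not part of the original file, assuming a PCIe bus glue
 * layer that owns the shared-memory region, a workqueue and a doorbell
 * generator. It sets up an outbound channel and sends one message; the
 * NEW_DATA/ACK handshake implemented by qtnf_shm_ipc_send() and
 * qtnf_shm_ipc_irq_outbound_handler() above does the rest. All parameters
 * are supplied by the hypothetical caller.
 */
static int qtnf_shm_tx_example(struct qtnf_shm_ipc *ipc,
			       struct qtnf_shm_ipc_region __iomem *region,
			       struct workqueue_struct *wq,
			       const struct qtnf_shm_ipc_int *irq_gen,
			       const struct qtnf_shm_ipc_rx_callback *rx_cb,
			       const u8 *msg, size_t len)
{
	int ret;

	ret = qtnf_shm_ipc_init(ipc, QTNF_SHM_IPC_OUTBOUND, region, wq,
				irq_gen, rx_cb);
	if (ret)
		return ret;

	/* Blocks until the peer ACKs or QTN_SHM_IPC_ACK_TIMEOUT expires. */
	return qtnf_shm_ipc_send(ipc, msg, len);
}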
/* * Copyright (c) 2013-2015, Mellanox Technologies, Ltd. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #ifndef __MLX5_CORE_H__ #define __MLX5_CORE_H__ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/if_link.h> #include <linux/firmware.h> #include <linux/mlx5/cq.h> #include <linux/mlx5/fs.h> #include <linux/mlx5/driver.h> #include "lib/devcom.h" extern uint mlx5_core_debug_mask; #define mlx5_core_dbg(__dev, format, ...) \ dev_dbg((__dev)->device, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_dbg_once(__dev, format, ...) \ dev_dbg_once((__dev)->device, \ "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_dbg_mask(__dev, mask, format, ...) \ do { \ if ((mask) & mlx5_core_debug_mask) \ mlx5_core_dbg(__dev, format, ##__VA_ARGS__); \ } while (0) #define mlx5_core_err(__dev, format, ...) \ dev_err((__dev)->device, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_err_rl(__dev, format, ...) \ dev_err_ratelimited((__dev)->device, \ "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn(__dev, format, ...) \ dev_warn((__dev)->device, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn_once(__dev, format, ...) \ dev_warn_once((__dev)->device, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_warn_rl(__dev, format, ...) \ dev_warn_ratelimited((__dev)->device, \ "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define mlx5_core_info(__dev, format, ...) \ dev_info((__dev)->device, format, ##__VA_ARGS__) #define mlx5_core_info_rl(__dev, format, ...) 
\ dev_info_ratelimited((__dev)->device, \ "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) #define ACCESS_KEY_LEN 32 #define FT_ID_FT_TYPE_OFFSET 24 struct mlx5_cmd_allow_other_vhca_access_attr { u16 obj_type; u32 obj_id; u8 access_key[ACCESS_KEY_LEN]; }; struct mlx5_cmd_alias_obj_create_attr { u32 obj_id; u16 vhca_id; u16 obj_type; u8 access_key[ACCESS_KEY_LEN]; }; static inline void mlx5_printk(struct mlx5_core_dev *dev, int level, const char *format, ...) { struct device *device = dev->device; struct va_format vaf; va_list args; if (WARN_ONCE(level < LOGLEVEL_EMERG || level > LOGLEVEL_DEBUG, "Level %d is out of range, set to default level\n", level)) level = LOGLEVEL_DEFAULT; va_start(args, format); vaf.fmt = format; vaf.va = &args; dev_printk_emit(level, device, "%s %s: %pV", dev_driver_string(device), dev_name(device), &vaf); va_end(args); } #define mlx5_log(__dev, level, format, ...) \ mlx5_printk(__dev, level, "%s:%d:(pid %d): " format, \ __func__, __LINE__, current->pid, \ ##__VA_ARGS__) static inline struct device *mlx5_core_dma_dev(struct mlx5_core_dev *dev) { return &dev->pdev->dev; } enum { MLX5_CMD_DATA, /* print command payload only */ MLX5_CMD_TIME, /* print command execution time */ }; enum { MLX5_DRIVER_STATUS_ABORTED = 0xfe, MLX5_DRIVER_SYND = 0xbadd00de, }; enum mlx5_semaphore_space_address { MLX5_SEMAPHORE_SPACE_DOMAIN = 0xA, MLX5_SEMAPHORE_SW_RESET = 0x20, }; #define MLX5_DEFAULT_PROF 2 #define MLX5_SF_PROF 3 #define MLX5_NUM_FW_CMD_THREADS 8 #define MLX5_DEV_MAX_WQS MLX5_NUM_FW_CMD_THREADS static inline int mlx5_flexible_inlen(struct mlx5_core_dev *dev, size_t fixed, size_t item_size, size_t num_items, const char *func, int line) { int inlen; if (fixed > INT_MAX || item_size > INT_MAX || num_items > INT_MAX) { mlx5_core_err(dev, "%s: %s:%d: input values too big: %zu + %zu * %zu\n", __func__, func, line, fixed, item_size, num_items); return -ENOMEM; } if (check_mul_overflow((int)item_size, (int)num_items, &inlen)) { mlx5_core_err(dev, "%s: %s:%d: multiplication overflow: %zu + %zu * %zu\n", __func__, func, line, fixed, item_size, num_items); return -ENOMEM; } if (check_add_overflow((int)fixed, inlen, &inlen)) { mlx5_core_err(dev, "%s: %s:%d: addition overflow: %zu + %zu * %zu\n", __func__, func, line, fixed, item_size, num_items); return -ENOMEM; } return inlen; } #define MLX5_FLEXIBLE_INLEN(dev, fixed, item_size, num_items) \ mlx5_flexible_inlen(dev, fixed, item_size, num_items, __func__, __LINE__) int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type); int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type, enum mlx5_cap_mode cap_mode); int mlx5_query_hca_caps(struct mlx5_core_dev *dev); int mlx5_query_board_id(struct mlx5_core_dev *dev); int mlx5_query_module_num(struct mlx5_core_dev *dev, int *module_num); int mlx5_cmd_init(struct mlx5_core_dev *dev); void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); int mlx5_cmd_enable(struct mlx5_core_dev *dev); void mlx5_cmd_disable(struct mlx5_core_dev *dev); void mlx5_cmd_set_state(struct mlx5_core_dev *dev, enum mlx5_cmdif_state cmdif_state); int mlx5_cmd_init_hca(struct mlx5_core_dev *dev, u32 *sw_owner_id); int mlx5_cmd_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_force_teardown_hca(struct mlx5_core_dev *dev); int mlx5_cmd_fast_teardown_hca(struct mlx5_core_dev *dev); void mlx5_enter_error_state(struct mlx5_core_dev *dev, bool force); void mlx5_error_sw_reset(struct mlx5_core_dev *dev); u32 mlx5_health_check_fatal_sensors(struct 
mlx5_core_dev *dev); int mlx5_health_wait_pci_up(struct mlx5_core_dev *dev); void mlx5_disable_device(struct mlx5_core_dev *dev); int mlx5_recover_device(struct mlx5_core_dev *dev); int mlx5_sriov_init(struct mlx5_core_dev *dev); void mlx5_sriov_cleanup(struct mlx5_core_dev *dev); int mlx5_sriov_attach(struct mlx5_core_dev *dev); void mlx5_sriov_detach(struct mlx5_core_dev *dev); int mlx5_core_sriov_configure(struct pci_dev *dev, int num_vfs); void mlx5_sriov_disable(struct pci_dev *pdev, bool num_vf_change); int mlx5_core_sriov_set_msix_vec_count(struct pci_dev *vf, int msix_vec_count); int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id); int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id); bool mlx5_qos_element_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy); bool mlx5_qos_tsar_type_supported(struct mlx5_core_dev *dev, int type, u8 hierarchy); int mlx5_create_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, void *context, u32 *element_id); int mlx5_modify_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, void *context, u32 element_id, u32 modify_bitmask); int mlx5_destroy_scheduling_element_cmd(struct mlx5_core_dev *dev, u8 hierarchy, u32 element_id); int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages); void mlx5_cmd_flush(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, u8 access_reg_group); int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group, u8 access_reg_group); int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam, u8 feature_group, u8 access_reg_group); int mlx5_query_mpir_reg(struct mlx5_core_dev *dev, u32 *mpir); void mlx5_lag_add_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_remove_netdev(struct mlx5_core_dev *dev, struct net_device *netdev); void mlx5_lag_add_mdev(struct mlx5_core_dev *dev); void mlx5_lag_remove_mdev(struct mlx5_core_dev *dev); void mlx5_lag_disable_change(struct mlx5_core_dev *dev); void mlx5_lag_enable_change(struct mlx5_core_dev *dev); int mlx5_events_init(struct mlx5_core_dev *dev); void mlx5_events_cleanup(struct mlx5_core_dev *dev); void mlx5_events_start(struct mlx5_core_dev *dev); void mlx5_events_stop(struct mlx5_core_dev *dev); int mlx5_adev_idx_alloc(void); void mlx5_adev_idx_free(int idx); void mlx5_adev_cleanup(struct mlx5_core_dev *dev); int mlx5_adev_init(struct mlx5_core_dev *dev); int mlx5_attach_device(struct mlx5_core_dev *dev); void mlx5_detach_device(struct mlx5_core_dev *dev, bool suspend); int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev); bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev); void mlx5_fw_reporters_create(struct mlx5_core_dev *dev); int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size); int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode); int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode); struct mlx5_dm *mlx5_dm_create(struct mlx5_core_dev *dev); void mlx5_dm_cleanup(struct mlx5_core_dev *dev); #define MLX5_PPS_CAP(mdev) (MLX5_CAP_GEN((mdev), pps) && \ MLX5_CAP_GEN((mdev), pps_modify) && \ MLX5_CAP_MCAM_FEATURE((mdev), mtpps_fs) && \ 
MLX5_CAP_MCAM_FEATURE((mdev), mtpps_enh_out_per_adj)) int mlx5_firmware_flash(struct mlx5_core_dev *dev, const struct firmware *fw, struct netlink_ext_ack *extack); int mlx5_fw_version_query(struct mlx5_core_dev *dev, u32 *running_ver, u32 *stored_ver); #ifdef CONFIG_MLX5_CORE_EN int mlx5e_init(void); void mlx5e_cleanup(void); #else static inline int mlx5e_init(void){ return 0; } static inline void mlx5e_cleanup(void){} #endif static inline bool mlx5_sriov_is_enabled(struct mlx5_core_dev *dev) { return pci_num_vf(dev->pdev) ? true : false; } int mlx5_rescan_drivers_locked(struct mlx5_core_dev *dev); static inline int mlx5_rescan_drivers(struct mlx5_core_dev *dev) { int ret; mlx5_devcom_comp_lock(dev->priv.hca_devcom_comp); ret = mlx5_rescan_drivers_locked(dev); mlx5_devcom_comp_unlock(dev->priv.hca_devcom_comp); return ret; } u8 mlx5_get_nic_state(struct mlx5_core_dev *dev); void mlx5_set_nic_state(struct mlx5_core_dev *dev, u8 state); static inline bool mlx5_core_is_sf(const struct mlx5_core_dev *dev) { return dev->coredev_type == MLX5_COREDEV_SF; } static inline struct auxiliary_device * mlx5_sf_coredev_to_adev(struct mlx5_core_dev *mdev) { return container_of(mdev->device, struct auxiliary_device, dev); } int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx); void mlx5_mdev_uninit(struct mlx5_core_dev *dev); int mlx5_init_one(struct mlx5_core_dev *dev); int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev); void mlx5_uninit_one(struct mlx5_core_dev *dev); void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend); void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend); int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery); int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery); int mlx5_init_one_light(struct mlx5_core_dev *dev); void mlx5_uninit_one_light(struct mlx5_core_dev *dev); void mlx5_unload_one_light(struct mlx5_core_dev *dev); int mlx5_vport_set_other_func_cap(struct mlx5_core_dev *dev, const void *hca_cap, u16 vport, u16 opmod); #define mlx5_vport_get_other_func_general_cap(dev, vport, out) \ mlx5_vport_get_other_func_cap(dev, vport, out, MLX5_CAP_GENERAL) static inline u32 mlx5_sriov_get_vf_total_msix(struct pci_dev *pdev) { struct mlx5_core_dev *dev = pci_get_drvdata(pdev); return MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix); } bool mlx5_eth_supported(struct mlx5_core_dev *dev); bool mlx5_rdma_supported(struct mlx5_core_dev *dev); bool mlx5_vnet_supported(struct mlx5_core_dev *dev); bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev); int mlx5_cmd_allow_other_vhca_access(struct mlx5_core_dev *dev, struct mlx5_cmd_allow_other_vhca_access_attr *attr); int mlx5_cmd_alias_obj_create(struct mlx5_core_dev *dev, struct mlx5_cmd_alias_obj_create_attr *alias_attr, u32 *obj_id); int mlx5_cmd_alias_obj_destroy(struct mlx5_core_dev *dev, u32 obj_id, u16 obj_type); static inline u16 mlx5_core_ec_vf_vport_base(const struct mlx5_core_dev *dev) { return MLX5_CAP_GEN_2(dev, ec_vf_vport_base); } static inline u16 mlx5_core_ec_sriov_enabled(const struct mlx5_core_dev *dev) { return mlx5_core_is_ecpf(dev) && mlx5_core_ec_vf_vport_base(dev); } static inline bool mlx5_core_is_ec_vf_vport(const struct mlx5_core_dev *dev, u16 vport_num) { int base_vport = mlx5_core_ec_vf_vport_base(dev); int max_vport = base_vport + mlx5_core_max_ec_vfs(dev); if (!mlx5_core_ec_sriov_enabled(dev)) return false; return (vport_num >= base_vport && vport_num < max_vport); } static inline int mlx5_vport_to_func_id(const struct 
mlx5_core_dev *dev, u16 vport, bool ec_vf_func) { return ec_vf_func ? vport - mlx5_core_ec_vf_vport_base(dev) + 1 : vport; } static inline int mlx5_max_eq_cap_get(const struct mlx5_core_dev *dev) { if (MLX5_CAP_GEN_2(dev, max_num_eqs_24b)) return MLX5_CAP_GEN_2(dev, max_num_eqs_24b); if (MLX5_CAP_GEN(dev, max_num_eqs)) return MLX5_CAP_GEN(dev, max_num_eqs); return 1 << MLX5_CAP_GEN(dev, log_max_eq); } #endif /* __MLX5_CORE_H__ */
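/*
 * A usage sketch, not part of the original header, for
 * MLX5_FLEXIBLE_INLEN(): the macro returns the overflow-checked value of
 * fixed + item_size * num_items, or -ENOMEM on overflow, so callers sizing
 * a variable-length command buffer are assumed to follow this pattern.
 * The helper name and parameters are illustrative only.
 */
static inline void *mlx5_alloc_flexible_in_sketch(struct mlx5_core_dev *dev,
						  size_t fixed_hdr_sz,
						  size_t entry_sz,
						  size_t nentries, int *inlen)
{
	*inlen = MLX5_FLEXIBLE_INLEN(dev, fixed_hdr_sz, entry_sz, nentries);
	if (*inlen < 0)
		return NULL;	/* sizes overflowed an int */

	return kvzalloc(*inlen, GFP_KERNEL);
}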
/* SPDX-License-Identifier: GPL-2.0-only */ /* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2013 QLogic Corporation */ #ifndef _QL4XNVRM_H_ #define _QL4XNVRM_H_ /** * AM29LV Flash definitions **/ #define FM93C56A_SIZE_8 0x100 #define FM93C56A_SIZE_16 0x80 #define FM93C66A_SIZE_8 0x200 #define FM93C66A_SIZE_16 0x100/* 4010 */ #define FM93C86A_SIZE_16 0x400/* 4022 */ #define FM93C56A_START 0x1 /* Commands */ #define FM93C56A_READ 0x2 #define FM93C56A_WEN 0x0 #define FM93C56A_WRITE 0x1 #define FM93C56A_WRITE_ALL 0x0 #define FM93C56A_WDS 0x0 #define FM93C56A_ERASE 0x3 #define FM93C56A_ERASE_ALL 0x0 /* Command Extensions */ #define FM93C56A_WEN_EXT 0x3 #define FM93C56A_WRITE_ALL_EXT 0x1 #define FM93C56A_WDS_EXT 0x0 #define FM93C56A_ERASE_ALL_EXT 0x2 /* Address Bits */ #define FM93C56A_NO_ADDR_BITS_16 8 /* 4010 */ #define FM93C56A_NO_ADDR_BITS_8 9 /* 4010 */ #define FM93C86A_NO_ADDR_BITS_16 10 /* 4022 */ /* Data Bits */ #define FM93C56A_DATA_BITS_16 16 #define FM93C56A_DATA_BITS_8 8 /* Special Bits */ #define FM93C56A_READ_DUMMY_BITS 1 #define FM93C56A_READY 0 #define FM93C56A_BUSY 1 #define FM93C56A_CMD_BITS 2 /* Auburn Bits */ #define AUBURN_EEPROM_DI 0x8 #define AUBURN_EEPROM_DI_0 0x0 #define AUBURN_EEPROM_DI_1 0x8 #define AUBURN_EEPROM_DO 0x4 #define AUBURN_EEPROM_DO_0 0x0 #define AUBURN_EEPROM_DO_1 0x4 #define AUBURN_EEPROM_CS 0x2 #define AUBURN_EEPROM_CS_0 0x0 #define AUBURN_EEPROM_CS_1 0x2 #define AUBURN_EEPROM_CLK_RISE 0x1 #define AUBURN_EEPROM_CLK_FALL 0x0 /**/ /* EEPROM format */ /**/ struct bios_params { uint16_t SpinUpDelay:1; uint16_t BIOSDisable:1; uint16_t MMAPEnable:1; uint16_t BootEnable:1; uint16_t Reserved0:12; uint8_t bootID0:7; uint8_t bootID0Valid:1; uint8_t bootLUN0[8]; uint8_t bootID1:7; uint8_t bootID1Valid:1; uint8_t bootLUN1[8]; uint16_t MaxLunsPerTarget; uint8_t Reserved1[10]; }; struct eeprom_port_cfg { /* MTU MAC 0 */ u16 etherMtu_mac; /* Flow Control MAC 0 */ u16 pauseThreshold_mac; u16 resumeThreshold_mac; u16 reserved[13]; }; struct eeprom_function_cfg { u8 reserved[30]; /* MAC ADDR */ u8 macAddress[6]; u8 macAddressSecondary[6]; u16 subsysVendorId; u16 subsysDeviceId; }; struct eeprom_data { union { struct { /* isp4010 */ u8 asic_id[4]; /* x00 */ u8 version; /* x04 */ u8 reserved; /* x05 */ u16 board_id; /* x06 */ #define EEPROM_BOARDID_ELDORADO 1 #define EEPROM_BOARDID_PLACER 2 #define EEPROM_SERIAL_NUM_SIZE 16 u8 serial_number[EEPROM_SERIAL_NUM_SIZE]; /* x08 */ /* ExtHwConfig: */ /* Offset = 24bytes * * | SSRAM Size| |ST|PD|SDRAM SZ| W| B| SP | | * |15|14|13|12|11|10| 9| 8| 7| 6| 5| 4| 3| 2| 1| 0| * +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+ */ u16 ext_hw_conf; /* x18 */ u8 mac0[6]; /* x1A */ u8 mac1[6]; /* x20 */ u8 mac2[6]; /* x26 */ u8 mac3[6]; /* x2C */ u16 etherMtu; /* x32 */ u16 macConfig; /* x34 */ #define MAC_CONFIG_ENABLE_ANEG 0x0001 #define MAC_CONFIG_ENABLE_PAUSE 0x0002 u16 phyConfig; /* x36 */ #define PHY_CONFIG_PHY_ADDR_MASK 0x1f #define PHY_CONFIG_ENABLE_FW_MANAGEMENT_MASK 0x20 u16 reserved_56; /* x38 */ #define EEPROM_UNUSED_1_SIZE 2 u8 unused_1[EEPROM_UNUSED_1_SIZE]; /* x3A */ u16 bufletSize; /* x3C */ u16 bufletCount; /* x3E */ u16 bufletPauseThreshold; /* x40 */ u16 tcpWindowThreshold50; /* x42 */ u16 tcpWindowThreshold25; /* x44 */ u16 tcpWindowThreshold0; /* x46 */ u16 ipHashTableBaseHi; /* x48 */ u16 ipHashTableBaseLo; /* x4A */ u16 ipHashTableSize; /* x4C */ u16 tcpHashTableBaseHi; /* x4E */ u16 tcpHashTableBaseLo; /* x50 */ u16 tcpHashTableSize; /* x52 */ u16 ncbTableBaseHi; /* x54 */ u16 ncbTableBaseLo; /* x56 */ u16 
ncbTableSize; /* x58 */ u16 drbTableBaseHi; /* x5A */ u16 drbTableBaseLo; /* x5C */ u16 drbTableSize; /* x5E */ #define EEPROM_UNUSED_2_SIZE 4 u8 unused_2[EEPROM_UNUSED_2_SIZE]; /* x60 */ u16 ipReassemblyTimeout; /* x64 */ u16 tcpMaxWindowSizeHi; /* x66 */ u16 tcpMaxWindowSizeLo; /* x68 */ u32 net_ip_addr0; /* x6A Added for TOE * functionality. */ u32 net_ip_addr1; /* x6E */ u32 scsi_ip_addr0; /* x72 */ u32 scsi_ip_addr1; /* x76 */ #define EEPROM_UNUSED_3_SIZE 128 /* changed from 144 to account * for ip addresses */ u8 unused_3[EEPROM_UNUSED_3_SIZE]; /* x7A */ u16 subsysVendorId_f0; /* xFA */ u16 subsysDeviceId_f0; /* xFC */ /* Address = 0x7F */ #define FM93C56A_SIGNATURE 0x9356 #define FM93C66A_SIGNATURE 0x9366 u16 signature; /* xFE */ #define EEPROM_UNUSED_4_SIZE 250 u8 unused_4[EEPROM_UNUSED_4_SIZE]; /* x100 */ u16 subsysVendorId_f1; /* x1FA */ u16 subsysDeviceId_f1; /* x1FC */ u16 checksum; /* x1FE */ } __attribute__ ((packed)) isp4010; struct { /* isp4022 */ u8 asicId[4]; /* x00 */ u8 version; /* x04 */ u8 reserved_5; /* x05 */ u16 boardId; /* x06 */ u8 boardIdStr[16]; /* x08 */ u8 serialNumber[16]; /* x18 */ /* External Hardware Configuration */ u16 ext_hw_conf; /* x28 */ /* MAC 0 CONFIGURATION */ struct eeprom_port_cfg macCfg_port0; /* x2A */ /* MAC 1 CONFIGURATION */ struct eeprom_port_cfg macCfg_port1; /* x4A */ /* DDR SDRAM Configuration */ u16 bufletSize; /* x6A */ u16 bufletCount; /* x6C */ u16 tcpWindowThreshold50; /* x6E */ u16 tcpWindowThreshold25; /* x70 */ u16 tcpWindowThreshold0; /* x72 */ u16 ipHashTableBaseHi; /* x74 */ u16 ipHashTableBaseLo; /* x76 */ u16 ipHashTableSize; /* x78 */ u16 tcpHashTableBaseHi; /* x7A */ u16 tcpHashTableBaseLo; /* x7C */ u16 tcpHashTableSize; /* x7E */ u16 ncbTableBaseHi; /* x80 */ u16 ncbTableBaseLo; /* x82 */ u16 ncbTableSize; /* x84 */ u16 drbTableBaseHi; /* x86 */ u16 drbTableBaseLo; /* x88 */ u16 drbTableSize; /* x8A */ u16 reserved_142[4]; /* x8C */ /* TCP/IP Parameters */ u16 ipReassemblyTimeout; /* x94 */ u16 tcpMaxWindowSize; /* x96 */ u16 ipSecurity; /* x98 */ u8 reserved_156[294]; /* x9A */ u16 qDebug[8]; /* QLOGIC USE ONLY x1C0 */ struct eeprom_function_cfg funcCfg_fn0; /* x1D0 */ u16 reserved_510; /* x1FE */ /* Address = 512 */ u8 oemSpace[432]; /* x200 */ struct bios_params sBIOSParams_fn1; /* x3B0 */ struct eeprom_function_cfg funcCfg_fn1; /* x3D0 */ u16 reserved_1022; /* x3FE */ /* Address = 1024 */ u8 reserved_1024[464]; /* x400 */ struct eeprom_function_cfg funcCfg_fn2; /* x5D0 */ u16 reserved_1534; /* x5FE */ /* Address = 1536 */ u8 reserved_1536[432]; /* x600 */ struct bios_params sBIOSParams_fn3; /* x7B0 */ struct eeprom_function_cfg funcCfg_fn3; /* x7D0 */ u16 checksum; /* x7FE */ } __attribute__ ((packed)) isp4022; }; }; #endif /* _QL4XNVRM_H_ */
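/*
 * Illustrative sketch, not part of the qla4xxx driver: composing one
 * serial-EEPROM bus state from the AUBURN_* bit definitions above. A real
 * access routine would write the returned value to the adapter's NVRAM
 * register, toggling AUBURN_EEPROM_CLK_RISE/CLK_FALL around each data bit
 * and sampling AUBURN_EEPROM_DO on reads. eeprom_bus_word() is a
 * hypothetical helper name; <linux/types.h> is assumed for u32.
 */
static inline u32 eeprom_bus_word(int chip_select, int data_in, int clock_high)
{
	u32 v = 0;

	v |= chip_select ? AUBURN_EEPROM_CS_1 : AUBURN_EEPROM_CS_0;
	v |= data_in ? AUBURN_EEPROM_DI_1 : AUBURN_EEPROM_DI_0;
	v |= clock_high ? AUBURN_EEPROM_CLK_RISE : AUBURN_EEPROM_CLK_FALL;

	return v;
}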
// SPDX-License-Identifier: GPL-2.0 /* * SH7366 Setup * * Copyright (C) 2008 Renesas Solutions * * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/uio_driver.h> #include <linux/sh_timer.h> #include <linux/sh_intc.h> #include <linux/usb/r8a66597.h> #include <asm/clock.h> #include <asm/platform_early.h> static struct plat_sci_port scif0_platform_data = { .scscr = SCSCR_REIE, .type = PORT_SCIF, }; static struct resource scif0_resources[] = { DEFINE_RES_MEM(0xffe00000, 0x100), DEFINE_RES_IRQ(evt2irq(0xc00)), }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .resource = scif0_resources, .num_resources = ARRAY_SIZE(scif0_resources), .dev = { .platform_data = &scif0_platform_data, }, }; static struct resource iic_resources[] = { [0] = { .name = "IIC", .start = 0x04470000, .end = 0x04470017, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xe00), .end = evt2irq(0xe60), .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic_device = { .name = "i2c-sh_mobile", .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, }; static struct r8a66597_platdata r8a66597_data = { .on_chip = 1, }; static struct resource usb_host_resources[] = { [0] = { .start = 0xa4d80000, .end = 0xa4d800ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa20), .end = evt2irq(0xa20), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device usb_host_device = { .name = "r8a66597_hcd", .id = -1, .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, .platform_data = &r8a66597_data, }, .num_resources = ARRAY_SIZE(usb_host_resources), .resource = usb_host_resources, }; static struct uio_info vpu_platform_data = { .name = "VPU5", .version = "0", .irq = evt2irq(0x980), }; static struct resource vpu_resources[] = { [0] = { .name = "VPU", .start = 0xfe900000, .end = 0xfe902807, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device vpu_device = { .name = "uio_pdrv_genirq", .id = 0, .dev = { .platform_data = &vpu_platform_data, }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), }; static struct uio_info veu0_platform_data = { .name = "VEU", .version = "0", .irq = evt2irq(0x8c0), }; static struct resource veu0_resources[] = { [0] = { .name = "VEU(1)", .start = 0xfe920000, .end = 0xfe9200b7, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu0_device = { .name = "uio_pdrv_genirq", .id = 1, .dev = { .platform_data = &veu0_platform_data, }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), }; static struct uio_info veu1_platform_data = { .name = "VEU", .version = "0", .irq = evt2irq(0x560), }; static struct resource veu1_resources[] = { [0] = { .name = "VEU(2)", .start = 0xfe924000, .end = 0xfe9240b7, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu1_device = { .name = "uio_pdrv_genirq", .id = 2, .dev = { .platform_data = &veu1_platform_data, }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), }; static struct sh_timer_config cmt_platform_data = { .channels_mask = 0x20, }; static struct resource cmt_resources[] = { DEFINE_RES_MEM(0x044a0000, 0x70), DEFINE_RES_IRQ(evt2irq(0xf00)), }; static struct platform_device cmt_device = { .name 
= "sh-cmt-32", .id = 0, .dev = { .platform_data = &cmt_platform_data, }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), }; static struct sh_timer_config tmu0_platform_data = { .channels_mask = 7, }; static struct resource tmu0_resources[] = { DEFINE_RES_MEM(0xffd80000, 0x2c), DEFINE_RES_IRQ(evt2irq(0x400)), DEFINE_RES_IRQ(evt2irq(0x420)), DEFINE_RES_IRQ(evt2irq(0x440)), }; static struct platform_device tmu0_device = { .name = "sh-tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct platform_device *sh7366_devices[] __initdata = { &scif0_device, &cmt_device, &tmu0_device, &iic_device, &usb_host_device, &vpu_device, &veu0_device, &veu1_device, }; static int __init sh7366_devices_setup(void) { platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); return platform_add_devices(sh7366_devices, ARRAY_SIZE(sh7366_devices)); } arch_initcall(sh7366_devices_setup); static struct platform_device *sh7366_early_devices[] __initdata = { &scif0_device, &cmt_device, &tmu0_device, }; void __init plat_early_device_setup(void) { sh_early_platform_add_devices(sh7366_early_devices, ARRAY_SIZE(sh7366_early_devices)); } enum { UNUSED=0, ENABLED, DISABLED, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, ICB, DMAC0, DMAC1, DMAC2, DMAC3, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, MFI, VPU, USB, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I, DMAC4, DMAC5, DMAC_DADERR, SCIF, SCIFA1, SCIFA2, DENC, MSIOF, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, SDHI, CMT, TSIF, SIU, TMU0, TMU1, TMU2, VEU2, LCDC, /* interrupt groups */ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), INTC_VECT(ICB, 0x700), INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20), INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20), INTC_VECT(MMC_MMC3I, 0xb40), INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20), INTC_VECT(SCIFA2, 0xc40), INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80), INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), INTC_VECT(SIU, 0xf80), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580), }; static struct intc_group groups[] __initdata = { INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I), INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, 
FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ { } }, { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ { 0, 0, 0, VPU, 0, 0, 0, MFI } }, { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ { 0, 0, 0, ICB } }, { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } }, { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } }, { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ { 0, 0, 0, 0, 0, 0, 0, MSIOF } }, { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USB, } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } }, { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ { 0, 0, 0, 0, 0, 0, 0, TSIF } }, { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } }, { 0xa4080008, 0, 16, 4, /* IPRC */ { } }, { 0xa408000c, 0, 16, 4, /* IPRD */ { } }, { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } }, { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } }, { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } }, { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } }, { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } }, { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } }, { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } }, { 0xa408002c, 0, 16, 4, /* IPRL */ { } }, { 0xa4140010, 0, 32, 4, /* INTPRI00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_sense_reg sense_registers[] __initdata = { { 0xa414001c, 16, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_mask_reg ack_registers[] __initdata = { { 0xa4140024, 0, 8, /* INTREQ00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_desc intc_desc __initdata = { .name = "sh7366", .force_enable = ENABLED, .force_disable = DISABLED, .hw = INTC_HW_DESC(vectors, groups, mask_registers, prio_registers, sense_registers, ack_registers), }; void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } void __init plat_mem_setup(void) { /* TODO: Register Node 1 */ }
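/*
 * Illustrative note, not part of the SH7366 setup code: the evt2irq()
 * conversions used throughout this file map an SH exception-vector offset
 * to a Linux IRQ number. Assuming the conventional definition from
 * <linux/sh_intc.h>, evt2irq(evt) == ((evt) >> 5) - 16, so the SCIF vector
 * 0xc00 used by scif0 above lands on Linux IRQ 80 and the TMU0 vector
 * 0x400 on IRQ 16. EXAMPLE_EVT2IRQ is a hypothetical local copy for
 * demonstration only.
 */
#define EXAMPLE_EVT2IRQ(evt)	(((evt) >> 5) - 16)
/* EXAMPLE_EVT2IRQ(0xc00) == 80, EXAMPLE_EVT2IRQ(0x400) == 16 */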
/* SPDX-License-Identifier: GPL-2.0-only */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * * Nov 7, 2000 * Modification to allow integration with Linux kernel * * Kevin D. Kissell, [email protected] and Carsten Langgard, [email protected] * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. */ #ifndef __ARCH_MIPS_MATH_EMU_IEEE754_H #define __ARCH_MIPS_MATH_EMU_IEEE754_H #include <linux/compiler.h> #include <asm/byteorder.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/sched.h> #include <asm/bitfield.h> union ieee754dp { struct { __BITFIELD_FIELD(unsigned int sign:1, __BITFIELD_FIELD(unsigned int bexp:11, __BITFIELD_FIELD(u64 mant:52, ;))) }; u64 bits; }; union ieee754sp { struct { __BITFIELD_FIELD(unsigned sign:1, __BITFIELD_FIELD(unsigned bexp:8, __BITFIELD_FIELD(unsigned mant:23, ;))) }; u32 bits; }; /* * single precision (often aka float) */ int ieee754sp_class(union ieee754sp x); union ieee754sp ieee754sp_abs(union ieee754sp x); union ieee754sp ieee754sp_neg(union ieee754sp x); union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_mul(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_div(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_fint(int x); union ieee754sp ieee754sp_flong(s64 x); union ieee754sp ieee754sp_fdp(union ieee754dp x); union ieee754sp ieee754sp_rint(union ieee754sp x); int ieee754sp_tint(union ieee754sp x); s64 ieee754sp_tlong(union ieee754sp x); int ieee754sp_cmp(union ieee754sp x, union ieee754sp y, int cop, int sig); union ieee754sp ieee754sp_sqrt(union ieee754sp x); union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_madd(union ieee754sp z, union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_msub(union ieee754sp z, union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_nmadd(union ieee754sp z, union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_nmsub(union ieee754sp z, union ieee754sp x, union ieee754sp y); int ieee754sp_2008class(union ieee754sp x); union ieee754sp ieee754sp_fmin(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_fmina(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_fmax(union ieee754sp x, union ieee754sp y); union ieee754sp ieee754sp_fmaxa(union ieee754sp x, union ieee754sp y); /* * double precision (often aka double) */ int ieee754dp_class(union ieee754dp x); union ieee754dp ieee754dp_add(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_sub(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_div(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_abs(union ieee754dp x); union ieee754dp ieee754dp_neg(union ieee754dp x); union ieee754dp ieee754dp_fint(int x); union ieee754dp ieee754dp_flong(s64 x); union ieee754dp ieee754dp_fsp(union ieee754sp x); union ieee754dp ieee754dp_rint(union ieee754dp x); int ieee754dp_tint(union ieee754dp x); s64 ieee754dp_tlong(union ieee754dp x); int ieee754dp_cmp(union ieee754dp x, union ieee754dp y, int cop, int sig); union ieee754dp ieee754dp_sqrt(union ieee754dp x); union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x, union ieee754dp y); union ieee754dp 
ieee754dp_msubf(union ieee754dp z, union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_madd(union ieee754dp z, union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_msub(union ieee754dp z, union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_nmadd(union ieee754dp z, union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_nmsub(union ieee754dp z, union ieee754dp x, union ieee754dp y); int ieee754dp_2008class(union ieee754dp x); union ieee754dp ieee754dp_fmin(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_fmina(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_fmax(union ieee754dp x, union ieee754dp y); union ieee754dp ieee754dp_fmaxa(union ieee754dp x, union ieee754dp y); /* 5 types of floating point number */ enum { IEEE754_CLASS_NORM = 0x00, IEEE754_CLASS_ZERO = 0x01, IEEE754_CLASS_DNORM = 0x02, IEEE754_CLASS_INF = 0x03, IEEE754_CLASS_SNAN = 0x04, IEEE754_CLASS_QNAN = 0x05, }; /* exception numbers */ #define IEEE754_INEXACT 0x01 #define IEEE754_UNDERFLOW 0x02 #define IEEE754_OVERFLOW 0x04 #define IEEE754_ZERO_DIVIDE 0x08 #define IEEE754_INVALID_OPERATION 0x10 /* cmp operators */ #define IEEE754_CLT 0x01 #define IEEE754_CEQ 0x02 #define IEEE754_CGT 0x04 #define IEEE754_CUN 0x08 /* * The control status register */ struct _ieee754_csr { __BITFIELD_FIELD(unsigned fcc:7, /* condition[7:1] */ __BITFIELD_FIELD(unsigned nod:1, /* set 1 for no denormals */ __BITFIELD_FIELD(unsigned c:1, /* condition[0] */ __BITFIELD_FIELD(unsigned pad0:3, __BITFIELD_FIELD(unsigned abs2008:1, /* IEEE 754-2008 ABS/NEG.fmt */ __BITFIELD_FIELD(unsigned nan2008:1, /* IEEE 754-2008 NaN mode */ __BITFIELD_FIELD(unsigned cx:6, /* exceptions this operation */ __BITFIELD_FIELD(unsigned mx:5, /* exception enable mask */ __BITFIELD_FIELD(unsigned sx:5, /* exceptions total */ __BITFIELD_FIELD(unsigned rm:2, /* current rounding mode */ ;)))))))))) }; #define ieee754_csr (*(struct _ieee754_csr *)(&current->thread.fpu.fcr31)) static inline unsigned int ieee754_getrm(void) { return (ieee754_csr.rm); } static inline unsigned int ieee754_setrm(unsigned int rm) { return (ieee754_csr.rm = rm); } /* * get current exceptions */ static inline unsigned int ieee754_getcx(void) { return (ieee754_csr.cx); } /* test for current exception condition */ static inline int ieee754_cxtest(unsigned int n) { return (ieee754_csr.cx & n); } /* * get sticky exceptions */ static inline unsigned int ieee754_getsx(void) { return (ieee754_csr.sx); } /* clear sticky conditions */ static inline unsigned int ieee754_clrsx(void) { return (ieee754_csr.sx = 0); } /* test for sticky exception condition */ static inline int ieee754_sxtest(unsigned int n) { return (ieee754_csr.sx & n); } /* debugging */ union ieee754sp ieee754sp_dump(char *s, union ieee754sp x); union ieee754dp ieee754dp_dump(char *s, union ieee754dp x); #define IEEE754_SPCVAL_PZERO 0 /* +0.0 */ #define IEEE754_SPCVAL_NZERO 1 /* -0.0 */ #define IEEE754_SPCVAL_PONE 2 /* +1.0 */ #define IEEE754_SPCVAL_NONE 3 /* -1.0 */ #define IEEE754_SPCVAL_PTEN 4 /* +10.0 */ #define IEEE754_SPCVAL_NTEN 5 /* -10.0 */ #define IEEE754_SPCVAL_PINFINITY 6 /* +inf */ #define IEEE754_SPCVAL_NINFINITY 7 /* -inf */ #define IEEE754_SPCVAL_INDEF_LEG 8 /* legacy quiet NaN */ #define IEEE754_SPCVAL_INDEF_2008 9 /* IEEE 754-2008 quiet NaN */ #define IEEE754_SPCVAL_PMAX 10 /* +max norm */ #define IEEE754_SPCVAL_NMAX 11 /* -max norm */ #define IEEE754_SPCVAL_PMIN 12 /* +min norm */ #define IEEE754_SPCVAL_NMIN 13 /* -min norm */ #define 
IEEE754_SPCVAL_PMIND 14 /* +min denorm */ #define IEEE754_SPCVAL_NMIND 15 /* -min denorm */ #define IEEE754_SPCVAL_P1E31 16 /* + 1.0e31 */ #define IEEE754_SPCVAL_P1E63 17 /* + 1.0e63 */ extern const union ieee754dp __ieee754dp_spcvals[]; extern const union ieee754sp __ieee754sp_spcvals[]; #define ieee754dp_spcvals ((const union ieee754dp *)__ieee754dp_spcvals) #define ieee754sp_spcvals ((const union ieee754sp *)__ieee754sp_spcvals) /* * Return infinity with given sign */ #define ieee754dp_inf(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PINFINITY+(sn)]) #define ieee754dp_zero(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PZERO+(sn)]) #define ieee754dp_one(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PONE+(sn)]) #define ieee754dp_ten(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PTEN+(sn)]) #define ieee754dp_indef() (ieee754dp_spcvals[IEEE754_SPCVAL_INDEF_LEG + \ ieee754_csr.nan2008]) #define ieee754dp_max(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMAX+(sn)]) #define ieee754dp_min(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMIN+(sn)]) #define ieee754dp_mind(sn) (ieee754dp_spcvals[IEEE754_SPCVAL_PMIND+(sn)]) #define ieee754dp_1e31() (ieee754dp_spcvals[IEEE754_SPCVAL_P1E31]) #define ieee754dp_1e63() (ieee754dp_spcvals[IEEE754_SPCVAL_P1E63]) #define ieee754sp_inf(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PINFINITY+(sn)]) #define ieee754sp_zero(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PZERO+(sn)]) #define ieee754sp_one(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PONE+(sn)]) #define ieee754sp_ten(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PTEN+(sn)]) #define ieee754sp_indef() (ieee754sp_spcvals[IEEE754_SPCVAL_INDEF_LEG + \ ieee754_csr.nan2008]) #define ieee754sp_max(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMAX+(sn)]) #define ieee754sp_min(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMIN+(sn)]) #define ieee754sp_mind(sn) (ieee754sp_spcvals[IEEE754_SPCVAL_PMIND+(sn)]) #define ieee754sp_1e31() (ieee754sp_spcvals[IEEE754_SPCVAL_P1E31]) #define ieee754sp_1e63() (ieee754sp_spcvals[IEEE754_SPCVAL_P1E63]) /* * Indefinite integer value */ static inline int ieee754si_indef(void) { return ieee754_csr.nan2008 ? 0 : INT_MAX; } static inline s64 ieee754di_indef(void) { return ieee754_csr.nan2008 ? 0 : S64_MAX; } /* * Overflow integer value */ static inline int ieee754si_overflow(int xs) { return ieee754_csr.nan2008 && xs ? INT_MIN : INT_MAX; } static inline s64 ieee754di_overflow(int xs) { return ieee754_csr.nan2008 && xs ? S64_MIN : S64_MAX; } /* result types for xctx.rt */ #define IEEE754_RT_SP 0 #define IEEE754_RT_DP 1 #define IEEE754_RT_XP 2 #define IEEE754_RT_SI 3 #define IEEE754_RT_DI 4 /* compat */ #define ieee754dp_fix(x) ieee754dp_tint(x) #define ieee754sp_fix(x) ieee754sp_tint(x) #endif /* __ARCH_MIPS_MATH_EMU_IEEE754_H */
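/*
 * Illustrative sketch, not part of the MIPS math emulator: inspecting a
 * single-precision value directly through the union ieee754sp bitfields
 * declared above. This mirrors the kind of test ieee754sp_class()
 * performs; example_sp_is_inf() and example_sp_from_bits() are
 * hypothetical helpers, assuming the usual IEEE 754 encoding where an
 * all-ones biased exponent with a zero mantissa denotes infinity.
 */
static inline int example_sp_is_inf(union ieee754sp x)
{
	return x.bexp == 0xff && x.mant == 0;
}

static inline union ieee754sp example_sp_from_bits(u32 bits)
{
	union ieee754sp x;

	x.bits = bits;	/* e.g. 0x7f800000 is +inf, 0x3f800000 is +1.0f */
	return x;
}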
/* SPDX-License-Identifier: GPL-2.0-or-later */ /* * Device memory TCP support * * Authors: Mina Almasry <[email protected]> * Willem de Bruijn <[email protected]> * Kaiyuan Zhang <[email protected]> * */ #ifndef _NET_DEVMEM_H #define _NET_DEVMEM_H struct netlink_ext_ack; struct net_devmem_dmabuf_binding { struct dma_buf *dmabuf; struct dma_buf_attachment *attachment; struct sg_table *sgt; struct net_device *dev; struct gen_pool *chunk_pool; /* The user holds a ref (via the netlink API) for as long as they want * the binding to remain alive. Each page pool using this binding holds * a ref to keep the binding alive. Each allocated net_iov holds a * ref. * * The binding undos itself and unmaps the underlying dmabuf once all * those refs are dropped and the binding is no longer desired or in * use. */ refcount_t ref; /* The list of bindings currently active. Used for netlink to notify us * of the user dropping the bind. */ struct list_head list; /* rxq's this binding is active on. */ struct xarray bound_rxqs; /* ID of this binding. Globally unique to all bindings currently * active. */ u32 id; }; #if defined(CONFIG_NET_DEVMEM) /* Owner of the dma-buf chunks inserted into the gen pool. Each scatterlist * entry from the dmabuf is inserted into the genpool as a chunk, and needs * this owner struct to keep track of some metadata necessary to create * allocations from this chunk. */ struct dmabuf_genpool_chunk_owner { /* Offset into the dma-buf where this chunk starts. */ unsigned long base_virtual; /* dma_addr of the start of the chunk. */ dma_addr_t base_dma_addr; /* Array of net_iovs for this chunk. */ struct net_iov *niovs; size_t num_niovs; struct net_devmem_dmabuf_binding *binding; }; void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding); struct net_devmem_dmabuf_binding * net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, struct netlink_ext_ack *extack); void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding); int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, struct net_devmem_dmabuf_binding *binding, struct netlink_ext_ack *extack); void dev_dmabuf_uninstall(struct net_device *dev); static inline struct dmabuf_genpool_chunk_owner * net_iov_owner(const struct net_iov *niov) { return niov->owner; } static inline unsigned int net_iov_idx(const struct net_iov *niov) { return niov - net_iov_owner(niov)->niovs; } static inline struct net_devmem_dmabuf_binding * net_iov_binding(const struct net_iov *niov) { return net_iov_owner(niov)->binding; } static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov) { struct dmabuf_genpool_chunk_owner *owner = net_iov_owner(niov); return owner->base_virtual + ((unsigned long)net_iov_idx(niov) << PAGE_SHIFT); } static inline u32 net_iov_binding_id(const struct net_iov *niov) { return net_iov_owner(niov)->binding->id; } static inline void net_devmem_dmabuf_binding_get(struct net_devmem_dmabuf_binding *binding) { refcount_inc(&binding->ref); } static inline void net_devmem_dmabuf_binding_put(struct net_devmem_dmabuf_binding *binding) { if (!refcount_dec_and_test(&binding->ref)) return; __net_devmem_dmabuf_binding_free(binding); } struct net_iov * net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding); void net_devmem_free_dmabuf(struct net_iov *ppiov); #else struct net_devmem_dmabuf_binding; static inline void __net_devmem_dmabuf_binding_free(struct net_devmem_dmabuf_binding *binding) { } static inline struct net_devmem_dmabuf_binding * 
net_devmem_bind_dmabuf(struct net_device *dev, unsigned int dmabuf_fd, struct netlink_ext_ack *extack) { return ERR_PTR(-EOPNOTSUPP); } static inline void net_devmem_unbind_dmabuf(struct net_devmem_dmabuf_binding *binding) { } static inline int net_devmem_bind_dmabuf_to_queue(struct net_device *dev, u32 rxq_idx, struct net_devmem_dmabuf_binding *binding, struct netlink_ext_ack *extack) { return -EOPNOTSUPP; } static inline void dev_dmabuf_uninstall(struct net_device *dev) { } static inline struct net_iov * net_devmem_alloc_dmabuf(struct net_devmem_dmabuf_binding *binding) { return NULL; } static inline void net_devmem_free_dmabuf(struct net_iov *ppiov) { } static inline unsigned long net_iov_virtual_addr(const struct net_iov *niov) { return 0; } static inline u32 net_iov_binding_id(const struct net_iov *niov) { return 0; } #endif #endif /* _NET_DEVMEM_H */
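/*
 * Illustrative sketch, not part of the devmem core: the reference-lifetime
 * pattern the header above implements, assuming CONFIG_NET_DEVMEM is
 * enabled. Each consumer that stashes a binding pointer pins it with
 * net_devmem_dmabuf_binding_get() and releases it with
 * net_devmem_dmabuf_binding_put(); the final put runs
 * __net_devmem_dmabuf_binding_free(). example_hold_binding() is a
 * hypothetical helper.
 */
static inline struct net_devmem_dmabuf_binding *
example_hold_binding(struct net_devmem_dmabuf_binding *binding)
{
	net_devmem_dmabuf_binding_get(binding);	/* pin while in use */
	return binding;
}
/* ...and when done: net_devmem_dmabuf_binding_put(binding); */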
// SPDX-License-Identifier: GPL-2.0 /* * Media device request objects * * Copyright 2018 Cisco Systems, Inc. and/or its affiliates. All rights reserved. * Copyright (C) 2018 Intel Corporation * Copyright (C) 2018 Google, Inc. * * Author: Hans Verkuil <[email protected]> * Author: Sakari Ailus <[email protected]> */ #include <linux/anon_inodes.h> #include <linux/file.h> #include <linux/refcount.h> #include <media/media-device.h> #include <media/media-request.h> static const char * const request_state[] = { [MEDIA_REQUEST_STATE_IDLE] = "idle", [MEDIA_REQUEST_STATE_VALIDATING] = "validating", [MEDIA_REQUEST_STATE_QUEUED] = "queued", [MEDIA_REQUEST_STATE_COMPLETE] = "complete", [MEDIA_REQUEST_STATE_CLEANING] = "cleaning", [MEDIA_REQUEST_STATE_UPDATING] = "updating", }; static const char * media_request_state_str(enum media_request_state state) { BUILD_BUG_ON(ARRAY_SIZE(request_state) != NR_OF_MEDIA_REQUEST_STATE); if (WARN_ON(state >= ARRAY_SIZE(request_state))) return "invalid"; return request_state[state]; } static void media_request_clean(struct media_request *req) { struct media_request_object *obj, *obj_safe; /* Just a sanity check. No other code path is allowed to change this. */ WARN_ON(req->state != MEDIA_REQUEST_STATE_CLEANING); WARN_ON(req->updating_count); WARN_ON(req->access_count); list_for_each_entry_safe(obj, obj_safe, &req->objects, list) { media_request_object_unbind(obj); media_request_object_put(obj); } req->updating_count = 0; req->access_count = 0; WARN_ON(req->num_incomplete_objects); req->num_incomplete_objects = 0; wake_up_interruptible_all(&req->poll_wait); } static void media_request_release(struct kref *kref) { struct media_request *req = container_of(kref, struct media_request, kref); struct media_device *mdev = req->mdev; dev_dbg(mdev->dev, "request: release %s\n", req->debug_str); /* No other users, no need for a spinlock */ req->state = MEDIA_REQUEST_STATE_CLEANING; media_request_clean(req); if (mdev->ops->req_free) mdev->ops->req_free(req); else kfree(req); } void media_request_put(struct media_request *req) { kref_put(&req->kref, media_request_release); } EXPORT_SYMBOL_GPL(media_request_put); static int media_request_close(struct inode *inode, struct file *filp) { struct media_request *req = filp->private_data; media_request_put(req); return 0; } static __poll_t media_request_poll(struct file *filp, struct poll_table_struct *wait) { struct media_request *req = filp->private_data; unsigned long flags; __poll_t ret = 0; if (!(poll_requested_events(wait) & EPOLLPRI)) return 0; poll_wait(filp, &req->poll_wait, wait); spin_lock_irqsave(&req->lock, flags); if (req->state == MEDIA_REQUEST_STATE_COMPLETE) { ret = EPOLLPRI; goto unlock; } if (req->state != MEDIA_REQUEST_STATE_QUEUED) { ret = EPOLLERR; goto unlock; } unlock: spin_unlock_irqrestore(&req->lock, flags); return ret; } static long media_request_ioctl_queue(struct media_request *req) { struct media_device *mdev = req->mdev; enum media_request_state state; unsigned long flags; int ret; dev_dbg(mdev->dev, "request: queue %s\n", req->debug_str); /* * Ensure the request that is validated will be the one that gets queued * next by serialising the queueing process. This mutex is also used * to serialize with canceling a vb2 queue and with setting values such * as controls in a request. 
*/ mutex_lock(&mdev->req_queue_mutex); media_request_get(req); spin_lock_irqsave(&req->lock, flags); if (req->state == MEDIA_REQUEST_STATE_IDLE) req->state = MEDIA_REQUEST_STATE_VALIDATING; state = req->state; spin_unlock_irqrestore(&req->lock, flags); if (state != MEDIA_REQUEST_STATE_VALIDATING) { dev_dbg(mdev->dev, "request: unable to queue %s, request in state %s\n", req->debug_str, media_request_state_str(state)); media_request_put(req); mutex_unlock(&mdev->req_queue_mutex); return -EBUSY; } ret = mdev->ops->req_validate(req); /* * If the req_validate was successful, then we mark the state as QUEUED * and call req_queue. The reason we set the state first is that this * allows req_queue to unbind or complete the queued objects in case * they are immediately 'consumed'. State changes from QUEUED to another * state can only happen if either the driver changes the state or if * the user cancels the vb2 queue. The driver can only change the state * after each object is queued through the req_queue op (and note that * that op cannot fail), so setting the state to QUEUED up front is * safe. * * The other reason for changing the state is if the vb2 queue is * canceled, and that uses the req_queue_mutex which is still locked * while req_queue is called, so that's safe as well. */ spin_lock_irqsave(&req->lock, flags); req->state = ret ? MEDIA_REQUEST_STATE_IDLE : MEDIA_REQUEST_STATE_QUEUED; spin_unlock_irqrestore(&req->lock, flags); if (!ret) mdev->ops->req_queue(req); mutex_unlock(&mdev->req_queue_mutex); if (ret) { dev_dbg(mdev->dev, "request: can't queue %s (%d)\n", req->debug_str, ret); media_request_put(req); } return ret; } static long media_request_ioctl_reinit(struct media_request *req) { struct media_device *mdev = req->mdev; unsigned long flags; spin_lock_irqsave(&req->lock, flags); if (req->state != MEDIA_REQUEST_STATE_IDLE && req->state != MEDIA_REQUEST_STATE_COMPLETE) { dev_dbg(mdev->dev, "request: %s not in idle or complete state, cannot reinit\n", req->debug_str); spin_unlock_irqrestore(&req->lock, flags); return -EBUSY; } if (req->access_count) { dev_dbg(mdev->dev, "request: %s is being accessed, cannot reinit\n", req->debug_str); spin_unlock_irqrestore(&req->lock, flags); return -EBUSY; } req->state = MEDIA_REQUEST_STATE_CLEANING; spin_unlock_irqrestore(&req->lock, flags); media_request_clean(req); spin_lock_irqsave(&req->lock, flags); req->state = MEDIA_REQUEST_STATE_IDLE; spin_unlock_irqrestore(&req->lock, flags); return 0; } static long media_request_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { struct media_request *req = filp->private_data; switch (cmd) { case MEDIA_REQUEST_IOC_QUEUE: return media_request_ioctl_queue(req); case MEDIA_REQUEST_IOC_REINIT: return media_request_ioctl_reinit(req); default: return -ENOIOCTLCMD; } } static const struct file_operations request_fops = { .owner = THIS_MODULE, .poll = media_request_poll, .unlocked_ioctl = media_request_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = media_request_ioctl, #endif /* CONFIG_COMPAT */ .release = media_request_close, }; struct media_request * media_request_get_by_fd(struct media_device *mdev, int request_fd) { struct media_request *req; if (!mdev || !mdev->ops || !mdev->ops->req_validate || !mdev->ops->req_queue) return ERR_PTR(-EBADR); CLASS(fd, f)(request_fd); if (fd_empty(f)) goto err; if (fd_file(f)->f_op != &request_fops) goto err; req = fd_file(f)->private_data; if (req->mdev != mdev) goto err; /* * Note: as long as someone has an open filehandle of the request, * the request can never 
be released. The fdget() above ensures that * even if userspace closes the request filehandle, the release() * fop won't be called, so the media_request_get() always succeeds * and there is no race condition where the request was released * before media_request_get() is called. */ media_request_get(req); return req; err: dev_dbg(mdev->dev, "cannot find request_fd %d\n", request_fd); return ERR_PTR(-EINVAL); } EXPORT_SYMBOL_GPL(media_request_get_by_fd); int media_request_alloc(struct media_device *mdev, int *alloc_fd) { struct media_request *req; struct file *filp; int fd; int ret; /* Either both are NULL or both are non-NULL */ if (WARN_ON(!mdev->ops->req_alloc ^ !mdev->ops->req_free)) return -ENOMEM; if (mdev->ops->req_alloc) req = mdev->ops->req_alloc(mdev); else req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) { ret = fd; goto err_free_req; } filp = anon_inode_getfile("request", &request_fops, NULL, O_CLOEXEC); if (IS_ERR(filp)) { ret = PTR_ERR(filp); goto err_put_fd; } filp->private_data = req; req->mdev = mdev; req->state = MEDIA_REQUEST_STATE_IDLE; req->num_incomplete_objects = 0; kref_init(&req->kref); INIT_LIST_HEAD(&req->objects); spin_lock_init(&req->lock); init_waitqueue_head(&req->poll_wait); req->updating_count = 0; req->access_count = 0; *alloc_fd = fd; snprintf(req->debug_str, sizeof(req->debug_str), "%u:%d", atomic_inc_return(&mdev->request_id), fd); dev_dbg(mdev->dev, "request: allocated %s\n", req->debug_str); fd_install(fd, filp); return 0; err_put_fd: put_unused_fd(fd); err_free_req: if (mdev->ops->req_free) mdev->ops->req_free(req); else kfree(req); return ret; } static void media_request_object_release(struct kref *kref) { struct media_request_object *obj = container_of(kref, struct media_request_object, kref); struct media_request *req = obj->req; if (WARN_ON(req)) media_request_object_unbind(obj); obj->ops->release(obj); } struct media_request_object * media_request_object_find(struct media_request *req, const struct media_request_object_ops *ops, void *priv) { struct media_request_object *obj; struct media_request_object *found = NULL; unsigned long flags; if (WARN_ON(!ops || !priv)) return NULL; spin_lock_irqsave(&req->lock, flags); list_for_each_entry(obj, &req->objects, list) { if (obj->ops == ops && obj->priv == priv) { media_request_object_get(obj); found = obj; break; } } spin_unlock_irqrestore(&req->lock, flags); return found; } EXPORT_SYMBOL_GPL(media_request_object_find); void media_request_object_put(struct media_request_object *obj) { kref_put(&obj->kref, media_request_object_release); } EXPORT_SYMBOL_GPL(media_request_object_put); void media_request_object_init(struct media_request_object *obj) { obj->ops = NULL; obj->req = NULL; obj->priv = NULL; obj->completed = false; INIT_LIST_HEAD(&obj->list); kref_init(&obj->kref); } EXPORT_SYMBOL_GPL(media_request_object_init); int media_request_object_bind(struct media_request *req, const struct media_request_object_ops *ops, void *priv, bool is_buffer, struct media_request_object *obj) { unsigned long flags; int ret = -EBUSY; if (WARN_ON(!ops->release)) return -EBADR; spin_lock_irqsave(&req->lock, flags); if (WARN_ON(req->state != MEDIA_REQUEST_STATE_UPDATING && req->state != MEDIA_REQUEST_STATE_QUEUED)) goto unlock; obj->req = req; obj->ops = ops; obj->priv = priv; if (is_buffer) list_add_tail(&obj->list, &req->objects); else list_add(&obj->list, &req->objects); req->num_incomplete_objects++; ret = 0; unlock: spin_unlock_irqrestore(&req->lock, 
flags); return ret; } EXPORT_SYMBOL_GPL(media_request_object_bind); void media_request_object_unbind(struct media_request_object *obj) { struct media_request *req = obj->req; unsigned long flags; bool completed = false; if (WARN_ON(!req)) return; spin_lock_irqsave(&req->lock, flags); list_del(&obj->list); obj->req = NULL; if (req->state == MEDIA_REQUEST_STATE_COMPLETE) goto unlock; if (WARN_ON(req->state == MEDIA_REQUEST_STATE_VALIDATING)) goto unlock; if (req->state == MEDIA_REQUEST_STATE_CLEANING) { if (!obj->completed) req->num_incomplete_objects--; goto unlock; } if (WARN_ON(!req->num_incomplete_objects)) goto unlock; req->num_incomplete_objects--; if (req->state == MEDIA_REQUEST_STATE_QUEUED && !req->num_incomplete_objects) { req->state = MEDIA_REQUEST_STATE_COMPLETE; completed = true; wake_up_interruptible_all(&req->poll_wait); } unlock: spin_unlock_irqrestore(&req->lock, flags); if (obj->ops->unbind) obj->ops->unbind(obj); if (completed) media_request_put(req); } EXPORT_SYMBOL_GPL(media_request_object_unbind); void media_request_object_complete(struct media_request_object *obj) { struct media_request *req = obj->req; unsigned long flags; bool completed = false; spin_lock_irqsave(&req->lock, flags); if (obj->completed) goto unlock; obj->completed = true; if (WARN_ON(!req->num_incomplete_objects) || WARN_ON(req->state != MEDIA_REQUEST_STATE_QUEUED)) goto unlock; if (!--req->num_incomplete_objects) { req->state = MEDIA_REQUEST_STATE_COMPLETE; wake_up_interruptible_all(&req->poll_wait); completed = true; } unlock: spin_unlock_irqrestore(&req->lock, flags); if (completed) media_request_put(req); } EXPORT_SYMBOL_GPL(media_request_object_complete);
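/*
 * Illustrative userspace sketch, not part of the driver: the request
 * lifecycle that the code above serves. A request fd is allocated from the
 * media device with MEDIA_IOC_REQUEST_ALLOC, objects (buffers, controls)
 * are associated with it through V4L2, and MEDIA_REQUEST_IOC_QUEUE then
 * validates and queues it; poll() with POLLPRI signals completion and
 * MEDIA_REQUEST_IOC_REINIT recycles the request. Error handling is
 * abbreviated and "/dev/media0" is an assumed device node.
 */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/media.h>

static int example_queue_request(void)
{
	int media_fd = open("/dev/media0", O_RDWR);
	int req_fd;

	if (media_fd < 0)
		return -1;
	if (ioctl(media_fd, MEDIA_IOC_REQUEST_ALLOC, &req_fd) < 0)
		return -1;
	/* ...bind buffers/controls against req_fd via V4L2 ioctls here... */
	if (ioctl(req_fd, MEDIA_REQUEST_IOC_QUEUE) < 0)
		return -1;	/* e.g. -EBUSY when the request was not idle */
	/* poll(req_fd, POLLPRI) waits for completion; REINIT allows reuse */
	return req_fd;
}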
/* * e500mc Power ISA Device Tree Source (include) * * Copyright 2012 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ / { cpus { power-isa-version = "2.06"; power-isa-b; // Base power-isa-e; // Embedded power-isa-atb; // Alternate Time Base power-isa-cs; // Cache Specification power-isa-ds; // Decorated Storage power-isa-e.ed; // Embedded.Enhanced Debug power-isa-e.pd; // Embedded.External PID power-isa-e.hv; // Embedded.Hypervisor power-isa-e.le; // Embedded.Little-Endian power-isa-e.pm; // Embedded.Performance Monitor power-isa-e.pc; // Embedded.Processor Control power-isa-ecl; // Embedded Cache Locking power-isa-exp; // External Proxy power-isa-fp; // Floating Point power-isa-fp.r; // Floating Point.Record power-isa-mmc; // Memory Coherence power-isa-scpm; // Store Conditional Page Mobility power-isa-wt; // Wait fsl,eref-deo; // Data Cache Extended Operations mmu-type = "power-embedded"; }; };
// SPDX-License-Identifier: GPL-2.0-or-later /* * * device driver for Conexant 2388x based TV cards * video4linux video interface * * (c) 2003-04 Gerd Knorr <[email protected]> [SuSE Labs] * * (c) 2005-2006 Mauro Carvalho Chehab <[email protected]> * - Multituner support * - video_ioctl2 conversion * - PAL/M fixes */ #include "cx88.h" #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kmod.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/kthread.h> #include <asm/div64.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-event.h> #include <media/i2c/wm8775.h> MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards"); MODULE_AUTHOR("Gerd Knorr <[email protected]> [SuSE Labs]"); MODULE_LICENSE("GPL v2"); MODULE_VERSION(CX88_VERSION); /* ------------------------------------------------------------------ */ static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET }; module_param_array(video_nr, int, NULL, 0444); module_param_array(vbi_nr, int, NULL, 0444); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(video_nr, "video device numbers"); MODULE_PARM_DESC(vbi_nr, "vbi device numbers"); MODULE_PARM_DESC(radio_nr, "radio device numbers"); static unsigned int video_debug; module_param(video_debug, int, 0644); MODULE_PARM_DESC(video_debug, "enable debug messages [video]"); static unsigned int irq_debug; module_param(irq_debug, int, 0644); MODULE_PARM_DESC(irq_debug, "enable debug messages [IRQ handler]"); #define dprintk(level, fmt, arg...) 
do { \ if (video_debug >= level) \ printk(KERN_DEBUG pr_fmt("%s: video:" fmt), \ __func__, ##arg); \ } while (0) /* ------------------------------------------------------------------- */ /* static data */ static const struct cx8800_fmt formats[] = { { .fourcc = V4L2_PIX_FMT_GREY, .cxformat = ColorFormatY8, .depth = 8, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_RGB555, .cxformat = ColorFormatRGB15, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_RGB555X, .cxformat = ColorFormatRGB15 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_RGB565, .cxformat = ColorFormatRGB16, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_RGB565X, .cxformat = ColorFormatRGB16 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_BGR24, .cxformat = ColorFormatRGB24, .depth = 24, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_BGR32, .cxformat = ColorFormatRGB32, .depth = 32, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_RGB32, .cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP, .depth = 32, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_YUYV, .cxformat = ColorFormatYUY2, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, { .fourcc = V4L2_PIX_FMT_UYVY, .cxformat = ColorFormatYUY2 | ColorFormatBSWAP, .depth = 16, .flags = FORMAT_FLAGS_PACKED, }, }; static const struct cx8800_fmt *format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(formats); i++) if (formats[i].fourcc == fourcc) return formats + i; return NULL; } /* ------------------------------------------------------------------- */ struct cx88_ctrl { /* control information */ u32 id; s32 minimum; s32 maximum; u32 step; s32 default_value; /* control register information */ u32 off; u32 reg; u32 sreg; u32 mask; u32 shift; }; static const struct cx88_ctrl cx8800_vid_ctls[] = { /* --- video --- */ { .id = V4L2_CID_BRIGHTNESS, .minimum = 0x00, .maximum = 0xff, .step = 1, .default_value = 0x7f, .off = 128, .reg = MO_CONTR_BRIGHT, .mask = 0x00ff, .shift = 0, }, { .id = V4L2_CID_CONTRAST, .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x3f, .off = 0, .reg = MO_CONTR_BRIGHT, .mask = 0xff00, .shift = 8, }, { .id = V4L2_CID_HUE, .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x7f, .off = 128, .reg = MO_HUE, .mask = 0x00ff, .shift = 0, }, { /* strictly, this only describes only U saturation. * V saturation is handled specially through code. 
*/ .id = V4L2_CID_SATURATION, .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x7f, .off = 0, .reg = MO_UV_SATURATION, .mask = 0x00ff, .shift = 0, }, { .id = V4L2_CID_SHARPNESS, .minimum = 0, .maximum = 4, .step = 1, .default_value = 0x0, .off = 0, /* * NOTE: the value is converted and written to both even * and odd registers in the code */ .reg = MO_FILTER_ODD, .mask = 7 << 7, .shift = 7, }, { .id = V4L2_CID_CHROMA_AGC, .minimum = 0, .maximum = 1, .default_value = 0x1, .reg = MO_INPUT_FORMAT, .mask = 1 << 10, .shift = 10, }, { .id = V4L2_CID_COLOR_KILLER, .minimum = 0, .maximum = 1, .default_value = 0x1, .reg = MO_INPUT_FORMAT, .mask = 1 << 9, .shift = 9, }, { .id = V4L2_CID_BAND_STOP_FILTER, .minimum = 0, .maximum = 1, .step = 1, .default_value = 0x0, .off = 0, .reg = MO_HTOTAL, .mask = 3 << 11, .shift = 11, } }; static const struct cx88_ctrl cx8800_aud_ctls[] = { { /* --- audio --- */ .id = V4L2_CID_AUDIO_MUTE, .minimum = 0, .maximum = 1, .default_value = 1, .reg = AUD_VOL_CTL, .sreg = SHADOW_AUD_VOL_CTL, .mask = (1 << 6), .shift = 6, }, { .id = V4L2_CID_AUDIO_VOLUME, .minimum = 0, .maximum = 0x3f, .step = 1, .default_value = 0x3f, .reg = AUD_VOL_CTL, .sreg = SHADOW_AUD_VOL_CTL, .mask = 0x3f, .shift = 0, }, { .id = V4L2_CID_AUDIO_BALANCE, .minimum = 0, .maximum = 0x7f, .step = 1, .default_value = 0x40, .reg = AUD_BAL_CTL, .sreg = SHADOW_AUD_BAL_CTL, .mask = 0x7f, .shift = 0, } }; enum { CX8800_VID_CTLS = ARRAY_SIZE(cx8800_vid_ctls), CX8800_AUD_CTLS = ARRAY_SIZE(cx8800_aud_ctls), }; /* ------------------------------------------------------------------ */ int cx88_video_mux(struct cx88_core *core, unsigned int input) { /* struct cx88_core *core = dev->core; */ dprintk(1, "video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n", input, INPUT(input).vmux, INPUT(input).gpio0, INPUT(input).gpio1, INPUT(input).gpio2, INPUT(input).gpio3); core->input = input; cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14); cx_write(MO_GP3_IO, INPUT(input).gpio3); cx_write(MO_GP0_IO, INPUT(input).gpio0); cx_write(MO_GP1_IO, INPUT(input).gpio1); cx_write(MO_GP2_IO, INPUT(input).gpio2); switch (INPUT(input).type) { case CX88_VMUX_SVIDEO: cx_set(MO_AFECFG_IO, 0x00000001); cx_set(MO_INPUT_FORMAT, 0x00010010); cx_set(MO_FILTER_EVEN, 0x00002020); cx_set(MO_FILTER_ODD, 0x00002020); break; default: cx_clear(MO_AFECFG_IO, 0x00000001); cx_clear(MO_INPUT_FORMAT, 0x00010010); cx_clear(MO_FILTER_EVEN, 0x00002020); cx_clear(MO_FILTER_ODD, 0x00002020); break; } /* * if there are audioroutes defined, we have an external * ADC to deal with audio */ if (INPUT(input).audioroute) { /* * The wm8775 module has the "2" route hardwired into * the initialization. Some boards may use different * routes for different inputs. HVR-1300 surely does */ if (core->sd_wm8775) { call_all(core, audio, s_routing, INPUT(input).audioroute, 0, 0); } /* * cx2388's C-ADC is connected to the tuner only. 
* When used with S-Video, that ADC is busy dealing with * chroma, so an external must be used for baseband audio */ if (INPUT(input).type != CX88_VMUX_TELEVISION && INPUT(input).type != CX88_VMUX_CABLE) { /* "I2S ADC mode" */ core->tvaudio = WW_I2SADC; cx88_set_tvaudio(core); } else { /* Normal mode */ cx_write(AUD_I2SCNTL, 0x0); cx_clear(AUD_CTL, EN_I2SIN_ENABLE); } } return 0; } EXPORT_SYMBOL(cx88_video_mux); /* ------------------------------------------------------------------ */ static int start_video_dma(struct cx8800_dev *dev, struct cx88_dmaqueue *q, struct cx88_buffer *buf) { struct cx88_core *core = dev->core; /* setup fifo + format */ cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21], buf->bpl, buf->risc.dma); cx88_set_scale(core, core->width, core->height, core->field); cx_write(MO_COLOR_CTRL, dev->fmt->cxformat | ColorFormatGamma); /* reset counter */ cx_write(MO_VIDY_GPCNTRL, GP_COUNT_CONTROL_RESET); q->count = 0; /* enable irqs */ cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT); /* * Enables corresponding bits at PCI_INT_STAT: * bits 0 to 4: video, audio, transport stream, VIP, Host * bit 7: timer * bits 8 and 9: DMA complete for: SRC, DST * bits 10 and 11: BERR signal asserted for RISC: RD, WR * bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB */ cx_set(MO_VID_INTMSK, 0x0f0011); /* enable capture */ cx_set(VID_CAPTURE_CONTROL, 0x06); /* start dma */ cx_set(MO_DEV_CNTRL2, (1 << 5)); cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */ return 0; } static int __maybe_unused stop_video_dma(struct cx8800_dev *dev) { struct cx88_core *core = dev->core; /* stop dma */ cx_clear(MO_VID_DMACNTRL, 0x11); /* disable capture */ cx_clear(VID_CAPTURE_CONTROL, 0x06); /* disable irqs */ cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT); cx_clear(MO_VID_INTMSK, 0x0f0011); return 0; } static int __maybe_unused restart_video_queue(struct cx8800_dev *dev, struct cx88_dmaqueue *q) { struct cx88_buffer *buf; if (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx88_buffer, list); dprintk(2, "restart_queue [%p/%d]: restart dma\n", buf, buf->vb.vb2_buf.index); start_video_dma(dev, q, buf); } return 0; } /* ------------------------------------------------------------------ */ static int queue_setup(struct vb2_queue *q, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], struct device *alloc_devs[]) { struct cx8800_dev *dev = q->drv_priv; struct cx88_core *core = dev->core; *num_planes = 1; sizes[0] = (dev->fmt->depth * core->width * core->height) >> 3; return 0; } static int buffer_prepare(struct vb2_buffer *vb) { int ret; struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct cx8800_dev *dev = vb->vb2_queue->drv_priv; struct cx88_core *core = dev->core; struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb); struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0); buf->bpl = core->width * dev->fmt->depth >> 3; if (vb2_plane_size(vb, 0) < core->height * buf->bpl) return -EINVAL; vb2_set_plane_payload(vb, 0, core->height * buf->bpl); switch (core->field) { case V4L2_FIELD_TOP: ret = cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, 0, UNSET, buf->bpl, 0, core->height); break; case V4L2_FIELD_BOTTOM: ret = cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, UNSET, 0, buf->bpl, 0, core->height); break; case V4L2_FIELD_SEQ_TB: ret = cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, 0, buf->bpl * (core->height >> 1), buf->bpl, 0, core->height >> 1); break; case V4L2_FIELD_SEQ_BT: ret = 
cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, buf->bpl * (core->height >> 1), 0, buf->bpl, 0, core->height >> 1); break; case V4L2_FIELD_INTERLACED: default: ret = cx88_risc_buffer(dev->pci, &buf->risc, sgt->sgl, 0, buf->bpl, buf->bpl, buf->bpl, core->height >> 1); break; } dprintk(2, "[%p/%d] %s - %dx%d %dbpp 0x%08x - dma=0x%08lx\n", buf, buf->vb.vb2_buf.index, __func__, core->width, core->height, dev->fmt->depth, dev->fmt->fourcc, (unsigned long)buf->risc.dma); return ret; } static void buffer_finish(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct cx8800_dev *dev = vb->vb2_queue->drv_priv; struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb); struct cx88_riscmem *risc = &buf->risc; if (risc->cpu) dma_free_coherent(&dev->pci->dev, risc->size, risc->cpu, risc->dma); memset(risc, 0, sizeof(*risc)); } static void buffer_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct cx8800_dev *dev = vb->vb2_queue->drv_priv; struct cx88_buffer *buf = container_of(vbuf, struct cx88_buffer, vb); struct cx88_buffer *prev; struct cx88_dmaqueue *q = &dev->vidq; /* add jump to start */ buf->risc.cpu[1] = cpu_to_le32(buf->risc.dma + 8); buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma + 8); if (list_empty(&q->active)) { list_add_tail(&buf->list, &q->active); dprintk(2, "[%p/%d] buffer_queue - first active\n", buf, buf->vb.vb2_buf.index); } else { buf->risc.cpu[0] |= cpu_to_le32(RISC_IRQ1); prev = list_entry(q->active.prev, struct cx88_buffer, list); list_add_tail(&buf->list, &q->active); prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); dprintk(2, "[%p/%d] buffer_queue - append to active\n", buf, buf->vb.vb2_buf.index); } } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct cx8800_dev *dev = q->drv_priv; struct cx88_dmaqueue *dmaq = &dev->vidq; struct cx88_buffer *buf = list_entry(dmaq->active.next, struct cx88_buffer, list); start_video_dma(dev, dmaq, buf); return 0; } static void stop_streaming(struct vb2_queue *q) { struct cx8800_dev *dev = q->drv_priv; struct cx88_core *core = dev->core; struct cx88_dmaqueue *dmaq = &dev->vidq; unsigned long flags; cx_clear(MO_VID_DMACNTRL, 0x11); cx_clear(VID_CAPTURE_CONTROL, 0x06); spin_lock_irqsave(&dev->slock, flags); while (!list_empty(&dmaq->active)) { struct cx88_buffer *buf = list_entry(dmaq->active.next, struct cx88_buffer, list); list_del(&buf->list); vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&dev->slock, flags); } static const struct vb2_ops cx8800_video_qops = { .queue_setup = queue_setup, .buf_prepare = buffer_prepare, .buf_finish = buffer_finish, .buf_queue = buffer_queue, .start_streaming = start_streaming, .stop_streaming = stop_streaming, }; /* ------------------------------------------------------------------ */ static int radio_open(struct file *file) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; int ret = v4l2_fh_open(file); if (ret) return ret; cx_write(MO_GP3_IO, core->board.radio.gpio3); cx_write(MO_GP0_IO, core->board.radio.gpio0); cx_write(MO_GP1_IO, core->board.radio.gpio1); cx_write(MO_GP2_IO, core->board.radio.gpio2); if (core->board.radio.audioroute) { if (core->sd_wm8775) { call_all(core, audio, s_routing, core->board.radio.audioroute, 0, 0); } /* "I2S ADC mode" */ core->tvaudio = WW_I2SADC; cx88_set_tvaudio(core); } else { /* FM Mode */ core->tvaudio = WW_FM; cx88_set_tvaudio(core); 
cx88_set_stereo(core, V4L2_TUNER_MODE_STEREO, 1); } call_all(core, tuner, s_radio); return 0; } /* ------------------------------------------------------------------ */ /* VIDEO CTRL IOCTLS */ static int cx8800_s_vid_ctrl(struct v4l2_ctrl *ctrl) { struct cx88_core *core = container_of(ctrl->handler, struct cx88_core, video_hdl); const struct cx88_ctrl *cc = ctrl->priv; u32 value, mask; mask = cc->mask; switch (ctrl->id) { case V4L2_CID_SATURATION: /* special v_sat handling */ value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; if (core->tvnorm & V4L2_STD_SECAM) { /* For SECAM, both U and V sat should be equal */ value = value << 8 | value; } else { /* Keeps U Saturation proportional to V Sat */ value = (value * 0x5a) / 0x7f << 8 | value; } mask = 0xffff; break; case V4L2_CID_SHARPNESS: /* 0b000, 0b100, 0b101, 0b110, or 0b111 */ value = (ctrl->val < 1 ? 0 : ((ctrl->val + 3) << 7)); /* needs to be set for both fields */ cx_andor(MO_FILTER_EVEN, mask, value); break; case V4L2_CID_CHROMA_AGC: value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; break; default: value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; break; } dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", ctrl->id, ctrl->name, ctrl->val, cc->reg, value, mask, cc->sreg ? " [shadowed]" : ""); if (cc->sreg) cx_sandor(cc->sreg, cc->reg, mask, value); else cx_andor(cc->reg, mask, value); return 0; } static int cx8800_s_aud_ctrl(struct v4l2_ctrl *ctrl) { struct cx88_core *core = container_of(ctrl->handler, struct cx88_core, audio_hdl); const struct cx88_ctrl *cc = ctrl->priv; u32 value, mask; /* Pass changes onto any WM8775 */ if (core->sd_wm8775) { switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: wm8775_s_ctrl(core, ctrl->id, ctrl->val); break; case V4L2_CID_AUDIO_VOLUME: wm8775_s_ctrl(core, ctrl->id, (ctrl->val) ? (0x90 + ctrl->val) << 8 : 0); break; case V4L2_CID_AUDIO_BALANCE: wm8775_s_ctrl(core, ctrl->id, ctrl->val << 9); break; default: break; } } mask = cc->mask; switch (ctrl->id) { case V4L2_CID_AUDIO_BALANCE: value = (ctrl->val < 0x40) ? (0x7f - ctrl->val) : (ctrl->val - 0x40); break; case V4L2_CID_AUDIO_VOLUME: value = 0x3f - (ctrl->val & 0x3f); break; default: value = ((ctrl->val - cc->off) << cc->shift) & cc->mask; break; } dprintk(1, "set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n", ctrl->id, ctrl->name, ctrl->val, cc->reg, value, mask, cc->sreg ? 
" [shadowed]" : ""); if (cc->sreg) cx_sandor(cc->sreg, cc->reg, mask, value); else cx_andor(cc->reg, mask, value); return 0; } /* ------------------------------------------------------------------ */ /* VIDEO IOCTLS */ static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; f->fmt.pix.width = core->width; f->fmt.pix.height = core->height; f->fmt.pix.field = core->field; f->fmt.pix.pixelformat = dev->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * dev->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; const struct cx8800_fmt *fmt; enum v4l2_field field; unsigned int maxw, maxh; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (!fmt) return -EINVAL; maxw = norm_maxw(core->tvnorm); maxh = norm_maxh(core->tvnorm); field = f->fmt.pix.field; switch (field) { case V4L2_FIELD_TOP: case V4L2_FIELD_BOTTOM: case V4L2_FIELD_INTERLACED: case V4L2_FIELD_SEQ_BT: case V4L2_FIELD_SEQ_TB: break; default: field = (f->fmt.pix.height > maxh / 2) ? V4L2_FIELD_INTERLACED : V4L2_FIELD_BOTTOM; break; } if (V4L2_FIELD_HAS_T_OR_B(field)) maxh /= 2; v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2, &f->fmt.pix.height, 32, maxh, 0, 0); f->fmt.pix.field = field; f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; int err = vidioc_try_fmt_vid_cap(file, priv, f); if (err != 0) return err; if (vb2_is_busy(&dev->vb2_vidq) || vb2_is_busy(&dev->vb2_vbiq)) return -EBUSY; if (core->dvbdev && vb2_is_busy(&core->dvbdev->vb2_mpegq)) return -EBUSY; dev->fmt = format_by_fourcc(f->fmt.pix.pixelformat); core->width = f->fmt.pix.width; core->height = f->fmt.pix.height; core->field = f->fmt.pix.field; return 0; } int cx88_querycap(struct file *file, struct cx88_core *core, struct v4l2_capability *cap) { strscpy(cap->card, core->board.name, sizeof(cap->card)); cap->capabilities = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VBI_CAPTURE | V4L2_CAP_DEVICE_CAPS; if (core->board.tuner_type != UNSET) cap->capabilities |= V4L2_CAP_TUNER; if (core->board.radio.type == CX88_RADIO) cap->capabilities |= V4L2_CAP_RADIO; return 0; } EXPORT_SYMBOL(cx88_querycap); static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; strscpy(cap->driver, "cx8800", sizeof(cap->driver)); return cx88_querycap(file, core, cap); } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(formats))) return -EINVAL; f->pixelformat = formats[f->index].fourcc; return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *tvnorm) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; *tvnorm = core->tvnorm; return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id tvnorms) { struct cx8800_dev 
*dev = video_drvdata(file); struct cx88_core *core = dev->core; return cx88_set_tvnorm(core, tvnorms); } /* only one input in this sample driver */ int cx88_enum_input(struct cx88_core *core, struct v4l2_input *i) { static const char * const iname[] = { [CX88_VMUX_COMPOSITE1] = "Composite1", [CX88_VMUX_COMPOSITE2] = "Composite2", [CX88_VMUX_COMPOSITE3] = "Composite3", [CX88_VMUX_COMPOSITE4] = "Composite4", [CX88_VMUX_SVIDEO] = "S-Video", [CX88_VMUX_TELEVISION] = "Television", [CX88_VMUX_CABLE] = "Cable TV", [CX88_VMUX_DVB] = "DVB", [CX88_VMUX_DEBUG] = "for debug only", }; unsigned int n = i->index; if (n >= 4) return -EINVAL; if (!INPUT(n).type) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; strscpy(i->name, iname[INPUT(n).type], sizeof(i->name)); if ((INPUT(n).type == CX88_VMUX_TELEVISION) || (INPUT(n).type == CX88_VMUX_CABLE)) i->type = V4L2_INPUT_TYPE_TUNER; i->std = CX88_NORMS; return 0; } EXPORT_SYMBOL(cx88_enum_input); static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; return cx88_enum_input(core, i); } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; *i = core->input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; if (i >= 4) return -EINVAL; if (!INPUT(i).type) return -EINVAL; cx88_newstation(core); cx88_video_mux(core, i); return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; u32 reg; if (unlikely(core->board.tuner_type == UNSET)) return -EINVAL; if (t->index != 0) return -EINVAL; strscpy(t->name, "Television", sizeof(t->name)); t->capability = V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; call_all(core, tuner, g_tuner, t); cx88_get_stereo(core, t); reg = cx_read(MO_DEVICE_STATUS); t->signal = (reg & (1 << 5)) ? 
0xffff : 0x0000; return 0; } static int vidioc_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; if (core->board.tuner_type == UNSET) return -EINVAL; if (t->index != 0) return -EINVAL; cx88_set_stereo(core, t->audmode, 1); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; if (unlikely(core->board.tuner_type == UNSET)) return -EINVAL; if (f->tuner) return -EINVAL; f->frequency = core->freq; call_all(core, tuner, g_frequency, f); return 0; } int cx88_set_freq(struct cx88_core *core, const struct v4l2_frequency *f) { struct v4l2_frequency new_freq = *f; if (unlikely(core->board.tuner_type == UNSET)) return -EINVAL; if (unlikely(f->tuner != 0)) return -EINVAL; cx88_newstation(core); call_all(core, tuner, s_frequency, f); call_all(core, tuner, g_frequency, &new_freq); core->freq = new_freq.frequency; /* When changing channels it is required to reset TVAUDIO */ usleep_range(10000, 20000); cx88_set_tvaudio(core); return 0; } EXPORT_SYMBOL(cx88_set_freq); static int vidioc_s_frequency(struct file *file, void *priv, const struct v4l2_frequency *f) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; return cx88_set_freq(core, f); } #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_register(struct file *file, void *fh, struct v4l2_dbg_register *reg) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; /* cx2388x has a 24-bit register space */ reg->val = cx_read(reg->reg & 0xfffffc); reg->size = 4; return 0; } static int vidioc_s_register(struct file *file, void *fh, const struct v4l2_dbg_register *reg) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; cx_write(reg->reg & 0xfffffc, reg->val); return 0; } #endif /* ----------------------------------------------------------- */ /* RADIO SPECIFIC IOCTLS */ /* ----------------------------------------------------------- */ static int radio_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; if (unlikely(t->index > 0)) return -EINVAL; strscpy(t->name, "Radio", sizeof(t->name)); call_all(core, tuner, g_tuner, t); return 0; } static int radio_s_tuner(struct file *file, void *priv, const struct v4l2_tuner *t) { struct cx8800_dev *dev = video_drvdata(file); struct cx88_core *core = dev->core; if (t->index != 0) return -EINVAL; call_all(core, tuner, s_tuner, t); return 0; } /* ----------------------------------------------------------- */ static const char *cx88_vid_irqs[32] = { "y_risci1", "u_risci1", "v_risci1", "vbi_risc1", "y_risci2", "u_risci2", "v_risci2", "vbi_risc2", "y_oflow", "u_oflow", "v_oflow", "vbi_oflow", "y_sync", "u_sync", "v_sync", "vbi_sync", "opc_err", "par_err", "rip_err", "pci_abort", }; static void cx8800_vid_irq(struct cx8800_dev *dev) { struct cx88_core *core = dev->core; u32 status, mask, count; status = cx_read(MO_VID_INTSTAT); mask = cx_read(MO_VID_INTMSK); if (0 == (status & mask)) return; cx_write(MO_VID_INTSTAT, status); if (irq_debug || (status & mask & ~0xff)) cx88_print_irqbits("irq vid", cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs), status, mask); /* risc op code error */ if (status & (1 << 16)) { pr_warn("video risc op code error\n"); cx_clear(MO_VID_DMACNTRL, 0x11); cx_clear(VID_CAPTURE_CONTROL, 0x06);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]); } /* risc1 y */ if (status & 0x01) { spin_lock(&dev->slock); count = cx_read(MO_VIDY_GPCNT); cx88_wakeup(core, &dev->vidq, count); spin_unlock(&dev->slock); } /* risc1 vbi */ if (status & 0x08) { spin_lock(&dev->slock); count = cx_read(MO_VBI_GPCNT); cx88_wakeup(core, &dev->vbiq, count); spin_unlock(&dev->slock); } } static irqreturn_t cx8800_irq(int irq, void *dev_id) { struct cx8800_dev *dev = dev_id; struct cx88_core *core = dev->core; u32 status; int loop, handled = 0; for (loop = 0; loop < 10; loop++) { status = cx_read(MO_PCI_INTSTAT) & (core->pci_irqmask | PCI_INT_VIDINT); if (status == 0) goto out; cx_write(MO_PCI_INTSTAT, status); handled = 1; if (status & core->pci_irqmask) cx88_core_irq(core, status); if (status & PCI_INT_VIDINT) cx8800_vid_irq(dev); } if (loop == 10) { pr_warn("irq loop -- clearing mask\n"); cx_write(MO_PCI_INTMSK, 0); } out: return IRQ_RETVAL(handled); } /* ----------------------------------------------------------- */ /* exported stuff */ static const struct v4l2_file_operations video_fops = { .owner = THIS_MODULE, .open = v4l2_fh_open, .release = vb2_fop_release, .read = vb2_fop_read, .poll = vb2_fop_poll, .mmap = vb2_fop_mmap, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_video_template = { .name = "cx8800-video", .fops = &video_fops, .ioctl_ops = &video_ioctl_ops, .tvnorms = CX88_NORMS, }; static const struct v4l2_ioctl_ops vbi_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_try_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_s_fmt_vbi_cap = cx8800_vbi_fmt, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_vbi_template = { .name = "cx8800-vbi", .fops = &video_fops, .ioctl_ops = &vbi_ioctl_ops, .tvnorms = CX88_NORMS, }; static const struct v4l2_file_operations 
radio_fops = { .owner = THIS_MODULE, .open = radio_open, .poll = v4l2_ctrl_poll, .release = v4l2_fh_release, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops radio_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_g_tuner = radio_g_tuner, .vidioc_s_tuner = radio_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static const struct video_device cx8800_radio_template = { .name = "cx8800-radio", .fops = &radio_fops, .ioctl_ops = &radio_ioctl_ops, }; static const struct v4l2_ctrl_ops cx8800_ctrl_vid_ops = { .s_ctrl = cx8800_s_vid_ctrl, }; static const struct v4l2_ctrl_ops cx8800_ctrl_aud_ops = { .s_ctrl = cx8800_s_aud_ctrl, }; /* ----------------------------------------------------------- */ static void cx8800_unregister_video(struct cx8800_dev *dev) { video_unregister_device(&dev->radio_dev); video_unregister_device(&dev->vbi_dev); video_unregister_device(&dev->video_dev); } static int cx8800_initdev(struct pci_dev *pci_dev, const struct pci_device_id *pci_id) { struct cx8800_dev *dev; struct cx88_core *core; struct vb2_queue *q; int err; int i; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; /* pci init */ dev->pci = pci_dev; if (pci_enable_device(pci_dev)) { err = -EIO; goto fail_free; } core = cx88_core_get(dev->pci); if (!core) { err = -EINVAL; goto fail_disable; } dev->core = core; /* print pci info */ dev->pci_rev = pci_dev->revision; pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat); pr_info("found at %s, rev: %d, irq: %d, latency: %d, mmio: 0x%llx\n", pci_name(pci_dev), dev->pci_rev, pci_dev->irq, dev->pci_lat, (unsigned long long)pci_resource_start(pci_dev, 0)); pci_set_master(pci_dev); err = dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32)); if (err) { pr_err("Oops: no 32bit PCI DMA ???\n"); goto fail_core; } /* initialize driver struct */ spin_lock_init(&dev->slock); /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); /* init vbi dma queues */ INIT_LIST_HEAD(&dev->vbiq.active); /* get irq */ err = request_irq(pci_dev->irq, cx8800_irq, IRQF_SHARED, core->name, dev); if (err < 0) { pr_err("can't get IRQ %d\n", pci_dev->irq); goto fail_core; } cx_set(MO_PCI_INTMSK, core->pci_irqmask); for (i = 0; i < CX8800_AUD_CTLS; i++) { const struct cx88_ctrl *cc = &cx8800_aud_ctls[i]; struct v4l2_ctrl *vc; vc = v4l2_ctrl_new_std(&core->audio_hdl, &cx8800_ctrl_aud_ops, cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value); if (!vc) { err = core->audio_hdl.error; goto fail_irq; } vc->priv = (void *)cc; } for (i = 0; i < CX8800_VID_CTLS; i++) { const struct cx88_ctrl *cc = &cx8800_vid_ctls[i]; struct v4l2_ctrl *vc; vc = v4l2_ctrl_new_std(&core->video_hdl, &cx8800_ctrl_vid_ops, cc->id, cc->minimum, cc->maximum, cc->step, cc->default_value); if (!vc) { err = core->video_hdl.error; goto fail_irq; } vc->priv = (void *)cc; if (vc->id == V4L2_CID_CHROMA_AGC) core->chroma_agc = vc; } v4l2_ctrl_add_handler(&core->video_hdl, &core->audio_hdl, NULL, false); /* load and configure helper modules */ if (core->board.audio_chip == CX88_AUDIO_WM8775) { struct i2c_board_info wm8775_info = { .type = "wm8775", .addr = 0x36 >> 1, .platform_data = &core->wm8775_data, }; struct v4l2_subdev *sd; if (core->boardnr == CX88_BOARD_HAUPPAUGE_NOVASPLUS_S1) core->wm8775_data.is_nova_s = 
true; else core->wm8775_data.is_nova_s = false; sd = v4l2_i2c_new_subdev_board(&core->v4l2_dev, &core->i2c_adap, &wm8775_info, NULL); if (sd) { core->sd_wm8775 = sd; sd->grp_id = WM8775_GID; } } if (core->board.audio_chip == CX88_AUDIO_TVAUDIO) { /* * This probes for a tda9874 as is used on some * Pixelview Ultra boards. */ v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap, "tvaudio", 0, I2C_ADDRS(0xb0 >> 1)); } switch (core->boardnr) { case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD: case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: { static const struct i2c_board_info rtc_info = { I2C_BOARD_INFO("isl1208", 0x6f) }; request_module("rtc-isl1208"); core->i2c_rtc = i2c_new_client_device(&core->i2c_adap, &rtc_info); } fallthrough; case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: case CX88_BOARD_NOTONLYTV_LV3H: request_module("ir-kbd-i2c"); } /* Sets device info at pci_dev */ pci_set_drvdata(pci_dev, dev); dev->fmt = format_by_fourcc(V4L2_PIX_FMT_BGR24); /* Maintain a reference so cx88-blackbird can query the 8800 device. */ core->v4ldev = dev; /* initial device configuration */ mutex_lock(&core->lock); cx88_set_tvnorm(core, V4L2_STD_NTSC_M); v4l2_ctrl_handler_setup(&core->video_hdl); v4l2_ctrl_handler_setup(&core->audio_hdl); cx88_video_mux(core, 0); q = &dev->vb2_vidq; q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; q->gfp_flags = GFP_DMA32; q->min_queued_buffers = 2; q->drv_priv = dev; q->buf_struct_size = sizeof(struct cx88_buffer); q->ops = &cx8800_video_qops; q->mem_ops = &vb2_dma_sg_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &core->lock; q->dev = &dev->pci->dev; err = vb2_queue_init(q); if (err < 0) goto fail_unreg; q = &dev->vb2_vbiq; q->type = V4L2_BUF_TYPE_VBI_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; q->gfp_flags = GFP_DMA32; q->min_queued_buffers = 2; q->drv_priv = dev; q->buf_struct_size = sizeof(struct cx88_buffer); q->ops = &cx8800_vbi_qops; q->mem_ops = &vb2_dma_sg_memops; q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; q->lock = &core->lock; q->dev = &dev->pci->dev; err = vb2_queue_init(q); if (err < 0) goto fail_unreg; /* register v4l devices */ cx88_vdev_init(core, dev->pci, &dev->video_dev, &cx8800_video_template, "video"); video_set_drvdata(&dev->video_dev, dev); dev->video_dev.ctrl_handler = &core->video_hdl; dev->video_dev.queue = &dev->vb2_vidq; dev->video_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; if (core->board.tuner_type != UNSET) dev->video_dev.device_caps |= V4L2_CAP_TUNER; err = video_register_device(&dev->video_dev, VFL_TYPE_VIDEO, video_nr[core->nr]); if (err < 0) { pr_err("can't register video device\n"); goto fail_unreg; } pr_info("registered device %s [v4l2]\n", video_device_node_name(&dev->video_dev)); cx88_vdev_init(core, dev->pci, &dev->vbi_dev, &cx8800_vbi_template, "vbi"); video_set_drvdata(&dev->vbi_dev, dev); dev->vbi_dev.queue = &dev->vb2_vbiq; dev->vbi_dev.device_caps = V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | V4L2_CAP_VBI_CAPTURE; if (core->board.tuner_type != UNSET) dev->vbi_dev.device_caps |= V4L2_CAP_TUNER; err = video_register_device(&dev->vbi_dev, VFL_TYPE_VBI, vbi_nr[core->nr]); if (err < 0) { pr_err("can't register vbi device\n"); goto fail_unreg; } pr_info("registered device %s\n", video_device_node_name(&dev->vbi_dev)); if (core->board.radio.type == CX88_RADIO) { cx88_vdev_init(core, dev->pci, &dev->radio_dev, &cx8800_radio_template, "radio"); video_set_drvdata(&dev->radio_dev, dev); 
dev->radio_dev.ctrl_handler = &core->audio_hdl; dev->radio_dev.device_caps = V4L2_CAP_RADIO | V4L2_CAP_TUNER; err = video_register_device(&dev->radio_dev, VFL_TYPE_RADIO, radio_nr[core->nr]); if (err < 0) { pr_err("can't register radio device\n"); goto fail_unreg; } pr_info("registered device %s\n", video_device_node_name(&dev->radio_dev)); } /* start tvaudio thread */ if (core->board.tuner_type != UNSET) { core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio"); if (IS_ERR(core->kthread)) { err = PTR_ERR(core->kthread); pr_err("failed to create cx88 audio thread, err=%d\n", err); } } mutex_unlock(&core->lock); return 0; fail_unreg: cx8800_unregister_video(dev); mutex_unlock(&core->lock); fail_irq: free_irq(pci_dev->irq, dev); fail_core: core->v4ldev = NULL; cx88_core_put(core, dev->pci); fail_disable: pci_disable_device(pci_dev); fail_free: kfree(dev); return err; } static void cx8800_finidev(struct pci_dev *pci_dev) { struct cx8800_dev *dev = pci_get_drvdata(pci_dev); struct cx88_core *core = dev->core; /* stop thread */ if (core->kthread) { kthread_stop(core->kthread); core->kthread = NULL; } if (core->ir) cx88_ir_stop(core); cx88_shutdown(core); /* FIXME */ /* unregister stuff */ free_irq(pci_dev->irq, dev); cx8800_unregister_video(dev); pci_disable_device(pci_dev); core->v4ldev = NULL; /* free memory */ cx88_core_put(core, dev->pci); kfree(dev); } static int __maybe_unused cx8800_suspend(struct device *dev_d) { struct cx8800_dev *dev = dev_get_drvdata(dev_d); struct cx88_core *core = dev->core; unsigned long flags; /* stop video+vbi capture */ spin_lock_irqsave(&dev->slock, flags); if (!list_empty(&dev->vidq.active)) { pr_info("suspend video\n"); stop_video_dma(dev); } if (!list_empty(&dev->vbiq.active)) { pr_info("suspend vbi\n"); cx8800_stop_vbi_dma(dev); } spin_unlock_irqrestore(&dev->slock, flags); if (core->ir) cx88_ir_stop(core); /* FIXME -- shutdown device */ cx88_shutdown(core); dev->state.disabled = 1; return 0; } static int __maybe_unused cx8800_resume(struct device *dev_d) { struct cx8800_dev *dev = dev_get_drvdata(dev_d); struct cx88_core *core = dev->core; unsigned long flags; dev->state.disabled = 0; /* FIXME: re-initialize hardware */ cx88_reset(core); if (core->ir) cx88_ir_start(core); cx_set(MO_PCI_INTMSK, core->pci_irqmask); /* restart video+vbi capture */ spin_lock_irqsave(&dev->slock, flags); if (!list_empty(&dev->vidq.active)) { pr_info("resume video\n"); restart_video_queue(dev, &dev->vidq); } if (!list_empty(&dev->vbiq.active)) { pr_info("resume vbi\n"); cx8800_restart_vbi_queue(dev, &dev->vbiq); } spin_unlock_irqrestore(&dev->slock, flags); return 0; } /* ----------------------------------------------------------- */ static const struct pci_device_id cx8800_pci_tbl[] = { { .vendor = 0x14f1, .device = 0x8800, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { /* --- end of list --- */ } }; MODULE_DEVICE_TABLE(pci, cx8800_pci_tbl); static SIMPLE_DEV_PM_OPS(cx8800_pm_ops, cx8800_suspend, cx8800_resume); static struct pci_driver cx8800_pci_driver = { .name = "cx8800", .id_table = cx8800_pci_tbl, .probe = cx8800_initdev, .remove = cx8800_finidev, .driver.pm = &cx8800_pm_ops, }; module_pci_driver(cx8800_pci_driver);
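/*
 * Illustrative userspace sketch (not part of the driver): exercises the
 * cx8800 ioctls implemented above -- VIDIOC_QUERYCAP, VIDIOC_S_FMT and the
 * read() path exposed through vb2_fop_read.  The device path /dev/video0
 * is an assumption; any V4L2 video capture node behaves the same way.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_capability cap;
	struct v4l2_format fmt;
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
		printf("driver %s card %s caps 0x%08x\n",
		       (char *)cap.driver, (char *)cap.card,
		       (unsigned int)cap.capabilities);

	/* Request the driver's default pixel format at full NTSC size */
	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	fmt.fmt.pix.width = 640;
	fmt.fmt.pix.height = 480;
	fmt.fmt.pix.pixelformat = V4L2_PIX_FMT_BGR24;
	fmt.fmt.pix.field = V4L2_FIELD_INTERLACED;
	if (ioctl(fd, VIDIOC_S_FMT, &fmt) == 0) {
		char *buf = malloc(fmt.fmt.pix.sizeimage);

		/* one read() grabs one frame via the vb2 read emulation */
		if (buf && read(fd, buf, fmt.fmt.pix.sizeimage) > 0)
			printf("captured %u bytes\n", fmt.fmt.pix.sizeimage);
		free(buf);
	}
	close(fd);
	return 0;
}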
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/outercache.h
 *
 * Copyright (C) 2010 ARM Ltd.
 * Written by Catalin Marinas <[email protected]>
 */

#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H

#include <linux/types.h>

struct l2x0_regs;

struct outer_cache_fns {
	void (*inv_range)(unsigned long, unsigned long);
	void (*clean_range)(unsigned long, unsigned long);
	void (*flush_range)(unsigned long, unsigned long);
	void (*flush_all)(void);
	void (*disable)(void);
#ifdef CONFIG_OUTER_CACHE_SYNC
	void (*sync)(void);
#endif
	void (*resume)(void);

	/* This is an ARM L2C thing */
	void (*write_sec)(unsigned long, unsigned);
	void (*configure)(const struct l2x0_regs *);
};

extern struct outer_cache_fns outer_cache;

#ifdef CONFIG_OUTER_CACHE
/**
 * outer_inv_range - invalidate range of outer cache lines
 * @start: starting physical address, inclusive
 * @end: end physical address, exclusive
 */
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{
	if (outer_cache.inv_range)
		outer_cache.inv_range(start, end);
}

/**
 * outer_clean_range - clean dirty outer cache lines
 * @start: starting physical address, inclusive
 * @end: end physical address, exclusive
 */
static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{
	if (outer_cache.clean_range)
		outer_cache.clean_range(start, end);
}

/**
 * outer_flush_range - clean and invalidate outer cache lines
 * @start: starting physical address, inclusive
 * @end: end physical address, exclusive
 */
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{
	if (outer_cache.flush_range)
		outer_cache.flush_range(start, end);
}

/**
 * outer_flush_all - clean and invalidate all cache lines in the outer cache
 *
 * Note: depending on implementation, this may not be atomic - it must
 * only be called with interrupts disabled and no other active outer
 * cache masters.
 *
 * It is intended that this function is only used by implementations
 * needing to override the outer_cache.disable() method due to security.
 * (Some implementations perform this as a clean followed by an invalidate.)
 */
static inline void outer_flush_all(void)
{
	if (outer_cache.flush_all)
		outer_cache.flush_all();
}

/**
 * outer_disable - clean, invalidate and disable the outer cache
 *
 * Disable the outer cache, ensuring that any data contained in the outer
 * cache is pushed out to lower levels of system memory.  The note and
 * conditions above concerning outer_flush_all() apply here.
 */
extern void outer_disable(void);

/**
 * outer_resume - restore the cache configuration and re-enable outer cache
 *
 * Restore any configuration that the cache had when previously enabled,
 * and re-enable the outer cache.
 */
static inline void outer_resume(void)
{
	if (outer_cache.resume)
		outer_cache.resume();
}

#else

static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ }
static inline void outer_flush_all(void) { }
static inline void outer_disable(void) { }
static inline void outer_resume(void) { }

#endif

#endif	/* __ASM_OUTERCACHE_H */
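/*
 * Illustrative sketch (not kernel code): a self-contained userspace
 * analogue of the outer_cache pattern above -- a global table of optional
 * callbacks where every wrapper tolerates an unset hook, so configurations
 * with no outer cache simply leave the table zeroed and every call becomes
 * a no-op.  All names here are invented for the demo.
 */
#include <stdio.h>

struct demo_cache_fns {
	void (*inv_range)(unsigned long start, unsigned long end);
	void (*flush_all)(void);
};

static struct demo_cache_fns demo_cache;	/* zeroed: no hooks installed */

/* wrapper guards against a NULL hook, exactly as outer_inv_range() does */
static void demo_inv_range(unsigned long start, unsigned long end)
{
	if (demo_cache.inv_range)
		demo_cache.inv_range(start, end);
}

/* a stand-in for a platform L2 implementation of the op */
static void l2_inv_range(unsigned long start, unsigned long end)
{
	printf("invalidate [%#lx, %#lx)\n", start, end);
}

int main(void)
{
	demo_inv_range(0x1000, 0x2000);		/* no-op: table still empty */
	demo_cache.inv_range = l2_inv_range;	/* "probe" installs the hook */
	demo_inv_range(0x1000, 0x2000);		/* now dispatches to the L2 op */
	return 0;
}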
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
#define AMDGPU_DM_AMDGPU_DM_HDCP_H_

#include "mod_hdcp.h"
#include "hdcp.h"
#include "dc.h"
#include "dm_cp_psp.h"
#include "amdgpu.h"

struct mod_hdcp;
struct mod_hdcp_link;
struct mod_hdcp_display;
struct cp_psp;

struct hdcp_workqueue {
	struct work_struct cpirq_work;
	struct work_struct property_update_work;
	struct delayed_work callback_dwork;
	struct delayed_work watchdog_timer_dwork;
	struct delayed_work property_validate_dwork;
	struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX];
	struct mutex mutex;

	struct mod_hdcp hdcp;
	struct mod_hdcp_output output;
	struct mod_hdcp_display display;
	struct mod_hdcp_link link;

	enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX];

	/*
	 * When a display is unplugged from an MST hub, its connector is
	 * destroyed within dm_dp_mst_connector_destroy and the connector's
	 * hdcp properties (type, undesired, desired, enabled) are lost.
	 * So save the hdcp properties into hdcp_work within
	 * amdgpu_dm_atomic_commit_tail.  If the same display is plugged back
	 * with the same display index, its hdcp properties will be retrieved
	 * from hdcp_work within dm_dp_mst_get_modes.
	 */

	/* un-desired, desired, enabled */
	unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX];
	/* hdcp1.x, hdcp2.x */
	unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX];

	uint8_t max_link;

	uint8_t *srm;
	uint8_t *srm_temp;
	uint32_t srm_version;
	uint32_t srm_size;
	struct bin_attribute attr;
};

void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
			 unsigned int link_index,
			 struct amdgpu_dm_connector *aconnector,
			 uint8_t content_type,
			 bool enable_encryption);

void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work);

struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev,
					     struct cp_psp *cp_psp,
					     struct dc *dc);

#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
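/*
 * Illustrative sketch (not driver code): a self-contained model of the
 * save/restore scheme the hdcp_workqueue comment describes.  Connector
 * state dies with the connector on MST unplug, so per-display-index copies
 * of the HDCP properties survive in the work structure and are copied back
 * when the same index reappears.  All types and values here are invented
 * stand-ins, not the real DRM property codes.
 */
#include <stdio.h>

#define DEMO_MAX_DISPLAY_INDEX 6

struct demo_connector {
	unsigned int content_protection;	/* 0 undesired, 1 desired, 2 enabled */
	unsigned int hdcp_content_type;		/* 0 hdcp1.x, 1 hdcp2.x */
};

struct demo_hdcp_work {
	unsigned int content_protection[DEMO_MAX_DISPLAY_INDEX];
	unsigned int hdcp_content_type[DEMO_MAX_DISPLAY_INDEX];
};

/* commit time: stash the live property values by display index */
static void demo_save(struct demo_hdcp_work *w, unsigned int idx,
		      const struct demo_connector *c)
{
	w->content_protection[idx] = c->content_protection;
	w->hdcp_content_type[idx] = c->hdcp_content_type;
}

/* replug time: a fresh connector at the same index gets them back */
static void demo_restore(const struct demo_hdcp_work *w, unsigned int idx,
			 struct demo_connector *c)
{
	c->content_protection = w->content_protection[idx];
	c->hdcp_content_type = w->hdcp_content_type[idx];
}

int main(void)
{
	struct demo_hdcp_work work = { { 0 }, { 0 } };
	struct demo_connector conn = { 2, 1 };	/* enabled, hdcp2.x */

	demo_save(&work, 0, &conn);		/* unplug destroys conn... */
	conn = (struct demo_connector){ 0, 0 };	/* ...a new one starts blank */
	demo_restore(&work, 0, &conn);
	printf("restored cp=%u type=%u\n",
	       conn.content_protection, conn.hdcp_content_type);
	return 0;
}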
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, The Linux Foundation. All rights reserved.
 */

#ifndef QCOM_PHY_QMP_PCS_V5_H_
#define QCOM_PHY_QMP_PCS_V5_H_

/* Only for QMP V5 PHY - USB/PCIe PCS registers */
#define QPHY_V5_PCS_SW_RESET			0x000
#define QPHY_V5_PCS_PCS_STATUS1			0x014
#define QPHY_V5_PCS_POWER_DOWN_CONTROL		0x040
#define QPHY_V5_PCS_START_CONTROL		0x044
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG1		0x0c4
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG2		0x0c8
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG3		0x0cc
#define QPHY_V5_PCS_LOCK_DETECT_CONFIG6		0x0d8
#define QPHY_V5_PCS_REFGEN_REQ_CONFIG1		0x0dc
#define QPHY_V5_PCS_G3S2_PRE_GAIN		0x170
#define QPHY_V5_PCS_RX_SIGDET_LVL		0x188
#define QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_L	0x190
#define QPHY_V5_PCS_RCVR_DTCT_DLY_P1U2_H	0x194
#define QPHY_V5_PCS_RATE_SLEW_CNTRL1		0x198
#define QPHY_V5_PCS_CDR_RESET_TIME		0x1b0
#define QPHY_V5_PCS_RX_CONFIG			0x1b0
#define QPHY_V5_PCS_ALIGN_DETECT_CONFIG1	0x1c0
#define QPHY_V5_PCS_ALIGN_DETECT_CONFIG2	0x1c4
#define QPHY_V5_PCS_PCS_TX_RX_CONFIG		0x1d0
#define QPHY_V5_PCS_EQ_CONFIG1			0x1dc
#define QPHY_V5_PCS_EQ_CONFIG2			0x1e0
#define QPHY_V5_PCS_EQ_CONFIG3			0x1e4
#define QPHY_V5_PCS_EQ_CONFIG5			0x1ec

#endif
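/*
 * Illustrative sketch (not the real phy-qcom-qmp driver): roughly how a
 * PHY driver might sequence the V5 PCS registers above during power-on.
 * The real driver programs these registers from per-SoC init tables, so
 * the exact order and values differ; the bit masks used here
 * (PWRDN_ENABLE, SERDES_START_PCS_START, PHYSTATUS_BUSY) are invented
 * placeholders, and `pcs' is assumed to be an already ioremapped base.
 */
#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define PWRDN_ENABLE		BIT(0)	/* placeholder bit layout */
#define SERDES_START_PCS_START	BIT(0)	/* placeholder bit layout */
#define PHYSTATUS_BUSY		BIT(6)	/* placeholder bit layout */

static int demo_qmp_pcs_power_on(void __iomem *pcs)
{
	u32 status;

	/* take the PCS out of power-down before touching anything else */
	writel(PWRDN_ENABLE, pcs + QPHY_V5_PCS_POWER_DOWN_CONTROL);

	/* ... per-SoC config table writes (lock detect, eq, ...) go here ... */

	/* release the software reset, then start serdes and pcs */
	writel(0, pcs + QPHY_V5_PCS_SW_RESET);
	writel(SERDES_START_PCS_START, pcs + QPHY_V5_PCS_START_CONTROL);

	/* wait for the PHY to report ready in PCS_STATUS1 */
	return readl_poll_timeout(pcs + QPHY_V5_PCS_PCS_STATUS1, status,
				  !(status & PHYSTATUS_BUSY), 200, 10000);
}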
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */ /* * Copyright(c) 2018 Intel Corporation. * */ #if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ) #define __HFI1_TRACE_TID_H #include <linux/tracepoint.h> #include <linux/trace_seq.h> #include "hfi.h" #define tidtype_name(type) { PT_##type, #type } #define show_tidtype(type) \ __print_symbolic(type, \ tidtype_name(EXPECTED), \ tidtype_name(EAGER), \ tidtype_name(INVALID)) \ #undef TRACE_SYSTEM #define TRACE_SYSTEM hfi1_tid u8 hfi1_trace_get_tid_ctrl(u32 ent); u16 hfi1_trace_get_tid_len(u32 ent); u16 hfi1_trace_get_tid_idx(u32 ent); #define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \ "max write %u, max length %u, jkey 0x%x timeout %u " \ "urg %u" #define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \ "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x " \ "ib_psn 0x%x-%x npagesets %u tnode_cnt %u " \ "tidcnt %u tid_idx %u tid_offset %u length %u sent %u" #define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \ "used %u cnt %u" #define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \ "r_psn 0x%x r_state 0x%x r_flags 0x%x " \ "r_head_ack_queue %u s_tail_ack_queue %u " \ "s_acked_ack_queue %u s_ack_state 0x%x " \ "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \ "iow_flags 0x%lx" #define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u " \ "s_head %u s_acked %u s_last %u s_psn 0x%x " \ "s_last_psn 0x%x s_flags 0x%x ps_flags 0x%x " \ "iow_flags 0x%lx s_state 0x%x s_num_rd %u s_retry %u" #define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \ "tid_r_comp %u pending_tid_r_segs %u " \ "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \ "s_state 0x%x hw_flow_index %u generation 0x%x " \ "fpsn 0x%x" #define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \ "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \ "total_segs %u setup_head %u clear_tail %u flow_idx %u " \ "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \ "r_last_ackd 0x%x s_next_psn 0x%x" #define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \ "s_acked_ack_queue %u s_tail_ack_queue %u " \ "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \ " diff %d" #define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \ "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \ "pending_tid_w_segs %u sync_pt %s " \ "ps_nak_psn 0x%x ps_nak_state 0x%x " \ "prnr_nak_state 0x%x hw_flow_index %u generation "\ "0x%x fpsn 0x%x resync %s" \ "r_next_psn_kdeth 0x%x" #define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \ "s_tid_tail %u s_tid_head %u " \ "pending_tid_w_resp %u n_requests %u " \ "n_tid_requests %u s_flags 0x%x ps_flags 0x%x "\ "iow_flags 0x%lx s_state 0x%x s_retry %u" #define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x " \ "RcvTypeError 0x%x PSN 0x%x" DECLARE_EVENT_CLASS(/* class */ hfi1_exp_tid_reg_unreg, TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, unsigned long va, unsigned long pa, dma_addr_t dma), TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma), TP_STRUCT__entry(/* entry */ __field(unsigned int, ctxt) __field(u16, subctxt) __field(u32, rarr) __field(u32, npages) __field(unsigned long, va) __field(unsigned long, pa) __field(dma_addr_t, dma) ), TP_fast_assign(/* assign */ __entry->ctxt = ctxt; __entry->subctxt = subctxt; __entry->rarr = rarr; __entry->npages = npages; __entry->va = va; __entry->pa = pa; __entry->dma = dma; ), TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx 
dma:0x%llx", __entry->ctxt, __entry->subctxt, __entry->rarr, __entry->npages, __entry->pa, __entry->va, __entry->dma ) ); DEFINE_EVENT(/* exp_tid_unreg */ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg, TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, unsigned long va, unsigned long pa, dma_addr_t dma), TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) ); DEFINE_EVENT(/* exp_tid_reg */ hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg, TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages, unsigned long va, unsigned long pa, dma_addr_t dma), TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma) ); TRACE_EVENT(/* put_tid */ hfi1_put_tid, TP_PROTO(struct hfi1_devdata *dd, u32 index, u32 type, unsigned long pa, u16 order), TP_ARGS(dd, index, type, pa, order), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd) __field(unsigned long, pa) __field(u32, index) __field(u32, type) __field(u16, order) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd); __entry->pa = pa; __entry->index = index; __entry->type = type; __entry->order = order; ), TP_printk("[%s] type %s pa %lx index %u order %u", __get_str(dev), show_tidtype(__entry->type), __entry->pa, __entry->index, __entry->order ) ); TRACE_EVENT(/* exp_tid_inval */ hfi1_exp_tid_inval, TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr, u32 npages, dma_addr_t dma), TP_ARGS(ctxt, subctxt, va, rarr, npages, dma), TP_STRUCT__entry(/* entry */ __field(unsigned int, ctxt) __field(u16, subctxt) __field(unsigned long, va) __field(u32, rarr) __field(u32, npages) __field(dma_addr_t, dma) ), TP_fast_assign(/* assign */ __entry->ctxt = ctxt; __entry->subctxt = subctxt; __entry->va = va; __entry->rarr = rarr; __entry->npages = npages; __entry->dma = dma; ), TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx", __entry->ctxt, __entry->subctxt, __entry->rarr, __entry->npages, __entry->va, __entry->dma ) ); DECLARE_EVENT_CLASS(/* opfn_state */ hfi1_opfn_state_template, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u16, requested) __field(u16, completed) __field(u8, curr) ), TP_fast_assign(/* assign */ struct hfi1_qp_priv *priv = qp->priv; DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->requested = priv->opfn.requested; __entry->completed = priv->opfn.completed; __entry->curr = priv->opfn.curr; ), TP_printk(/* print */ "[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x", __get_str(dev), __entry->qpn, __entry->requested, __entry->completed, __entry->curr ) ); DEFINE_EVENT(/* event */ hfi1_opfn_state_template, hfi1_opfn_state_conn_request, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_opfn_state_template, hfi1_opfn_state_conn_response, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_opfn_state_template, hfi1_opfn_state_conn_reply, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_opfn_state_template, hfi1_opfn_state_conn_error, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DECLARE_EVENT_CLASS(/* opfn_data */ hfi1_opfn_data_template, TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), TP_ARGS(qp, capcode, data), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u32, state) __field(u8, capcode) __field(u64, data) ), TP_fast_assign(/* assign */ 
DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->state = qp->state; __entry->capcode = capcode; __entry->data = data; ), TP_printk(/* printk */ "[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx", __get_str(dev), __entry->qpn, __entry->state, __entry->capcode, __entry->data ) ); DEFINE_EVENT(/* event */ hfi1_opfn_data_template, hfi1_opfn_data_conn_request, TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), TP_ARGS(qp, capcode, data) ); DEFINE_EVENT(/* event */ hfi1_opfn_data_template, hfi1_opfn_data_conn_response, TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), TP_ARGS(qp, capcode, data) ); DEFINE_EVENT(/* event */ hfi1_opfn_data_template, hfi1_opfn_data_conn_reply, TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data), TP_ARGS(qp, capcode, data) ); DECLARE_EVENT_CLASS(/* opfn_param */ hfi1_opfn_param_template, TP_PROTO(struct rvt_qp *qp, char remote, struct tid_rdma_params *param), TP_ARGS(qp, remote, param), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(char, remote) __field(u32, param_qp) __field(u32, max_len) __field(u16, jkey) __field(u8, max_read) __field(u8, max_write) __field(u8, timeout) __field(u8, urg) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->remote = remote; __entry->param_qp = param->qp; __entry->max_len = param->max_len; __entry->jkey = param->jkey; __entry->max_read = param->max_read; __entry->max_write = param->max_write; __entry->timeout = param->timeout; __entry->urg = param->urg; ), TP_printk(/* print */ OPFN_PARAM_PRN, __get_str(dev), __entry->qpn, __entry->remote ? "remote" : "local", __entry->param_qp, __entry->max_read, __entry->max_write, __entry->max_len, __entry->jkey, __entry->timeout, __entry->urg ) ); DEFINE_EVENT(/* event */ hfi1_opfn_param_template, hfi1_opfn_param, TP_PROTO(struct rvt_qp *qp, char remote, struct tid_rdma_params *param), TP_ARGS(qp, remote, param) ); DECLARE_EVENT_CLASS(/* msg */ hfi1_msg_template, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more), TP_STRUCT__entry(/* entry */ __field(u32, qpn) __string(msg, msg) __field(u64, more) ), TP_fast_assign(/* assign */ __entry->qpn = qp ? 
qp->ibqp.qp_num : 0; __assign_str(msg); __entry->more = more; ), TP_printk(/* print */ "qpn 0x%x %s 0x%llx", __entry->qpn, __get_str(msg), __entry->more ) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_opfn_conn_request, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_opfn_conn_error, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_alloc_tids, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_tid_restart_req, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_handle_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_tid_timeout, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DEFINE_EVENT(/* event */ hfi1_msg_template, hfi1_msg_tid_retry_timeout, TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more), TP_ARGS(qp, msg, more) ); DECLARE_EVENT_CLASS(/* tid_flow_page */ hfi1_tid_flow_page_template, TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index, char mtu8k, char v1, void *vaddr), TP_ARGS(qp, flow, index, mtu8k, v1, vaddr), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(char, mtu8k) __field(char, v1) __field(u32, index) __field(u64, page) __field(u64, vaddr) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->mtu8k = mtu8k; __entry->v1 = v1; __entry->index = index; __entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL; __entry->vaddr = (u64)vaddr; ), TP_printk(/* print */ "[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx", __get_str(dev), __entry->qpn, __entry->index, __entry->page, __entry->mtu8k ? (__entry->v1 ? 
"v1" : "v0") : "vaddr", __entry->vaddr ) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_page_template, hfi1_tid_flow_page, TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index, char mtu8k, char v1, void *vaddr), TP_ARGS(qp, flow, index, mtu8k, v1, vaddr) ); DECLARE_EVENT_CLASS(/* tid_pageset */ hfi1_tid_pageset_template, TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count), TP_ARGS(qp, index, idx, count), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u32, index) __field(u16, idx) __field(u16, count) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->index = index; __entry->idx = idx; __entry->count = count; ), TP_printk(/* print */ "[%s] qpn 0x%x list[%u]: idx %u count %u", __get_str(dev), __entry->qpn, __entry->index, __entry->idx, __entry->count ) ); DEFINE_EVENT(/* event */ hfi1_tid_pageset_template, hfi1_tid_pageset, TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count), TP_ARGS(qp, index, idx, count) ); DECLARE_EVENT_CLASS(/* tid_fow */ hfi1_tid_flow_template, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(int, index) __field(int, idx) __field(u32, resp_ib_psn) __field(u32, generation) __field(u32, fspsn) __field(u32, flpsn) __field(u32, r_next_psn) __field(u32, ib_spsn) __field(u32, ib_lpsn) __field(u32, npagesets) __field(u32, tnode_cnt) __field(u32, tidcnt) __field(u32, tid_idx) __field(u32, tid_offset) __field(u32, length) __field(u32, sent) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->index = index; __entry->idx = flow->idx; __entry->resp_ib_psn = flow->flow_state.resp_ib_psn; __entry->generation = flow->flow_state.generation; __entry->fspsn = full_flow_psn(flow, flow->flow_state.spsn); __entry->flpsn = full_flow_psn(flow, flow->flow_state.lpsn); __entry->r_next_psn = flow->flow_state.r_next_psn; __entry->ib_spsn = flow->flow_state.ib_spsn; __entry->ib_lpsn = flow->flow_state.ib_lpsn; __entry->npagesets = flow->npagesets; __entry->tnode_cnt = flow->tnode_cnt; __entry->tidcnt = flow->tidcnt; __entry->tid_idx = flow->tid_idx; __entry->tid_offset = flow->tid_offset; __entry->length = flow->length; __entry->sent = flow->sent; ), TP_printk(/* print */ TID_FLOW_PRN, __get_str(dev), __entry->qpn, __entry->index, __entry->idx, __entry->resp_ib_psn, __entry->generation, __entry->fspsn, __entry->flpsn, __entry->r_next_psn, __entry->ib_spsn, __entry->ib_lpsn, __entry->npagesets, __entry->tnode_cnt, __entry->tidcnt, __entry->tid_idx, __entry->tid_offset, __entry->length, __entry->sent ) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_alloc, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, 
hfi1_tid_flow_rcv_read_resp, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_restart_req, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_build_write_data, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DEFINE_EVENT(/* event */ hfi1_tid_flow_template, hfi1_tid_flow_read_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow), TP_ARGS(qp, index, flow) ); DECLARE_EVENT_CLASS(/* tid_node */ hfi1_tid_node_template, TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, u8 map, u8 used, u8 cnt), TP_ARGS(qp, msg, index, base, map, used, cnt), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __string(msg, msg) __field(u32, index) __field(u32, base) __field(u8, map) __field(u8, used) __field(u8, cnt) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __assign_str(msg); __entry->index = index; __entry->base = base; __entry->map = map; __entry->used = used; __entry->cnt = cnt; ), TP_printk(/* print */ TID_NODE_PRN, __get_str(dev), __entry->qpn, __get_str(msg), __entry->index, __entry->base, __entry->map, __entry->used, __entry->cnt ) ); DEFINE_EVENT(/* event */ hfi1_tid_node_template, hfi1_tid_node_add, TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base, u8 map, u8 used, u8 cnt), TP_ARGS(qp, msg, index, base, map, used, cnt) ); DECLARE_EVENT_CLASS(/* tid_entry */ hfi1_tid_entry_template, TP_PROTO(struct rvt_qp *qp, int index, u32 ent), TP_ARGS(qp, index, ent), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(int, index) __field(u8, ctrl) __field(u16, idx) __field(u16, len) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->index = index; __entry->ctrl = hfi1_trace_get_tid_ctrl(ent); __entry->idx = hfi1_trace_get_tid_idx(ent); __entry->len = hfi1_trace_get_tid_len(ent); ), TP_printk(/* print */ "[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x", __get_str(dev), __entry->qpn, __entry->index, __entry->idx, __entry->len, __entry->ctrl ) ); DEFINE_EVENT(/* event */ hfi1_tid_entry_template, hfi1_tid_entry_alloc, TP_PROTO(struct rvt_qp *qp, int index, u32 entry), TP_ARGS(qp, index, entry) ); DEFINE_EVENT(/* event */ hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp, TP_PROTO(struct rvt_qp *qp, int index, u32 
ent), TP_ARGS(qp, index, ent) ); DEFINE_EVENT(/* event */ hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req, TP_PROTO(struct rvt_qp *qp, int index, u32 ent), TP_ARGS(qp, index, ent) ); DEFINE_EVENT(/* event */ hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp, TP_PROTO(struct rvt_qp *qp, int index, u32 entry), TP_ARGS(qp, index, entry) ); DEFINE_EVENT(/* event */ hfi1_tid_entry_template, hfi1_tid_entry_build_write_data, TP_PROTO(struct rvt_qp *qp, int index, u32 entry), TP_ARGS(qp, index, entry) ); DECLARE_EVENT_CLASS(/* rsp_info */ hfi1_responder_info_template, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u8, state) __field(u8, s_state) __field(u32, psn) __field(u32, r_psn) __field(u8, r_state) __field(u8, r_flags) __field(u8, r_head_ack_queue) __field(u8, s_tail_ack_queue) __field(u8, s_acked_ack_queue) __field(u8, s_ack_state) __field(u8, s_nak_state) __field(u8, r_nak_state) __field(u32, s_flags) __field(u32, ps_flags) __field(unsigned long, iow_flags) ), TP_fast_assign(/* assign */ struct hfi1_qp_priv *priv = qp->priv; DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->state = qp->state; __entry->s_state = qp->s_state; __entry->psn = psn; __entry->r_psn = qp->r_psn; __entry->r_state = qp->r_state; __entry->r_flags = qp->r_flags; __entry->r_head_ack_queue = qp->r_head_ack_queue; __entry->s_tail_ack_queue = qp->s_tail_ack_queue; __entry->s_acked_ack_queue = qp->s_acked_ack_queue; __entry->s_ack_state = qp->s_ack_state; __entry->s_nak_state = qp->s_nak_state; __entry->s_flags = qp->s_flags; __entry->ps_flags = priv->s_flags; __entry->iow_flags = priv->s_iowait.flags; ), TP_printk(/* print */ RSP_INFO_PRN, __get_str(dev), __entry->qpn, __entry->state, __entry->s_state, __entry->psn, __entry->r_psn, __entry->r_state, __entry->r_flags, __entry->r_head_ack_queue, __entry->s_tail_ack_queue, __entry->s_acked_ack_queue, __entry->s_ack_state, __entry->s_nak_state, __entry->s_flags, __entry->ps_flags, __entry->iow_flags ) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_make_rc_ack, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_tid_rcv_error, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_make_tid_ack, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DEFINE_EVENT(/* event */ hfi1_responder_info_template, hfi1_rsp_read_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, u32 psn), TP_ARGS(qp, psn) ); DECLARE_EVENT_CLASS(/* sender_info */ 
hfi1_sender_info_template, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u8, state) __field(u32, s_cur) __field(u32, s_tail) __field(u32, s_head) __field(u32, s_acked) __field(u32, s_last) __field(u32, s_psn) __field(u32, s_last_psn) __field(u32, s_flags) __field(u32, ps_flags) __field(unsigned long, iow_flags) __field(u8, s_state) __field(u8, s_num_rd) __field(u8, s_retry) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->state = qp->state; __entry->s_cur = qp->s_cur; __entry->s_tail = qp->s_tail; __entry->s_head = qp->s_head; __entry->s_acked = qp->s_acked; __entry->s_last = qp->s_last; __entry->s_psn = qp->s_psn; __entry->s_last_psn = qp->s_last_psn; __entry->s_flags = qp->s_flags; __entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags; __entry->iow_flags = ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags; __entry->s_state = qp->s_state; __entry->s_num_rd = qp->s_num_rd_atomic; __entry->s_retry = qp->s_retry; ), TP_printk(/* print */ SENDER_INFO_PRN, __get_str(dev), __entry->qpn, __entry->state, __entry->s_cur, __entry->s_tail, __entry->s_head, __entry->s_acked, __entry->s_last, __entry->s_psn, __entry->s_last_psn, __entry->s_flags, __entry->ps_flags, __entry->iow_flags, __entry->s_state, __entry->s_num_rd, __entry->s_retry ) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_make_rc_req, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_reset_psn, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_restart_rc, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_do_rc_ack, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_rcv_tid_ack, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_make_tid_pkt, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_sender_info_template, hfi1_sender_read_kdeth_eflags, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DECLARE_EVENT_CLASS(/* tid_read_sender */ hfi1_tid_read_sender_template, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(char, newreq) __field(u32, tid_r_reqs) __field(u32, tid_r_comp) __field(u32, pending_tid_r_segs) __field(u32, s_flags) __field(u32, ps_flags) __field(unsigned long, iow_flags) __field(u8, s_state) __field(u32, hw_flow_index) __field(u32, generation) __field(u32, fpsn) ), TP_fast_assign(/* assign */ struct hfi1_qp_priv *priv = qp->priv; DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->newreq = newreq; __entry->tid_r_reqs = priv->tid_r_reqs; __entry->tid_r_comp = priv->tid_r_comp; __entry->pending_tid_r_segs = priv->pending_tid_r_segs; __entry->s_flags = qp->s_flags; __entry->ps_flags = priv->s_flags; __entry->iow_flags = priv->s_iowait.flags; __entry->s_state = priv->s_state; __entry->hw_flow_index = priv->flow_state.index; __entry->generation = priv->flow_state.generation; __entry->fpsn = priv->flow_state.psn; ), TP_printk(/* print */ TID_READ_SENDER_PRN, 
__get_str(dev), __entry->qpn, __entry->newreq, __entry->tid_r_reqs, __entry->tid_r_comp, __entry->pending_tid_r_segs, __entry->s_flags, __entry->ps_flags, __entry->iow_flags, __entry->s_state, __entry->hw_flow_index, __entry->generation, __entry->fpsn ) ); DEFINE_EVENT(/* event */ hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DEFINE_EVENT(/* event */ hfi1_tid_read_sender_template, hfi1_tid_read_sender_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DECLARE_EVENT_CLASS(/* tid_rdma_request */ hfi1_tid_rdma_request_template, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(char, newreq) __field(u8, opcode) __field(u32, psn) __field(u32, lpsn) __field(u32, cur_seg) __field(u32, comp_seg) __field(u32, ack_seg) __field(u32, alloc_seg) __field(u32, total_segs) __field(u16, setup_head) __field(u16, clear_tail) __field(u16, flow_idx) __field(u16, acked_tail) __field(u32, state) __field(u32, r_ack_psn) __field(u32, r_flow_psn) __field(u32, r_last_acked) __field(u32, s_next_psn) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->newreq = newreq; __entry->opcode = opcode; __entry->psn = psn; __entry->lpsn = lpsn; __entry->cur_seg = req->cur_seg; __entry->comp_seg = req->comp_seg; __entry->ack_seg = req->ack_seg; __entry->alloc_seg = req->alloc_seg; __entry->total_segs = req->total_segs; __entry->setup_head = req->setup_head; __entry->clear_tail = req->clear_tail; __entry->flow_idx = req->flow_idx; __entry->acked_tail = req->acked_tail; __entry->state = req->state; __entry->r_ack_psn = req->r_ack_psn; __entry->r_flow_psn = req->r_flow_psn; __entry->r_last_acked = req->r_last_acked; __entry->s_next_psn = req->s_next_psn; ), TP_printk(/* print */ TID_REQ_PRN, __get_str(dev), __entry->qpn, __entry->newreq, __entry->opcode, __entry->psn, __entry->lpsn, __entry->cur_seg, __entry->comp_seg, __entry->ack_seg, __entry->alloc_seg, __entry->total_segs, __entry->setup_head, __entry->clear_tail, __entry->flow_idx, __entry->acked_tail, __entry->state, __entry->r_ack_psn, __entry->r_flow_psn, __entry->r_last_acked, __entry->s_next_psn ) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), 
TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_read_kdeth_eflags, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, 
psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DEFINE_EVENT(/* event */ hfi1_tid_rdma_request_template, hfi1_tid_req_update_num_rd_atomic, TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn, struct tid_rdma_request *req), TP_ARGS(qp, newreq, opcode, psn, lpsn, req) ); DECLARE_EVENT_CLASS(/* rc_rcv_err */ hfi1_rc_rcv_err_template, TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), TP_ARGS(qp, opcode, psn, diff), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u32, s_flags) __field(u8, state) __field(u8, s_acked_ack_queue) __field(u8, s_tail_ack_queue) __field(u8, r_head_ack_queue) __field(u32, opcode) __field(u32, psn) __field(u32, r_psn) __field(int, diff) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->s_flags = qp->s_flags; __entry->state = qp->state; __entry->s_acked_ack_queue = qp->s_acked_ack_queue; __entry->s_tail_ack_queue = qp->s_tail_ack_queue; __entry->r_head_ack_queue = qp->r_head_ack_queue; __entry->opcode = opcode; __entry->psn = psn; __entry->r_psn = qp->r_psn; __entry->diff = diff; ), TP_printk(/* print */ RCV_ERR_PRN, __get_str(dev), __entry->qpn, __entry->s_flags, __entry->state, __entry->s_acked_ack_queue, __entry->s_tail_ack_queue, __entry->r_head_ack_queue, __entry->opcode, __entry->psn, __entry->r_psn, __entry->diff ) ); DEFINE_EVENT(/* event */ hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err, TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff), TP_ARGS(qp, opcode, psn, diff) ); DECLARE_EVENT_CLASS(/* sge */ hfi1_sge_template, TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge), TP_ARGS(qp, index, sge), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(int, index) __field(u64, vaddr) __field(u32, sge_length) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->index = index; __entry->vaddr = (u64)sge->vaddr; __entry->sge_length = sge->sge_length; ), TP_printk(/* print */ "[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u", __get_str(dev), __entry->qpn, __entry->index, __entry->vaddr, __entry->sge_length ) ); DEFINE_EVENT(/* event */ hfi1_sge_template, hfi1_sge_check_align, TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge), TP_ARGS(qp, index, sge) ); DECLARE_EVENT_CLASS(/* tid_write_sp */ hfi1_tid_write_rsp_template, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u32, r_tid_head) __field(u32, r_tid_tail) __field(u32, r_tid_ack) __field(u32, r_tid_alloc) __field(u32, alloc_w_segs) __field(u32, pending_tid_w_segs) __field(bool, sync_pt) __field(u32, ps_nak_psn) __field(u8, ps_nak_state) __field(u8, prnr_nak_state) __field(u32, hw_flow_index) __field(u32, generation) __field(u32, fpsn) __field(bool, resync) __field(u32, r_next_psn_kdeth) ), TP_fast_assign(/* assign */ struct hfi1_qp_priv *priv = qp->priv; DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->r_tid_head = priv->r_tid_head; __entry->r_tid_tail = priv->r_tid_tail; __entry->r_tid_ack = priv->r_tid_ack; __entry->r_tid_alloc = priv->r_tid_alloc; __entry->alloc_w_segs = priv->alloc_w_segs; 
__entry->pending_tid_w_segs = priv->pending_tid_w_segs; __entry->sync_pt = priv->sync_pt; __entry->ps_nak_psn = priv->s_nak_psn; __entry->ps_nak_state = priv->s_nak_state; __entry->prnr_nak_state = priv->rnr_nak_state; __entry->hw_flow_index = priv->flow_state.index; __entry->generation = priv->flow_state.generation; __entry->fpsn = priv->flow_state.psn; __entry->resync = priv->resync; __entry->r_next_psn_kdeth = priv->r_next_psn_kdeth; ), TP_printk(/* print */ TID_WRITE_RSPDR_PRN, __get_str(dev), __entry->qpn, __entry->r_tid_head, __entry->r_tid_tail, __entry->r_tid_ack, __entry->r_tid_alloc, __entry->alloc_w_segs, __entry->pending_tid_w_segs, __entry->sync_pt ? "yes" : "no", __entry->ps_nak_psn, __entry->ps_nak_state, __entry->prnr_nak_state, __entry->hw_flow_index, __entry->generation, __entry->fpsn, __entry->resync ? "yes" : "no", __entry->r_next_psn_kdeth ) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DEFINE_EVENT(/* event */ hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack, TP_PROTO(struct rvt_qp *qp), TP_ARGS(qp) ); DECLARE_EVENT_CLASS(/* tid_write_sender */ hfi1_tid_write_sender_template, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(char, newreq) __field(u32, s_tid_cur) __field(u32, s_tid_tail) __field(u32, s_tid_head) __field(u32, pending_tid_w_resp) __field(u32, n_requests) __field(u32, n_tid_requests) __field(u32, s_flags) __field(u32, ps_flags) __field(unsigned long, iow_flags) __field(u8, s_state) __field(u8, s_retry) ), TP_fast_assign(/* assign */ struct hfi1_qp_priv *priv = qp->priv; DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->newreq = newreq; __entry->s_tid_cur = priv->s_tid_cur; __entry->s_tid_tail = priv->s_tid_tail; __entry->s_tid_head = priv->s_tid_head; __entry->pending_tid_w_resp = priv->pending_tid_w_resp; __entry->n_requests = atomic_read(&priv->n_requests); __entry->n_tid_requests = atomic_read(&priv->n_tid_requests); __entry->s_flags = qp->s_flags; __entry->ps_flags = priv->s_flags; __entry->iow_flags = priv->s_iowait.flags; __entry->s_state = priv->s_state; __entry->s_retry = priv->s_retry; ), TP_printk(/* print */ TID_WRITE_SENDER_PRN, __get_str(dev), __entry->qpn, __entry->newreq, __entry->s_tid_cur, __entry->s_tid_tail, __entry->s_tid_head, __entry->pending_tid_w_resp, __entry->n_requests, __entry->n_tid_requests, __entry->s_flags, __entry->ps_flags, __entry->iow_flags, __entry->s_state, __entry->s_retry ) ); DEFINE_EVENT(/* event */ hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp, TP_PROTO(struct rvt_qp *qp, char newreq), 
TP_ARGS(qp, newreq) ); DEFINE_EVENT(/* event */ hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DEFINE_EVENT(/* event */ hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DEFINE_EVENT(/* event */ hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DEFINE_EVENT(/* event */ hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DEFINE_EVENT(/* event */ hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc, TP_PROTO(struct rvt_qp *qp, char newreq), TP_ARGS(qp, newreq) ); DECLARE_EVENT_CLASS(/* tid_ack */ hfi1_tid_ack_template, TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn, u32 req_psn, u32 resync_psn), TP_ARGS(qp, aeth, psn, req_psn, resync_psn), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u32, aeth) __field(u32, psn) __field(u32, req_psn) __field(u32, resync_psn) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->aeth = aeth; __entry->psn = psn; __entry->req_psn = req_psn; __entry->resync_psn = resync_psn; ), TP_printk(/* print */ "[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x", __get_str(dev), __entry->qpn, __entry->aeth, __entry->psn, __entry->req_psn, __entry->resync_psn ) ); DEFINE_EVENT(/* rcv_tid_ack */ hfi1_tid_ack_template, hfi1_rcv_tid_ack, TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn, u32 req_psn, u32 resync_psn), TP_ARGS(qp, aeth, psn, req_psn, resync_psn) ); DECLARE_EVENT_CLASS(/* kdeth_eflags_error */ hfi1_kdeth_eflags_error_template, TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn), TP_ARGS(qp, rcv_type, rte, psn), TP_STRUCT__entry(/* entry */ DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device)) __field(u32, qpn) __field(u8, rcv_type) __field(u8, rte) __field(u32, psn) ), TP_fast_assign(/* assign */ DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device)); __entry->qpn = qp->ibqp.qp_num; __entry->rcv_type = rcv_type; __entry->rte = rte; __entry->psn = psn; ), TP_printk(/* print */ KDETH_EFLAGS_ERR_PRN, __get_str(dev), __entry->qpn, __entry->rcv_type, __entry->rte, __entry->psn ) ); DEFINE_EVENT(/* event */ hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write, TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn), TP_ARGS(qp, rcv_type, rte, psn) ); #endif /* __HFI1_TRACE_TID_H */ #undef TRACE_INCLUDE_PATH #undef TRACE_INCLUDE_FILE #define TRACE_INCLUDE_PATH . #define TRACE_INCLUDE_FILE trace_tid #include <trace/define_trace.h>
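/*
 * Editor's note: each DEFINE_EVENT() above instantiates its event class
 * as a tracepoint callable as trace_<event>() from the driver. An
 * illustrative call site (not part of this header) for the responder
 * snapshot would be:
 *
 *	trace_hfi1_rsp_make_rc_ack(qp, psn);
 *
 * The class's TP_fast_assign() then captures the QP/responder state
 * into the trace ring buffer and TP_printk() formats it on read.
 */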
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2023 Meta Platforms, Inc. and affiliates.
#define STACK_MAX_LEN 600
#define SUBPROGS
#define NO_UNROLL
#define USE_ITER
#include "pyperf.h"
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2017-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_FW_H_
#define _INTEL_GUC_FW_H_

struct intel_guc;

int intel_guc_fw_upload(struct intel_guc *guc);

#endif
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) /* Copyright (C) 2019 Netronome Systems, Inc. */ #include <linux/if_arp.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mpls.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/tc_act/tc_mpls.h> #include <net/mpls.h> #include <net/netlink.h> #include <net/pkt_sched.h> #include <net/pkt_cls.h> #include <net/tc_act/tc_mpls.h> #include <net/tc_wrapper.h> static struct tc_action_ops act_mpls_ops; #define ACT_MPLS_TTL_DEFAULT 255 static __be32 tcf_mpls_get_lse(struct mpls_shim_hdr *lse, struct tcf_mpls_params *p, bool set_bos) { u32 new_lse = 0; if (lse) new_lse = be32_to_cpu(lse->label_stack_entry); if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET) { new_lse &= ~MPLS_LS_LABEL_MASK; new_lse |= p->tcfm_label << MPLS_LS_LABEL_SHIFT; } if (p->tcfm_ttl) { new_lse &= ~MPLS_LS_TTL_MASK; new_lse |= p->tcfm_ttl << MPLS_LS_TTL_SHIFT; } if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET) { new_lse &= ~MPLS_LS_TC_MASK; new_lse |= p->tcfm_tc << MPLS_LS_TC_SHIFT; } if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET) { new_lse &= ~MPLS_LS_S_MASK; new_lse |= p->tcfm_bos << MPLS_LS_S_SHIFT; } else if (set_bos) { new_lse |= 1 << MPLS_LS_S_SHIFT; } return cpu_to_be32(new_lse); } TC_INDIRECT_SCOPE int tcf_mpls_act(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_mpls *m = to_mpls(a); struct tcf_mpls_params *p; __be32 new_lse; int ret, mac_len; tcf_lastuse_update(&m->tcf_tm); bstats_update(this_cpu_ptr(m->common.cpu_bstats), skb); /* Ensure 'data' points at mac_header prior calling mpls manipulating * functions. */ if (skb_at_tc_ingress(skb)) { skb_push_rcsum(skb, skb->mac_len); mac_len = skb->mac_len; } else { mac_len = skb_network_offset(skb); } ret = READ_ONCE(m->tcf_action); p = rcu_dereference_bh(m->mpls_p); switch (p->tcfm_action) { case TCA_MPLS_ACT_POP: if (skb_mpls_pop(skb, p->tcfm_proto, mac_len, skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; break; case TCA_MPLS_ACT_PUSH: new_lse = tcf_mpls_get_lse(NULL, p, !eth_p_mpls(skb_protocol(skb, true))); if (skb_mpls_push(skb, new_lse, p->tcfm_proto, mac_len, skb->dev && skb->dev->type == ARPHRD_ETHER)) goto drop; break; case TCA_MPLS_ACT_MAC_PUSH: if (skb_vlan_tag_present(skb)) { if (__vlan_insert_inner_tag(skb, skb->vlan_proto, skb_vlan_tag_get(skb), ETH_HLEN) < 0) goto drop; skb->protocol = skb->vlan_proto; __vlan_hwaccel_clear_tag(skb); } new_lse = tcf_mpls_get_lse(NULL, p, mac_len || !eth_p_mpls(skb->protocol)); if (skb_mpls_push(skb, new_lse, p->tcfm_proto, 0, false)) goto drop; break; case TCA_MPLS_ACT_MODIFY: if (!pskb_may_pull(skb, skb_network_offset(skb) + MPLS_HLEN)) goto drop; new_lse = tcf_mpls_get_lse(mpls_hdr(skb), p, false); if (skb_mpls_update_lse(skb, new_lse)) goto drop; break; case TCA_MPLS_ACT_DEC_TTL: if (skb_mpls_dec_ttl(skb)) goto drop; break; } if (skb_at_tc_ingress(skb)) skb_pull_rcsum(skb, skb->mac_len); return ret; drop: qstats_drop_inc(this_cpu_ptr(m->common.cpu_qstats)); return TC_ACT_SHOT; } static int valid_label(const struct nlattr *attr, struct netlink_ext_ack *extack) { const u32 *label = nla_data(attr); if (nla_len(attr) != sizeof(*label)) { NL_SET_ERR_MSG_MOD(extack, "Invalid MPLS label length"); return -EINVAL; } if (*label & ~MPLS_LABEL_MASK || *label == MPLS_LABEL_IMPLNULL) { NL_SET_ERR_MSG_MOD(extack, "MPLS label out of range"); return -EINVAL; } return 0; } static const struct nla_policy mpls_policy[TCA_MPLS_MAX + 1] = { [TCA_MPLS_PARMS] = NLA_POLICY_EXACT_LEN(sizeof(struct 
tc_mpls)), [TCA_MPLS_PROTO] = { .type = NLA_U16 }, [TCA_MPLS_LABEL] = NLA_POLICY_VALIDATE_FN(NLA_BINARY, valid_label), [TCA_MPLS_TC] = NLA_POLICY_RANGE(NLA_U8, 0, 7), [TCA_MPLS_TTL] = NLA_POLICY_MIN(NLA_U8, 1), [TCA_MPLS_BOS] = NLA_POLICY_RANGE(NLA_U8, 0, 1), }; static int tcf_mpls_init(struct net *net, struct nlattr *nla, struct nlattr *est, struct tc_action **a, struct tcf_proto *tp, u32 flags, struct netlink_ext_ack *extack) { struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id); bool bind = flags & TCA_ACT_FLAGS_BIND; struct nlattr *tb[TCA_MPLS_MAX + 1]; struct tcf_chain *goto_ch = NULL; struct tcf_mpls_params *p; struct tc_mpls *parm; bool exists = false; struct tcf_mpls *m; int ret = 0, err; u8 mpls_ttl = 0; u32 index; if (!nla) { NL_SET_ERR_MSG_MOD(extack, "Missing netlink attributes"); return -EINVAL; } err = nla_parse_nested(tb, TCA_MPLS_MAX, nla, mpls_policy, extack); if (err < 0) return err; if (!tb[TCA_MPLS_PARMS]) { NL_SET_ERR_MSG_MOD(extack, "No MPLS params"); return -EINVAL; } parm = nla_data(tb[TCA_MPLS_PARMS]); index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) return err; exists = err; if (exists && bind) return ACT_P_BOUND; if (!exists) { ret = tcf_idr_create(tn, index, est, a, &act_mpls_ops, bind, true, flags); if (ret) { tcf_idr_cleanup(tn, index); return ret; } ret = ACT_P_CREATED; } else if (!(flags & TCA_ACT_FLAGS_REPLACE)) { tcf_idr_release(*a, bind); return -EEXIST; } /* Verify parameters against action type. */ switch (parm->m_action) { case TCA_MPLS_ACT_POP: if (!tb[TCA_MPLS_PROTO]) { NL_SET_ERR_MSG_MOD(extack, "Protocol must be set for MPLS pop"); err = -EINVAL; goto release_idr; } if (!eth_proto_is_802_3(nla_get_be16(tb[TCA_MPLS_PROTO]))) { NL_SET_ERR_MSG_MOD(extack, "Invalid protocol type for MPLS pop"); err = -EINVAL; goto release_idr; } if (tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) { NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC or BOS cannot be used with MPLS pop"); err = -EINVAL; goto release_idr; } break; case TCA_MPLS_ACT_DEC_TTL: if (tb[TCA_MPLS_PROTO] || tb[TCA_MPLS_LABEL] || tb[TCA_MPLS_TTL] || tb[TCA_MPLS_TC] || tb[TCA_MPLS_BOS]) { NL_SET_ERR_MSG_MOD(extack, "Label, TTL, TC, BOS or protocol cannot be used with MPLS dec_ttl"); err = -EINVAL; goto release_idr; } break; case TCA_MPLS_ACT_PUSH: case TCA_MPLS_ACT_MAC_PUSH: if (!tb[TCA_MPLS_LABEL]) { NL_SET_ERR_MSG_MOD(extack, "Label is required for MPLS push"); err = -EINVAL; goto release_idr; } if (tb[TCA_MPLS_PROTO] && !eth_p_mpls(nla_get_be16(tb[TCA_MPLS_PROTO]))) { NL_SET_ERR_MSG_MOD(extack, "Protocol must be an MPLS type for MPLS push"); err = -EPROTONOSUPPORT; goto release_idr; } /* Push needs a TTL - if not specified, set a default value. */ if (!tb[TCA_MPLS_TTL]) { #if IS_ENABLED(CONFIG_MPLS) mpls_ttl = net->mpls.default_ttl ? 
net->mpls.default_ttl : ACT_MPLS_TTL_DEFAULT; #else mpls_ttl = ACT_MPLS_TTL_DEFAULT; #endif } break; case TCA_MPLS_ACT_MODIFY: if (tb[TCA_MPLS_PROTO]) { NL_SET_ERR_MSG_MOD(extack, "Protocol cannot be used with MPLS modify"); err = -EINVAL; goto release_idr; } break; default: NL_SET_ERR_MSG_MOD(extack, "Unknown MPLS action"); err = -EINVAL; goto release_idr; } err = tcf_action_check_ctrlact(parm->action, tp, &goto_ch, extack); if (err < 0) goto release_idr; m = to_mpls(*a); p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { err = -ENOMEM; goto put_chain; } p->tcfm_action = parm->m_action; p->tcfm_label = nla_get_u32_default(tb[TCA_MPLS_LABEL], ACT_MPLS_LABEL_NOT_SET); p->tcfm_tc = nla_get_u8_default(tb[TCA_MPLS_TC], ACT_MPLS_TC_NOT_SET); p->tcfm_ttl = nla_get_u8_default(tb[TCA_MPLS_TTL], mpls_ttl); p->tcfm_bos = nla_get_u8_default(tb[TCA_MPLS_BOS], ACT_MPLS_BOS_NOT_SET); p->tcfm_proto = nla_get_be16_default(tb[TCA_MPLS_PROTO], htons(ETH_P_MPLS_UC)); spin_lock_bh(&m->tcf_lock); goto_ch = tcf_action_set_ctrlact(*a, parm->action, goto_ch); p = rcu_replace_pointer(m->mpls_p, p, lockdep_is_held(&m->tcf_lock)); spin_unlock_bh(&m->tcf_lock); if (goto_ch) tcf_chain_put_by_act(goto_ch); if (p) kfree_rcu(p, rcu); return ret; put_chain: if (goto_ch) tcf_chain_put_by_act(goto_ch); release_idr: tcf_idr_release(*a, bind); return err; } static void tcf_mpls_cleanup(struct tc_action *a) { struct tcf_mpls *m = to_mpls(a); struct tcf_mpls_params *p; p = rcu_dereference_protected(m->mpls_p, 1); if (p) kfree_rcu(p, rcu); } static int tcf_mpls_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct tcf_mpls *m = to_mpls(a); struct tcf_mpls_params *p; struct tc_mpls opt = { .index = m->tcf_index, .refcnt = refcount_read(&m->tcf_refcnt) - ref, .bindcnt = atomic_read(&m->tcf_bindcnt) - bind, }; struct tcf_t t; spin_lock_bh(&m->tcf_lock); opt.action = m->tcf_action; p = rcu_dereference_protected(m->mpls_p, lockdep_is_held(&m->tcf_lock)); opt.m_action = p->tcfm_action; if (nla_put(skb, TCA_MPLS_PARMS, sizeof(opt), &opt)) goto nla_put_failure; if (p->tcfm_label != ACT_MPLS_LABEL_NOT_SET && nla_put_u32(skb, TCA_MPLS_LABEL, p->tcfm_label)) goto nla_put_failure; if (p->tcfm_tc != ACT_MPLS_TC_NOT_SET && nla_put_u8(skb, TCA_MPLS_TC, p->tcfm_tc)) goto nla_put_failure; if (p->tcfm_ttl && nla_put_u8(skb, TCA_MPLS_TTL, p->tcfm_ttl)) goto nla_put_failure; if (p->tcfm_bos != ACT_MPLS_BOS_NOT_SET && nla_put_u8(skb, TCA_MPLS_BOS, p->tcfm_bos)) goto nla_put_failure; if (nla_put_be16(skb, TCA_MPLS_PROTO, p->tcfm_proto)) goto nla_put_failure; tcf_tm_dump(&t, &m->tcf_tm); if (nla_put_64bit(skb, TCA_MPLS_TM, sizeof(t), &t, TCA_MPLS_PAD)) goto nla_put_failure; spin_unlock_bh(&m->tcf_lock); return skb->len; nla_put_failure: spin_unlock_bh(&m->tcf_lock); nlmsg_trim(skb, b); return -EMSGSIZE; } static int tcf_mpls_offload_act_setup(struct tc_action *act, void *entry_data, u32 *index_inc, bool bind, struct netlink_ext_ack *extack) { if (bind) { struct flow_action_entry *entry = entry_data; switch (tcf_mpls_action(act)) { case TCA_MPLS_ACT_PUSH: entry->id = FLOW_ACTION_MPLS_PUSH; entry->mpls_push.proto = tcf_mpls_proto(act); entry->mpls_push.label = tcf_mpls_label(act); entry->mpls_push.tc = tcf_mpls_tc(act); entry->mpls_push.bos = tcf_mpls_bos(act); entry->mpls_push.ttl = tcf_mpls_ttl(act); break; case TCA_MPLS_ACT_POP: entry->id = FLOW_ACTION_MPLS_POP; entry->mpls_pop.proto = tcf_mpls_proto(act); break; case TCA_MPLS_ACT_MODIFY: entry->id = FLOW_ACTION_MPLS_MANGLE; 
entry->mpls_mangle.label = tcf_mpls_label(act); entry->mpls_mangle.tc = tcf_mpls_tc(act); entry->mpls_mangle.bos = tcf_mpls_bos(act); entry->mpls_mangle.ttl = tcf_mpls_ttl(act); break; case TCA_MPLS_ACT_DEC_TTL: NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"dec_ttl\" option is used"); return -EOPNOTSUPP; case TCA_MPLS_ACT_MAC_PUSH: NL_SET_ERR_MSG_MOD(extack, "Offload not supported when \"mac_push\" option is used"); return -EOPNOTSUPP; default: NL_SET_ERR_MSG_MOD(extack, "Unsupported MPLS mode offload"); return -EOPNOTSUPP; } *index_inc = 1; } else { struct flow_offload_action *fl_action = entry_data; switch (tcf_mpls_action(act)) { case TCA_MPLS_ACT_PUSH: fl_action->id = FLOW_ACTION_MPLS_PUSH; break; case TCA_MPLS_ACT_POP: fl_action->id = FLOW_ACTION_MPLS_POP; break; case TCA_MPLS_ACT_MODIFY: fl_action->id = FLOW_ACTION_MPLS_MANGLE; break; default: return -EOPNOTSUPP; } } return 0; } static struct tc_action_ops act_mpls_ops = { .kind = "mpls", .id = TCA_ID_MPLS, .owner = THIS_MODULE, .act = tcf_mpls_act, .dump = tcf_mpls_dump, .init = tcf_mpls_init, .cleanup = tcf_mpls_cleanup, .offload_act_setup = tcf_mpls_offload_act_setup, .size = sizeof(struct tcf_mpls), }; MODULE_ALIAS_NET_ACT("mpls"); static __net_init int mpls_init_net(struct net *net) { struct tc_action_net *tn = net_generic(net, act_mpls_ops.net_id); return tc_action_net_init(net, tn, &act_mpls_ops); } static void __net_exit mpls_exit_net(struct list_head *net_list) { tc_action_net_exit(net_list, act_mpls_ops.net_id); } static struct pernet_operations mpls_net_ops = { .init = mpls_init_net, .exit_batch = mpls_exit_net, .id = &act_mpls_ops.net_id, .size = sizeof(struct tc_action_net), }; static int __init mpls_init_module(void) { return tcf_register_action(&act_mpls_ops, &mpls_net_ops); } static void __exit mpls_cleanup_module(void) { tcf_unregister_action(&act_mpls_ops, &mpls_net_ops); } module_init(mpls_init_module); module_exit(mpls_cleanup_module); MODULE_SOFTDEP("post: mpls_gso"); MODULE_AUTHOR("Netronome Systems <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MPLS manipulation actions");
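/*
 * Editor's sketch (not part of act_mpls.c): how tcf_mpls_get_lse()
 * packs a label stack entry. The shift values mirror the MPLS_LS_*
 * definitions in <linux/mpls.h> (RFC 3032 layout: 20-bit label,
 * 3-bit TC, 1-bit bottom-of-stack, 8-bit TTL). Standalone, hosted C:
 */
#include <stdint.h>
#include <stdio.h>

#define LSE_LABEL_SHIFT	12
#define LSE_TC_SHIFT	9
#define LSE_S_SHIFT	8
#define LSE_TTL_SHIFT	0

int main(void)
{
	/* label 100, TC 5, bottom of stack, TTL 64 */
	uint32_t lse = (100u << LSE_LABEL_SHIFT) |
		       (5u << LSE_TC_SHIFT) |
		       (1u << LSE_S_SHIFT) |
		       (64u << LSE_TTL_SHIFT);

	printf("lse = 0x%08x\n", lse);	/* prints 0x00064b40 */
	return 0;
}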
// SPDX-License-Identifier: GPL-2.0 #include "gcc-common.h" __visible int plugin_is_GPL_compatible; static unsigned int canary_offset; static unsigned int arm_pertask_ssp_rtl_execute(void) { rtx_insn *insn; for (insn = get_insns(); insn; insn = NEXT_INSN(insn)) { const char *sym; rtx body; rtx current; /* * Find a SET insn involving a SYMBOL_REF to __stack_chk_guard */ if (!INSN_P(insn)) continue; body = PATTERN(insn); if (GET_CODE(body) != SET || GET_CODE(SET_SRC(body)) != SYMBOL_REF) continue; sym = XSTR(SET_SRC(body), 0); if (strcmp(sym, "__stack_chk_guard")) continue; /* * Replace the source of the SET insn with an expression that * produces the address of the current task's stack canary value */ current = gen_reg_rtx(Pmode); emit_insn_before(gen_load_tp_hard(current), insn); SET_SRC(body) = gen_rtx_PLUS(Pmode, current, GEN_INT(canary_offset)); } return 0; } #define PASS_NAME arm_pertask_ssp_rtl #define NO_GATE #include "gcc-generate-rtl-pass.h" #if BUILDING_GCC_VERSION >= 9000 static bool no(void) { return false; } static void arm_pertask_ssp_start_unit(void *gcc_data, void *user_data) { targetm.have_stack_protect_combined_set = no; targetm.have_stack_protect_combined_test = no; } #endif __visible int plugin_init(struct plugin_name_args *plugin_info, struct plugin_gcc_version *version) { const char * const plugin_name = plugin_info->base_name; const int argc = plugin_info->argc; const struct plugin_argument *argv = plugin_info->argv; int i; if (!plugin_default_version_check(version, &gcc_version)) { error(G_("incompatible gcc/plugin versions")); return 1; } for (i = 0; i < argc; ++i) { if (!strcmp(argv[i].key, "disable")) return 0; /* all remaining options require a value */ if (!argv[i].value) { error(G_("no value supplied for option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); return 1; } if (!strcmp(argv[i].key, "offset")) { canary_offset = atoi(argv[i].value); continue; } error(G_("unknown option '-fplugin-arg-%s-%s'"), plugin_name, argv[i].key); return 1; } PASS_INFO(arm_pertask_ssp_rtl, "expand", 1, PASS_POS_INSERT_AFTER); register_callback(plugin_info->base_name, PLUGIN_PASS_MANAGER_SETUP, NULL, &arm_pertask_ssp_rtl_pass_info); #if BUILDING_GCC_VERSION >= 9000 register_callback(plugin_info->base_name, PLUGIN_START_UNIT, arm_pertask_ssp_start_unit, NULL); #endif return 0; }
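/*
 * Editor's note: conceptually, the RTL pass above rewrites every
 * canary load of the form
 *
 *	(set (reg rN) (symbol_ref "__stack_chk_guard"))
 *
 * into the pair
 *
 *	(set (reg tp) ...)			; gen_load_tp_hard()
 *	(set (reg rN) (plus (reg tp) (const_int canary_offset)))
 *
 * so each task reads the canary from its own thread-pointer-relative
 * slot rather than from one global variable. Illustrative RTL only.
 */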
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright 2008 Openmoko, Inc.
 * Copyright 2008 Simtec Electronics
 *	http://armlinux.simtec.co.uk/
 *	Ben Dooks <[email protected]>
 *
 * S3C Platform - SDHCI (HSMMC) platform data definitions
 */

#ifndef __PLAT_S3C_SDHCI_H
#define __PLAT_S3C_SDHCI_H __FILE__

#include <linux/platform_data/mmc-sdhci-s3c.h>
#include "devs.h"

/* s3c_sdhci_set_platdata() - common helper for setting SDHCI platform data
 * @pd: The default platform data for this device.
 * @set: Pointer to the platform data to fill in.
 */
extern void s3c_sdhci_set_platdata(struct s3c_sdhci_platdata *pd,
				   struct s3c_sdhci_platdata *set);

/**
 * s3c_sdhci0_set_platdata - Set platform data for S3C SDHCI device.
 * @pd: Platform data to register to device.
 *
 * Register the given platform data for use with the S3C SDHCI device.
 * The call will copy the platform data, so the board definitions can
 * make the structure itself __initdata.
 */
extern void s3c_sdhci0_set_platdata(struct s3c_sdhci_platdata *pd);
extern void s3c_sdhci1_set_platdata(struct s3c_sdhci_platdata *pd);
extern void s3c_sdhci2_set_platdata(struct s3c_sdhci_platdata *pd);
extern void s3c_sdhci3_set_platdata(struct s3c_sdhci_platdata *pd);

/* Default platform data, exported so that per-cpu initialisation can
 * set the correct one when there are more than one cpu type selected.
 */

extern struct s3c_sdhci_platdata s3c_hsmmc0_def_platdata;
extern struct s3c_sdhci_platdata s3c_hsmmc1_def_platdata;
extern struct s3c_sdhci_platdata s3c_hsmmc2_def_platdata;
extern struct s3c_sdhci_platdata s3c_hsmmc3_def_platdata;

/* Helper function availability */

extern void s3c64xx_setup_sdhci0_cfg_gpio(struct platform_device *, int w);
extern void s3c64xx_setup_sdhci1_cfg_gpio(struct platform_device *, int w);
extern void s3c64xx_setup_sdhci2_cfg_gpio(struct platform_device *, int w);

/* S3C64XX SDHCI setup */

#ifdef CONFIG_S3C64XX_SETUP_SDHCI
static inline void s3c6400_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
	s3c_hsmmc0_def_platdata.cfg_gpio = s3c64xx_setup_sdhci0_cfg_gpio;
#endif
}

static inline void s3c6400_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
	s3c_hsmmc1_def_platdata.cfg_gpio = s3c64xx_setup_sdhci1_cfg_gpio;
#endif
}

static inline void s3c6400_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
	s3c_hsmmc2_def_platdata.cfg_gpio = s3c64xx_setup_sdhci2_cfg_gpio;
#endif
}

static inline void s3c6410_default_sdhci0(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC
	s3c_hsmmc0_def_platdata.cfg_gpio = s3c64xx_setup_sdhci0_cfg_gpio;
#endif
}

static inline void s3c6410_default_sdhci1(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC1
	s3c_hsmmc1_def_platdata.cfg_gpio = s3c64xx_setup_sdhci1_cfg_gpio;
#endif
}

static inline void s3c6410_default_sdhci2(void)
{
#ifdef CONFIG_S3C_DEV_HSMMC2
	s3c_hsmmc2_def_platdata.cfg_gpio = s3c64xx_setup_sdhci2_cfg_gpio;
#endif
}

#else
static inline void s3c6410_default_sdhci0(void) { }
static inline void s3c6410_default_sdhci1(void) { }
static inline void s3c6410_default_sdhci2(void) { }
static inline void s3c6400_default_sdhci0(void) { }
static inline void s3c6400_default_sdhci1(void) { }
static inline void s3c6400_default_sdhci2(void) { }

#endif /* CONFIG_S3C64XX_SETUP_SDHCI */

static inline void s3c_sdhci_setname(int id, char *name)
{
	switch (id) {
#ifdef CONFIG_S3C_DEV_HSMMC
	case 0:
		s3c_device_hsmmc0.name = name;
		break;
#endif
#ifdef CONFIG_S3C_DEV_HSMMC1
	case 1:
		s3c_device_hsmmc1.name = name;
		break;
#endif
#ifdef CONFIG_S3C_DEV_HSMMC2
	case 2:
		s3c_device_hsmmc2.name = name;
		break;
#endif
#ifdef CONFIG_S3C_DEV_HSMMC3
	case 3:
		s3c_device_hsmmc3.name = name;
		break;
#endif
	default:
		break;
	}
}

#endif /* __PLAT_S3C_SDHCI_H */
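/*
 * Editor's sketch: typical board-file use of the helpers above. The
 * max_width/cd_type values follow the common pattern seen in S3C64xx
 * board files and are illustrative, not canonical; the "my_board_*"
 * names are hypothetical.
 */
#if 0	/* illustration only */
static struct s3c_sdhci_platdata my_board_hsmmc1_pdata __initdata = {
	.max_width	= 4,			/* 4-bit data bus */
	.cd_type	= S3C_SDHCI_CD_INTERNAL, /* controller card detect */
};

static void __init my_board_machine_init(void)
{
	s3c6410_default_sdhci1();	/* install the default cfg_gpio hook */
	/* copies the pdata, so the __initdata structure above is fine */
	s3c_sdhci1_set_platdata(&my_board_hsmmc1_pdata);
}
#endif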
// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2020, Konrad Dybcio
 */

/dts-v1/;

#include "sdm630.dtsi"
#include "sdm630-sony-xperia-nile.dtsi"

/ {
	model = "Sony Xperia XA2 Plus";
	compatible = "sony,voyager-row", "qcom,sdm630";
	chassis-type = "handset";

	chosen {
		framebuffer@9d400000 {
			reg = <0 0x9d400000 0 (2160 * 1080 * 4)>;
			height = <2160>;
		};
	};
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * drivers/net/team/team_mode_loadbalance.c - Load-balancing mode for team
 * Copyright (c) 2012 Jiri Pirko <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/if_team.h>

static rx_handler_result_t lb_receive(struct team *team, struct team_port *port,
				      struct sk_buff *skb)
{
	if (unlikely(skb->protocol == htons(ETH_P_SLOW))) {
		/* LACPDU packets should go to exact delivery */
		const unsigned char *dest = eth_hdr(skb)->h_dest;

		if (is_link_local_ether_addr(dest) && dest[5] == 0x02)
			return RX_HANDLER_EXACT;
	}
	return RX_HANDLER_ANOTHER;
}

struct lb_priv;

typedef struct team_port *lb_select_tx_port_func_t(struct team *,
						   unsigned char);

#define LB_TX_HASHTABLE_SIZE 256 /* hash is a char */

struct lb_stats {
	u64 tx_bytes;
};

struct lb_pcpu_stats {
	struct lb_stats hash_stats[LB_TX_HASHTABLE_SIZE];
	struct u64_stats_sync syncp;
};

struct lb_stats_info {
	struct lb_stats stats;
	struct lb_stats last_stats;
	struct team_option_inst_info *opt_inst_info;
};

struct lb_port_mapping {
	struct team_port __rcu *port;
	struct team_option_inst_info *opt_inst_info;
};

struct lb_priv_ex {
	struct team *team;
	struct lb_port_mapping tx_hash_to_port_mapping[LB_TX_HASHTABLE_SIZE];
	struct sock_fprog_kern *orig_fprog;
	struct {
		unsigned int refresh_interval; /* in tenths of second */
		struct delayed_work refresh_dw;
		struct lb_stats_info info[LB_TX_HASHTABLE_SIZE];
	} stats;
};

struct lb_priv {
	struct bpf_prog __rcu *fp;
	lb_select_tx_port_func_t __rcu *select_tx_port_func;
	struct lb_pcpu_stats __percpu *pcpu_stats;
	struct lb_priv_ex *ex; /* priv extension */
};

static struct lb_priv *get_lb_priv(struct team *team)
{
	return (struct lb_priv *) &team->mode_priv;
}

struct lb_port_priv {
	struct lb_stats __percpu *pcpu_stats;
	struct lb_stats_info stats_info;
};

static struct lb_port_priv *get_lb_port_priv(struct team_port *port)
{
	return (struct lb_port_priv *) &port->mode_priv;
}

#define LB_HTPM_PORT_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].port

#define LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) \
	(lb_priv)->ex->tx_hash_to_port_mapping[hash].opt_inst_info

static void lb_tx_hash_to_port_mapping_null_port(struct team *team,
						 struct team_port *port)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	bool changed = false;
	int i;

	for (i = 0; i < LB_TX_HASHTABLE_SIZE; i++) {
		struct lb_port_mapping *pm;

		pm = &lb_priv->ex->tx_hash_to_port_mapping[i];
		if (rcu_access_pointer(pm->port) == port) {
			RCU_INIT_POINTER(pm->port, NULL);
			team_option_inst_set_change(pm->opt_inst_info);
			changed = true;
		}
	}
	if (changed)
		team_options_change_check(team);
}

/* Basic tx selection based solely by hash */
static struct team_port *lb_hash_select_tx_port(struct team *team,
						unsigned char hash)
{
	int port_index = team_num_to_port_index(team, hash);

	return team_get_port_by_index_rcu(team, port_index);
}

/* Hash to port mapping select tx port */
static struct team_port *lb_htpm_select_tx_port(struct team *team,
						unsigned char hash)
{
	struct lb_priv *lb_priv = get_lb_priv(team);
	struct team_port *port;

	port = rcu_dereference_bh(LB_HTPM_PORT_BY_HASH(lb_priv, hash));
	if (likely(port))
		return port;
	/* If no valid port in the table, fall back to simple hash */
	return lb_hash_select_tx_port(team, hash);
}

struct lb_select_tx_port {
	char *name;
	lb_select_tx_port_func_t *func;
};

static const struct
lb_select_tx_port lb_select_tx_port_list[] = { { .name = "hash", .func = lb_hash_select_tx_port, }, { .name = "hash_to_port_mapping", .func = lb_htpm_select_tx_port, }, }; #define LB_SELECT_TX_PORT_LIST_COUNT ARRAY_SIZE(lb_select_tx_port_list) static char *lb_select_tx_port_get_name(lb_select_tx_port_func_t *func) { int i; for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) { const struct lb_select_tx_port *item; item = &lb_select_tx_port_list[i]; if (item->func == func) return item->name; } return NULL; } static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name) { int i; for (i = 0; i < LB_SELECT_TX_PORT_LIST_COUNT; i++) { const struct lb_select_tx_port *item; item = &lb_select_tx_port_list[i]; if (!strcmp(item->name, name)) return item->func; } return NULL; } static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv, struct sk_buff *skb) { struct bpf_prog *fp; uint32_t lhash; unsigned char *c; fp = rcu_dereference_bh(lb_priv->fp); if (unlikely(!fp)) return 0; lhash = bpf_prog_run(fp, skb); c = (char *) &lhash; return c[0] ^ c[1] ^ c[2] ^ c[3]; } static void lb_update_tx_stats(unsigned int tx_bytes, struct lb_priv *lb_priv, struct lb_port_priv *lb_port_priv, unsigned char hash) { struct lb_pcpu_stats *pcpu_stats; struct lb_stats *port_stats; struct lb_stats *hash_stats; pcpu_stats = this_cpu_ptr(lb_priv->pcpu_stats); port_stats = this_cpu_ptr(lb_port_priv->pcpu_stats); hash_stats = &pcpu_stats->hash_stats[hash]; u64_stats_update_begin(&pcpu_stats->syncp); port_stats->tx_bytes += tx_bytes; hash_stats->tx_bytes += tx_bytes; u64_stats_update_end(&pcpu_stats->syncp); } static bool lb_transmit(struct team *team, struct sk_buff *skb) { struct lb_priv *lb_priv = get_lb_priv(team); lb_select_tx_port_func_t *select_tx_port_func; struct team_port *port; unsigned char hash; unsigned int tx_bytes = skb->len; hash = lb_get_skb_hash(lb_priv, skb); select_tx_port_func = rcu_dereference_bh(lb_priv->select_tx_port_func); port = select_tx_port_func(team, hash); if (unlikely(!port)) goto drop; if (team_dev_queue_xmit(team, port, skb)) return false; lb_update_tx_stats(tx_bytes, lb_priv, get_lb_port_priv(port), hash); return true; drop: dev_kfree_skb_any(skb); return false; } static void lb_bpf_func_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); if (!lb_priv->ex->orig_fprog) { ctx->data.bin_val.len = 0; ctx->data.bin_val.ptr = NULL; return; } ctx->data.bin_val.len = lb_priv->ex->orig_fprog->len * sizeof(struct sock_filter); ctx->data.bin_val.ptr = lb_priv->ex->orig_fprog->filter; } static int __fprog_create(struct sock_fprog_kern **pfprog, u32 data_len, const void *data) { struct sock_fprog_kern *fprog; struct sock_filter *filter = (struct sock_filter *) data; if (data_len % sizeof(struct sock_filter)) return -EINVAL; fprog = kmalloc(sizeof(*fprog), GFP_KERNEL); if (!fprog) return -ENOMEM; fprog->filter = kmemdup(filter, data_len, GFP_KERNEL); if (!fprog->filter) { kfree(fprog); return -ENOMEM; } fprog->len = data_len / sizeof(struct sock_filter); *pfprog = fprog; return 0; } static void __fprog_destroy(struct sock_fprog_kern *fprog) { kfree(fprog->filter); kfree(fprog); } static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); struct bpf_prog *fp = NULL; struct bpf_prog *orig_fp = NULL; struct sock_fprog_kern *fprog = NULL; int err; if (ctx->data.bin_val.len) { err = __fprog_create(&fprog, ctx->data.bin_val.len, ctx->data.bin_val.ptr); if (err) return err; err = 
bpf_prog_create(&fp, fprog); if (err) { __fprog_destroy(fprog); return err; } } if (lb_priv->ex->orig_fprog) { /* Clear old filter data */ __fprog_destroy(lb_priv->ex->orig_fprog); orig_fp = rcu_dereference_protected(lb_priv->fp, lockdep_is_held(&team->lock)); } rcu_assign_pointer(lb_priv->fp, fp); lb_priv->ex->orig_fprog = fprog; if (orig_fp) { synchronize_rcu(); bpf_prog_destroy(orig_fp); } return 0; } static void lb_bpf_func_free(struct team *team) { struct lb_priv *lb_priv = get_lb_priv(team); struct bpf_prog *fp; if (!lb_priv->ex->orig_fprog) return; __fprog_destroy(lb_priv->ex->orig_fprog); fp = rcu_dereference_protected(lb_priv->fp, lockdep_is_held(&team->lock)); bpf_prog_destroy(fp); } static void lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); lb_select_tx_port_func_t *func; char *name; func = rcu_dereference_protected(lb_priv->select_tx_port_func, lockdep_is_held(&team->lock)); name = lb_select_tx_port_get_name(func); BUG_ON(!name); ctx->data.str_val = name; } static int lb_tx_method_set(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); lb_select_tx_port_func_t *func; func = lb_select_tx_port_get_func(ctx->data.str_val); if (!func) return -EINVAL; rcu_assign_pointer(lb_priv->select_tx_port_func, func); return 0; } static void lb_tx_hash_to_port_mapping_init(struct team *team, struct team_option_inst_info *info) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned char hash = info->array_index; LB_HTPM_OPT_INST_INFO_BY_HASH(lb_priv, hash) = info; } static void lb_tx_hash_to_port_mapping_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); struct team_port *port; unsigned char hash = ctx->info->array_index; port = LB_HTPM_PORT_BY_HASH(lb_priv, hash); ctx->data.u32_val = port ? 
port->dev->ifindex : 0; } static int lb_tx_hash_to_port_mapping_set(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); struct team_port *port; unsigned char hash = ctx->info->array_index; list_for_each_entry(port, &team->port_list, list) { if (ctx->data.u32_val == port->dev->ifindex && team_port_enabled(port)) { rcu_assign_pointer(LB_HTPM_PORT_BY_HASH(lb_priv, hash), port); return 0; } } return -ENODEV; } static void lb_hash_stats_init(struct team *team, struct team_option_inst_info *info) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned char hash = info->array_index; lb_priv->ex->stats.info[hash].opt_inst_info = info; } static void lb_hash_stats_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned char hash = ctx->info->array_index; ctx->data.bin_val.ptr = &lb_priv->ex->stats.info[hash].stats; ctx->data.bin_val.len = sizeof(struct lb_stats); } static void lb_port_stats_init(struct team *team, struct team_option_inst_info *info) { struct team_port *port = info->port; struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); lb_port_priv->stats_info.opt_inst_info = info; } static void lb_port_stats_get(struct team *team, struct team_gsetter_ctx *ctx) { struct team_port *port = ctx->info->port; struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); ctx->data.bin_val.ptr = &lb_port_priv->stats_info.stats; ctx->data.bin_val.len = sizeof(struct lb_stats); } static void __lb_stats_info_refresh_prepare(struct lb_stats_info *s_info) { memcpy(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats)); memset(&s_info->stats, 0, sizeof(struct lb_stats)); } static bool __lb_stats_info_refresh_check(struct lb_stats_info *s_info, struct team *team) { if (memcmp(&s_info->last_stats, &s_info->stats, sizeof(struct lb_stats))) { team_option_inst_set_change(s_info->opt_inst_info); return true; } return false; } static void __lb_one_cpu_stats_add(struct lb_stats *acc_stats, struct lb_stats *cpu_stats, struct u64_stats_sync *syncp) { unsigned int start; struct lb_stats tmp; do { start = u64_stats_fetch_begin(syncp); tmp.tx_bytes = cpu_stats->tx_bytes; } while (u64_stats_fetch_retry(syncp, start)); acc_stats->tx_bytes += tmp.tx_bytes; } static void lb_stats_refresh(struct work_struct *work) { struct team *team; struct lb_priv *lb_priv; struct lb_priv_ex *lb_priv_ex; struct lb_pcpu_stats *pcpu_stats; struct lb_stats *stats; struct lb_stats_info *s_info; struct team_port *port; bool changed = false; int i; int j; lb_priv_ex = container_of(work, struct lb_priv_ex, stats.refresh_dw.work); team = lb_priv_ex->team; lb_priv = get_lb_priv(team); if (!mutex_trylock(&team->lock)) { schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, 0); return; } for (j = 0; j < LB_TX_HASHTABLE_SIZE; j++) { s_info = &lb_priv->ex->stats.info[j]; __lb_stats_info_refresh_prepare(s_info); for_each_possible_cpu(i) { pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); stats = &pcpu_stats->hash_stats[j]; __lb_one_cpu_stats_add(&s_info->stats, stats, &pcpu_stats->syncp); } changed |= __lb_stats_info_refresh_check(s_info, team); } list_for_each_entry(port, &team->port_list, list) { struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); s_info = &lb_port_priv->stats_info; __lb_stats_info_refresh_prepare(s_info); for_each_possible_cpu(i) { pcpu_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); stats = per_cpu_ptr(lb_port_priv->pcpu_stats, i); __lb_one_cpu_stats_add(&s_info->stats, stats, &pcpu_stats->syncp); } changed |= 
__lb_stats_info_refresh_check(s_info, team); } if (changed) team_options_change_check(team); schedule_delayed_work(&lb_priv_ex->stats.refresh_dw, (lb_priv_ex->stats.refresh_interval * HZ) / 10); mutex_unlock(&team->lock); } static void lb_stats_refresh_interval_get(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); ctx->data.u32_val = lb_priv->ex->stats.refresh_interval; } static int lb_stats_refresh_interval_set(struct team *team, struct team_gsetter_ctx *ctx) { struct lb_priv *lb_priv = get_lb_priv(team); unsigned int interval; interval = ctx->data.u32_val; if (lb_priv->ex->stats.refresh_interval == interval) return 0; lb_priv->ex->stats.refresh_interval = interval; if (interval) schedule_delayed_work(&lb_priv->ex->stats.refresh_dw, 0); else cancel_delayed_work(&lb_priv->ex->stats.refresh_dw); return 0; } static const struct team_option lb_options[] = { { .name = "bpf_hash_func", .type = TEAM_OPTION_TYPE_BINARY, .getter = lb_bpf_func_get, .setter = lb_bpf_func_set, }, { .name = "lb_tx_method", .type = TEAM_OPTION_TYPE_STRING, .getter = lb_tx_method_get, .setter = lb_tx_method_set, }, { .name = "lb_tx_hash_to_port_mapping", .array_size = LB_TX_HASHTABLE_SIZE, .type = TEAM_OPTION_TYPE_U32, .init = lb_tx_hash_to_port_mapping_init, .getter = lb_tx_hash_to_port_mapping_get, .setter = lb_tx_hash_to_port_mapping_set, }, { .name = "lb_hash_stats", .array_size = LB_TX_HASHTABLE_SIZE, .type = TEAM_OPTION_TYPE_BINARY, .init = lb_hash_stats_init, .getter = lb_hash_stats_get, }, { .name = "lb_port_stats", .per_port = true, .type = TEAM_OPTION_TYPE_BINARY, .init = lb_port_stats_init, .getter = lb_port_stats_get, }, { .name = "lb_stats_refresh_interval", .type = TEAM_OPTION_TYPE_U32, .getter = lb_stats_refresh_interval_get, .setter = lb_stats_refresh_interval_set, }, }; static int lb_init(struct team *team) { struct lb_priv *lb_priv = get_lb_priv(team); lb_select_tx_port_func_t *func; int i, err; /* set default tx port selector */ func = lb_select_tx_port_get_func("hash"); BUG_ON(!func); rcu_assign_pointer(lb_priv->select_tx_port_func, func); lb_priv->ex = kzalloc(sizeof(*lb_priv->ex), GFP_KERNEL); if (!lb_priv->ex) return -ENOMEM; lb_priv->ex->team = team; lb_priv->pcpu_stats = alloc_percpu(struct lb_pcpu_stats); if (!lb_priv->pcpu_stats) { err = -ENOMEM; goto err_alloc_pcpu_stats; } for_each_possible_cpu(i) { struct lb_pcpu_stats *team_lb_stats; team_lb_stats = per_cpu_ptr(lb_priv->pcpu_stats, i); u64_stats_init(&team_lb_stats->syncp); } INIT_DELAYED_WORK(&lb_priv->ex->stats.refresh_dw, lb_stats_refresh); err = team_options_register(team, lb_options, ARRAY_SIZE(lb_options)); if (err) goto err_options_register; return 0; err_options_register: free_percpu(lb_priv->pcpu_stats); err_alloc_pcpu_stats: kfree(lb_priv->ex); return err; } static void lb_exit(struct team *team) { struct lb_priv *lb_priv = get_lb_priv(team); team_options_unregister(team, lb_options, ARRAY_SIZE(lb_options)); lb_bpf_func_free(team); cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw); free_percpu(lb_priv->pcpu_stats); kfree(lb_priv->ex); } static int lb_port_enter(struct team *team, struct team_port *port) { struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); lb_port_priv->pcpu_stats = alloc_percpu(struct lb_stats); if (!lb_port_priv->pcpu_stats) return -ENOMEM; return 0; } static void lb_port_leave(struct team *team, struct team_port *port) { struct lb_port_priv *lb_port_priv = get_lb_port_priv(port); free_percpu(lb_port_priv->pcpu_stats); } static void 
lb_port_disabled(struct team *team, struct team_port *port) { lb_tx_hash_to_port_mapping_null_port(team, port); } static const struct team_mode_ops lb_mode_ops = { .init = lb_init, .exit = lb_exit, .port_enter = lb_port_enter, .port_leave = lb_port_leave, .port_disabled = lb_port_disabled, .receive = lb_receive, .transmit = lb_transmit, }; static const struct team_mode lb_mode = { .kind = "loadbalance", .owner = THIS_MODULE, .priv_size = sizeof(struct lb_priv), .port_priv_size = sizeof(struct lb_port_priv), .ops = &lb_mode_ops, .lag_tx_type = NETDEV_LAG_TX_TYPE_HASH, }; static int __init lb_init_module(void) { return team_mode_register(&lb_mode); } static void __exit lb_cleanup_module(void) { team_mode_unregister(&lb_mode); } module_init(lb_init_module); module_exit(lb_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Jiri Pirko <[email protected]>"); MODULE_DESCRIPTION("Load-balancing mode for team"); MODULE_ALIAS_TEAM_MODE("loadbalance");
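/*
 * Editor's sketch (not part of the driver): the 8-bit hash that indexes
 * the 256-entry tables above is the XOR-fold of the 32-bit BPF program
 * result, exactly as lb_get_skb_hash() computes it. Standalone, hosted C:
 */
#include <stdint.h>
#include <stdio.h>

static unsigned char lb_fold_hash(uint32_t lhash)
{
	unsigned char *c = (unsigned char *)&lhash;

	/* XOR of all four bytes: 32 -> 8 bits, byte-order independent */
	return c[0] ^ c[1] ^ c[2] ^ c[3];
}

int main(void)
{
	printf("%#x\n", lb_fold_hash(0xdeadbeefu));	/* prints 0x22 */
	return 0;
}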
// SPDX-License-Identifier: GPL-2.0 /* * Altera PCIe MSI support * * Author: Ley Foon Tan <[email protected]> * * Copyright Altera Corporation (C) 2013-2015. All rights reserved */ #include <linux/interrupt.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/init.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/slab.h> #define MSI_STATUS 0x0 #define MSI_ERROR 0x4 #define MSI_INTMASK 0x8 #define MAX_MSI_VECTORS 32 struct altera_msi { DECLARE_BITMAP(used, MAX_MSI_VECTORS); struct mutex lock; /* protect "used" bitmap */ struct platform_device *pdev; struct irq_domain *msi_domain; struct irq_domain *inner_domain; void __iomem *csr_base; void __iomem *vector_base; phys_addr_t vector_phy; u32 num_of_vectors; int irq; }; static inline void msi_writel(struct altera_msi *msi, const u32 value, const u32 reg) { writel_relaxed(value, msi->csr_base + reg); } static inline u32 msi_readl(struct altera_msi *msi, const u32 reg) { return readl_relaxed(msi->csr_base + reg); } static void altera_msi_isr(struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); struct altera_msi *msi; unsigned long status; u32 bit; int ret; chained_irq_enter(chip, desc); msi = irq_desc_get_handler_data(desc); while ((status = msi_readl(msi, MSI_STATUS)) != 0) { for_each_set_bit(bit, &status, msi->num_of_vectors) { /* Dummy read from vector to clear the interrupt */ readl_relaxed(msi->vector_base + (bit * sizeof(u32))); ret = generic_handle_domain_irq(msi->inner_domain, bit); if (ret) dev_err_ratelimited(&msi->pdev->dev, "unexpected MSI\n"); } } chained_irq_exit(chip, desc); } static struct irq_chip altera_msi_irq_chip = { .name = "Altera PCIe MSI", .irq_mask = pci_msi_mask_irq, .irq_unmask = pci_msi_unmask_irq, }; static struct msi_domain_info altera_msi_domain_info = { .flags = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | MSI_FLAG_NO_AFFINITY | MSI_FLAG_PCI_MSIX, .chip = &altera_msi_irq_chip, }; static void altera_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) { struct altera_msi *msi = irq_data_get_irq_chip_data(data); phys_addr_t addr = msi->vector_phy + (data->hwirq * sizeof(u32)); msg->address_lo = lower_32_bits(addr); msg->address_hi = upper_32_bits(addr); msg->data = data->hwirq; dev_dbg(&msi->pdev->dev, "msi#%d address_hi %#x address_lo %#x\n", (int)data->hwirq, msg->address_hi, msg->address_lo); } static struct irq_chip altera_msi_bottom_irq_chip = { .name = "Altera MSI", .irq_compose_msi_msg = altera_compose_msi_msg, }; static int altera_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs, void *args) { struct altera_msi *msi = domain->host_data; unsigned long bit; u32 mask; WARN_ON(nr_irqs != 1); mutex_lock(&msi->lock); bit = find_first_zero_bit(msi->used, msi->num_of_vectors); if (bit >= msi->num_of_vectors) { mutex_unlock(&msi->lock); return -ENOSPC; } set_bit(bit, msi->used); mutex_unlock(&msi->lock); irq_domain_set_info(domain, virq, bit, &altera_msi_bottom_irq_chip, domain->host_data, handle_simple_irq, NULL, NULL); mask = msi_readl(msi, MSI_INTMASK); mask |= 1 << bit; msi_writel(msi, mask, MSI_INTMASK); return 0; } static void altera_irq_domain_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs) { struct irq_data *d = irq_domain_get_irq_data(domain, virq); struct altera_msi *msi = irq_data_get_irq_chip_data(d); u32 mask; mutex_lock(&msi->lock); if 
(!test_bit(d->hwirq, msi->used)) { dev_err(&msi->pdev->dev, "trying to free unused MSI#%lu\n", d->hwirq); } else { __clear_bit(d->hwirq, msi->used); mask = msi_readl(msi, MSI_INTMASK); mask &= ~(1 << d->hwirq); msi_writel(msi, mask, MSI_INTMASK); } mutex_unlock(&msi->lock); } static const struct irq_domain_ops msi_domain_ops = { .alloc = altera_irq_domain_alloc, .free = altera_irq_domain_free, }; static int altera_allocate_domains(struct altera_msi *msi) { struct fwnode_handle *fwnode = of_node_to_fwnode(msi->pdev->dev.of_node); msi->inner_domain = irq_domain_add_linear(NULL, msi->num_of_vectors, &msi_domain_ops, msi); if (!msi->inner_domain) { dev_err(&msi->pdev->dev, "failed to create IRQ domain\n"); return -ENOMEM; } msi->msi_domain = pci_msi_create_irq_domain(fwnode, &altera_msi_domain_info, msi->inner_domain); if (!msi->msi_domain) { dev_err(&msi->pdev->dev, "failed to create MSI domain\n"); irq_domain_remove(msi->inner_domain); return -ENOMEM; } return 0; } static void altera_free_domains(struct altera_msi *msi) { irq_domain_remove(msi->msi_domain); irq_domain_remove(msi->inner_domain); } static void altera_msi_remove(struct platform_device *pdev) { struct altera_msi *msi = platform_get_drvdata(pdev); msi_writel(msi, 0, MSI_INTMASK); irq_set_chained_handler_and_data(msi->irq, NULL, NULL); altera_free_domains(msi); platform_set_drvdata(pdev, NULL); } static int altera_msi_probe(struct platform_device *pdev) { struct altera_msi *msi; struct device_node *np = pdev->dev.of_node; struct resource *res; int ret; msi = devm_kzalloc(&pdev->dev, sizeof(struct altera_msi), GFP_KERNEL); if (!msi) return -ENOMEM; mutex_init(&msi->lock); msi->pdev = pdev; msi->csr_base = devm_platform_ioremap_resource_byname(pdev, "csr"); if (IS_ERR(msi->csr_base)) { dev_err(&pdev->dev, "failed to map csr memory\n"); return PTR_ERR(msi->csr_base); } res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "vector_slave"); msi->vector_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(msi->vector_base)) return PTR_ERR(msi->vector_base); msi->vector_phy = res->start; if (of_property_read_u32(np, "num-vectors", &msi->num_of_vectors)) { dev_err(&pdev->dev, "failed to parse the number of vectors\n"); return -EINVAL; } ret = altera_allocate_domains(msi); if (ret) return ret; msi->irq = platform_get_irq(pdev, 0); if (msi->irq < 0) { ret = msi->irq; goto err; } irq_set_chained_handler_and_data(msi->irq, altera_msi_isr, msi); platform_set_drvdata(pdev, msi); return 0; err: altera_msi_remove(pdev); return ret; } static const struct of_device_id altera_msi_of_match[] = { { .compatible = "altr,msi-1.0", NULL }, { }, }; static struct platform_driver altera_msi_driver = { .driver = { .name = "altera-msi", .of_match_table = altera_msi_of_match, }, .probe = altera_msi_probe, .remove = altera_msi_remove, }; static int __init altera_msi_init(void) { return platform_driver_register(&altera_msi_driver); } static void __exit altera_msi_exit(void) { platform_driver_unregister(&altera_msi_driver); } subsys_initcall(altera_msi_init); MODULE_DEVICE_TABLE(of, altera_msi_of_match); module_exit(altera_msi_exit); MODULE_DESCRIPTION("Altera PCIe MSI support driver"); MODULE_LICENSE("GPL v2");
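/*
 * Editor's note: each MSI vector n in this design has a dedicated
 * doorbell word at vector_phy + n * sizeof(u32). The endpoint is told
 * (via altera_compose_msi_msg() above) to write the vector index n to
 * that address, and altera_msi_isr() clears the interrupt by reading
 * the same word back. A minimal sketch of that pairing, under the same
 * assumptions (the sketch_* name is hypothetical):
 */
static inline void sketch_msi_addr_data(phys_addr_t vector_phy, u32 hwirq,
					u64 *addr, u32 *data)
{
	*addr = vector_phy + hwirq * sizeof(u32);	/* per-vector doorbell */
	*data = hwirq;					/* vector index as payload */
}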
// SPDX-License-Identifier: GPL-2.0 /* * Device Tree file for d2 Network v2 * * Copyright (C) 2014 Simon Guinot <[email protected]> * */ /dts-v1/; #include <dt-bindings/leds/leds-ns2.h> #include "kirkwood-netxbig.dtsi" / { model = "LaCie d2 Network v2"; compatible = "lacie,d2net_v2", "lacie,netxbig", "marvell,kirkwood-88f6281", "marvell,kirkwood"; memory { device_type = "memory"; reg = <0x00000000 0x10000000>; }; ns2-leds { compatible = "lacie,ns2-leds"; blue-sata { label = "d2net_v2:blue:sata"; slow-gpio = <&gpio0 29 GPIO_ACTIVE_HIGH>; cmd-gpio = <&gpio0 30 GPIO_ACTIVE_HIGH>; modes-map = <NS_V2_LED_OFF 1 0 NS_V2_LED_ON 0 1 NS_V2_LED_ON 1 1 NS_V2_LED_SATA 0 0>; }; }; gpio-leds { compatible = "gpio-leds"; led-red-fail { label = "d2net_v2:red:fail"; gpios = <&gpio0 12 GPIO_ACTIVE_HIGH>; }; }; };
// SPDX-License-Identifier: LGPL-2.1 /* * trace/beauty/statx.c * * Copyright (C) 2017, Red Hat Inc, Arnaldo Carvalho de Melo <[email protected]> */ #include "trace/beauty/beauty.h" #include <sys/types.h> #include <linux/log2.h> static size_t statx__scnprintf_mask(unsigned long mask, char *bf, size_t size, bool show_prefix) { #include "trace/beauty/generated/statx_mask_array.c" static DEFINE_STRARRAY(statx_mask, "STATX_"); return strarray__scnprintf_flags(&strarray__statx_mask, bf, size, show_prefix, mask); } size_t syscall_arg__scnprintf_statx_mask(char *bf, size_t size, struct syscall_arg *arg) { bool show_prefix = arg->show_string_prefix; int mask = arg->val; return statx__scnprintf_mask(mask, bf, size, show_prefix); }
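The helper above relies on perf's generated statx_mask_array.c table plus strarray__scnprintf_flags(); the general bit-walking shape of that formatter is easy to show in isolation. The following standalone sketch (an editor's addition) uses a reduced, hand-written table of the low STATX_* bits for demonstration only; it is not the generated array perf actually uses.

#include <stdio.h>

/* names for mask bits 0..3 (STATX_TYPE=0x1 ... STATX_UID=0x8) */
static const char * const statx_mask_names[] = {
	"TYPE", "MODE", "NLINK", "UID",
};

static size_t scnprintf_flags(unsigned long mask, char *bf, size_t size)
{
	size_t printed = 0;
	unsigned int i;

	for (i = 0; i < 4 && printed < size; i++) {
		if (!(mask & (1UL << i)))
			continue;
		/* emit "STATX_NAME", '|'-separated after the first hit */
		printed += snprintf(bf + printed, size - printed, "%s%s%s",
				    printed ? "|" : "", "STATX_",
				    statx_mask_names[i]);
	}
	return printed;
}

int main(void)
{
	char buf[128];

	scnprintf_flags(0x5, buf, sizeof(buf));
	puts(buf); /* prints "STATX_TYPE|STATX_NLINK" */
	return 0;
}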
// SPDX-License-Identifier: GPL-2.0-or-later /* * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices * * based on usbnet.c, asix.c and the vendor provided mcs7830 driver * * Copyright (C) 2010 Andreas Mohr <[email protected]> * Copyright (C) 2006 Arnd Bergmann <[email protected]> * Copyright (C) 2003-2005 David Hollis <[email protected]> * Copyright (C) 2005 Phil Chang <[email protected]> * Copyright (c) 2002-2003 TiVo Inc. * * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!). * * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"), * per active notification by manufacturer * * TODO: * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?) * - implement ethtool_ops get_pauseparam/set_pauseparam * via HIF_REG_PAUSE_THRESHOLD (>= revision C only!) * - implement get_eeprom/[set_eeprom] * - switch PHY on/off on ifup/ifdown (perhaps in usbnet.c, via MII) * - mcs7830_get_regs() handling is weird: for rev 2 we return 32 regs, * can access only ~ 24, remaining user buffer is uninitialized garbage * - anything else? */ #include <linux/crc32.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/usbnet.h> /* requests */ #define MCS7830_RD_BMREQ (USB_DIR_IN | USB_TYPE_VENDOR | \ USB_RECIP_DEVICE) #define MCS7830_WR_BMREQ (USB_DIR_OUT | USB_TYPE_VENDOR | \ USB_RECIP_DEVICE) #define MCS7830_RD_BREQ 0x0E #define MCS7830_WR_BREQ 0x0D #define MCS7830_CTRL_TIMEOUT 1000 #define MCS7830_MAX_MCAST 64 #define MCS7830_VENDOR_ID 0x9710 #define MCS7832_PRODUCT_ID 0x7832 #define MCS7830_PRODUCT_ID 0x7830 #define MCS7730_PRODUCT_ID 0x7730 #define SITECOM_VENDOR_ID 0x0DF6 #define LN_030_PRODUCT_ID 0x0021 #define MCS7830_MII_ADVERTISE (ADVERTISE_PAUSE_CAP | ADVERTISE_100FULL | \ ADVERTISE_100HALF | ADVERTISE_10FULL | \ ADVERTISE_10HALF | ADVERTISE_CSMA) /* HIF_REG_XX corresponding index value */ enum { HIF_REG_MULTICAST_HASH = 0x00, HIF_REG_PACKET_GAP1 = 0x08, HIF_REG_PACKET_GAP2 = 0x09, HIF_REG_PHY_DATA = 0x0a, HIF_REG_PHY_CMD1 = 0x0c, HIF_REG_PHY_CMD1_READ = 0x40, HIF_REG_PHY_CMD1_WRITE = 0x20, HIF_REG_PHY_CMD1_PHYADDR = 0x01, HIF_REG_PHY_CMD2 = 0x0d, HIF_REG_PHY_CMD2_PEND_FLAG_BIT = 0x80, HIF_REG_PHY_CMD2_READY_FLAG_BIT = 0x40, HIF_REG_CONFIG = 0x0e, /* hmm, spec sez: "R/W", "Except bit 3" (likely TXENABLE). 
*/ HIF_REG_CONFIG_CFG = 0x80, HIF_REG_CONFIG_SPEED100 = 0x40, HIF_REG_CONFIG_FULLDUPLEX_ENABLE = 0x20, HIF_REG_CONFIG_RXENABLE = 0x10, HIF_REG_CONFIG_TXENABLE = 0x08, HIF_REG_CONFIG_SLEEPMODE = 0x04, HIF_REG_CONFIG_ALLMULTICAST = 0x02, HIF_REG_CONFIG_PROMISCUOUS = 0x01, HIF_REG_ETHERNET_ADDR = 0x0f, HIF_REG_FRAME_DROP_COUNTER = 0x15, /* 0..ff; reset: 0 */ HIF_REG_PAUSE_THRESHOLD = 0x16, HIF_REG_PAUSE_THRESHOLD_DEFAULT = 0, }; /* Trailing status byte in Ethernet Rx frame */ enum { MCS7830_RX_SHORT_FRAME = 0x01, /* < 64 bytes */ MCS7830_RX_LENGTH_ERROR = 0x02, /* framelen != Ethernet length field */ MCS7830_RX_ALIGNMENT_ERROR = 0x04, /* non-even number of nibbles */ MCS7830_RX_CRC_ERROR = 0x08, MCS7830_RX_LARGE_FRAME = 0x10, /* > 1518 bytes */ MCS7830_RX_FRAME_CORRECT = 0x20, /* frame is correct */ /* [7:6] reserved */ }; struct mcs7830_data { u8 multi_filter[8]; u8 config; }; static const char driver_name[] = "MOSCHIP usb-ethernet driver"; static int mcs7830_get_reg(struct usbnet *dev, u16 index, u16 size, void *data) { int ret; ret = usbnet_read_cmd(dev, MCS7830_RD_BREQ, MCS7830_RD_BMREQ, 0x0000, index, data, size); if (ret < 0) return ret; else if (ret < size) return -ENODATA; return ret; } static int mcs7830_set_reg(struct usbnet *dev, u16 index, u16 size, const void *data) { return usbnet_write_cmd(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ, 0x0000, index, data, size); } static void mcs7830_set_reg_async(struct usbnet *dev, u16 index, u16 size, void *data) { usbnet_write_cmd_async(dev, MCS7830_WR_BREQ, MCS7830_WR_BMREQ, 0x0000, index, data, size); } static int mcs7830_hif_get_mac_address(struct usbnet *dev, unsigned char *addr) { int ret = mcs7830_get_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr); if (ret < 0) return ret; return 0; } static int mcs7830_hif_set_mac_address(struct usbnet *dev, const unsigned char *addr) { int ret = mcs7830_set_reg(dev, HIF_REG_ETHERNET_ADDR, ETH_ALEN, addr); if (ret < 0) return ret; return 0; } static int mcs7830_set_mac_address(struct net_device *netdev, void *p) { int ret; struct usbnet *dev = netdev_priv(netdev); struct sockaddr *addr = p; if (netif_running(netdev)) return -EBUSY; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; ret = mcs7830_hif_set_mac_address(dev, addr->sa_data); if (ret < 0) return ret; /* it worked --> adopt it on netdev side */ eth_hw_addr_set(netdev, addr->sa_data); return 0; } static int mcs7830_read_phy(struct usbnet *dev, u8 index) { int ret; int i; __le16 val; u8 cmd[2] = { HIF_REG_PHY_CMD1_READ | HIF_REG_PHY_CMD1_PHYADDR, HIF_REG_PHY_CMD2_PEND_FLAG_BIT | index, }; mutex_lock(&dev->phy_mutex); /* write the MII command */ ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if (ret < 0) goto out; /* wait for the data to become valid, should be within < 1ms */ for (i = 0; i < 10; i++) { ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT)) break; ret = -EIO; msleep(1); } if (ret < 0) goto out; /* read actual register contents */ ret = mcs7830_get_reg(dev, HIF_REG_PHY_DATA, 2, &val); if (ret < 0) goto out; ret = le16_to_cpu(val); dev_dbg(&dev->udev->dev, "read PHY reg %02x: %04x (%d tries)\n", index, val, i); out: mutex_unlock(&dev->phy_mutex); return ret; } static int mcs7830_write_phy(struct usbnet *dev, u8 index, u16 val) { int ret; int i; __le16 le_val; u8 cmd[2] = { HIF_REG_PHY_CMD1_WRITE | HIF_REG_PHY_CMD1_PHYADDR, HIF_REG_PHY_CMD2_PEND_FLAG_BIT | (index & 0x1F), }; mutex_lock(&dev->phy_mutex); /* write the new register contents */ le_val = 
cpu_to_le16(val); ret = mcs7830_set_reg(dev, HIF_REG_PHY_DATA, 2, &le_val); if (ret < 0) goto out; /* write the MII command */ ret = mcs7830_set_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if (ret < 0) goto out; /* wait for the command to be accepted by the PHY */ for (i = 0; i < 10; i++) { ret = mcs7830_get_reg(dev, HIF_REG_PHY_CMD1, 2, cmd); if ((ret < 0) || (cmd[1] & HIF_REG_PHY_CMD2_READY_FLAG_BIT)) break; ret = -EIO; msleep(1); } if (ret < 0) goto out; ret = 0; dev_dbg(&dev->udev->dev, "write PHY reg %02x: %04x (%d tries)\n", index, val, i); out: mutex_unlock(&dev->phy_mutex); return ret; } /* * This algorithm comes from the original mcs7830 version 1.4 driver, * not sure if it is needed. */ static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode) { int ret; /* Enable all media types */ ret = mcs7830_write_phy(dev, MII_ADVERTISE, MCS7830_MII_ADVERTISE); /* First reset BMCR */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, 0x0000); /* Enable Auto Neg */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE); /* Restart Auto Neg (Keep the Enable Auto Neg Bit Set) */ if (!ret) ret = mcs7830_write_phy(dev, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART ); return ret; } /* * if we can read register 22, the chip revision is C or higher */ static int mcs7830_get_rev(struct usbnet *dev) { u8 dummy[2]; int ret; ret = mcs7830_get_reg(dev, HIF_REG_FRAME_DROP_COUNTER, 2, dummy); if (ret > 0) return 2; /* Rev C or later */ return 1; /* earlier revision */ } /* * On rev. C we need to set the pause threshold */ static void mcs7830_rev_C_fixup(struct usbnet *dev) { u8 pause_threshold = HIF_REG_PAUSE_THRESHOLD_DEFAULT; int retry; for (retry = 0; retry < 2; retry++) { if (mcs7830_get_rev(dev) == 2) { dev_info(&dev->udev->dev, "applying rev.C fixup\n"); mcs7830_set_reg(dev, HIF_REG_PAUSE_THRESHOLD, 1, &pause_threshold); } msleep(1); } } static int mcs7830_mdio_read(struct net_device *netdev, int phy_id, int location) { struct usbnet *dev = netdev_priv(netdev); return mcs7830_read_phy(dev, location); } static void mcs7830_mdio_write(struct net_device *netdev, int phy_id, int location, int val) { struct usbnet *dev = netdev_priv(netdev); mcs7830_write_phy(dev, location, val); } static int mcs7830_ioctl(struct net_device *net, struct ifreq *rq, int cmd) { struct usbnet *dev = netdev_priv(net); return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL); } static inline struct mcs7830_data *mcs7830_get_data(struct usbnet *dev) { return (struct mcs7830_data *)&dev->data; } static void mcs7830_hif_update_multicast_hash(struct usbnet *dev) { struct mcs7830_data *data = mcs7830_get_data(dev); mcs7830_set_reg_async(dev, HIF_REG_MULTICAST_HASH, sizeof data->multi_filter, data->multi_filter); } static void mcs7830_hif_update_config(struct usbnet *dev) { /* implementation specific to data->config (argument needs to be heap-based anyway - USB DMA!) 
*/ struct mcs7830_data *data = mcs7830_get_data(dev); mcs7830_set_reg_async(dev, HIF_REG_CONFIG, 1, &data->config); } static void mcs7830_data_set_multicast(struct net_device *net) { struct usbnet *dev = netdev_priv(net); struct mcs7830_data *data = mcs7830_get_data(dev); memset(data->multi_filter, 0, sizeof data->multi_filter); data->config = HIF_REG_CONFIG_TXENABLE; /* this should not be needed, but it doesn't work otherwise */ data->config |= HIF_REG_CONFIG_ALLMULTICAST; if (net->flags & IFF_PROMISC) { data->config |= HIF_REG_CONFIG_PROMISCUOUS; } else if (net->flags & IFF_ALLMULTI || netdev_mc_count(net) > MCS7830_MAX_MCAST) { data->config |= HIF_REG_CONFIG_ALLMULTICAST; } else if (netdev_mc_empty(net)) { /* just broadcast and directed */ } else { /* We use the 20 byte dev->data * for our 8 byte filter buffer * to avoid allocating memory that * is tricky to free later */ struct netdev_hw_addr *ha; u32 crc_bits; /* Build the multicast hash filter. */ netdev_for_each_mc_addr(ha, net) { crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26; data->multi_filter[crc_bits >> 3] |= 1 << (crc_bits & 7); } } } static int mcs7830_apply_base_config(struct usbnet *dev) { int ret; /* re-configure known MAC (suspend case etc.) */ ret = mcs7830_hif_set_mac_address(dev, dev->net->dev_addr); if (ret) { dev_info(&dev->udev->dev, "Cannot set MAC address\n"); goto out; } /* Set up PHY */ ret = mcs7830_set_autoneg(dev, 0); if (ret) { dev_info(&dev->udev->dev, "Cannot set autoneg\n"); goto out; } mcs7830_hif_update_multicast_hash(dev); mcs7830_hif_update_config(dev); mcs7830_rev_C_fixup(dev); ret = 0; out: return ret; } /* credits go to asix_set_multicast */ static void mcs7830_set_multicast(struct net_device *net) { struct usbnet *dev = netdev_priv(net); mcs7830_data_set_multicast(net); mcs7830_hif_update_multicast_hash(dev); mcs7830_hif_update_config(dev); } static int mcs7830_get_regs_len(struct net_device *net) { struct usbnet *dev = netdev_priv(net); switch (mcs7830_get_rev(dev)) { case 1: return 21; case 2: return 32; } return 0; } static void mcs7830_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *drvinfo) { usbnet_get_drvinfo(net, drvinfo); } static void mcs7830_get_regs(struct net_device *net, struct ethtool_regs *regs, void *data) { struct usbnet *dev = netdev_priv(net); regs->version = mcs7830_get_rev(dev); mcs7830_get_reg(dev, 0, regs->len, data); } static const struct ethtool_ops mcs7830_ethtool_ops = { .get_drvinfo = mcs7830_get_drvinfo, .get_regs_len = mcs7830_get_regs_len, .get_regs = mcs7830_get_regs, /* common usbnet calls */ .get_link = usbnet_get_link, .get_msglevel = usbnet_get_msglevel, .set_msglevel = usbnet_set_msglevel, .nway_reset = usbnet_nway_reset, .get_link_ksettings = usbnet_get_link_ksettings_mii, .set_link_ksettings = usbnet_set_link_ksettings_mii, }; static const struct net_device_ops mcs7830_netdev_ops = { .ndo_open = usbnet_open, .ndo_stop = usbnet_stop, .ndo_start_xmit = usbnet_start_xmit, .ndo_tx_timeout = usbnet_tx_timeout, .ndo_change_mtu = usbnet_change_mtu, .ndo_get_stats64 = dev_get_tstats64, .ndo_validate_addr = eth_validate_addr, .ndo_eth_ioctl = mcs7830_ioctl, .ndo_set_rx_mode = mcs7830_set_multicast, .ndo_set_mac_address = mcs7830_set_mac_address, }; static int mcs7830_bind(struct usbnet *dev, struct usb_interface *udev) { struct net_device *net = dev->net; u8 addr[ETH_ALEN]; int ret; int retry; /* Initial startup: Gather MAC address setting from EEPROM */ ret = -EINVAL; for (retry = 0; retry < 5 && ret; retry++) ret = mcs7830_hif_get_mac_address(dev, 
addr); if (ret) { dev_warn(&dev->udev->dev, "Cannot read MAC address\n"); goto out; } eth_hw_addr_set(net, addr); mcs7830_data_set_multicast(net); ret = mcs7830_apply_base_config(dev); if (ret) goto out; net->ethtool_ops = &mcs7830_ethtool_ops; net->netdev_ops = &mcs7830_netdev_ops; /* reserve space for the status byte on rx */ dev->rx_urb_size = ETH_FRAME_LEN + 1; dev->mii.mdio_read = mcs7830_mdio_read; dev->mii.mdio_write = mcs7830_mdio_write; dev->mii.dev = net; dev->mii.phy_id_mask = 0x3f; dev->mii.reg_num_mask = 0x1f; dev->mii.phy_id = *((u8 *) net->dev_addr + 1); ret = usbnet_get_endpoints(dev, udev); out: return ret; } /* The chip always appends a status byte that we need to strip */ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb) { u8 status; /* This check is no longer done by usbnet */ if (skb->len < dev->net->hard_header_len) { dev_err(&dev->udev->dev, "unexpected tiny rx frame\n"); return 0; } skb_trim(skb, skb->len - 1); status = skb->data[skb->len]; if (status != MCS7830_RX_FRAME_CORRECT) { dev_dbg(&dev->udev->dev, "rx fixup status %x\n", status); /* hmm, perhaps usbnet.c already sees a globally visible frame error and increments rx_errors on its own already? */ dev->net->stats.rx_errors++; if (status & (MCS7830_RX_SHORT_FRAME |MCS7830_RX_LENGTH_ERROR |MCS7830_RX_LARGE_FRAME)) dev->net->stats.rx_length_errors++; if (status & MCS7830_RX_ALIGNMENT_ERROR) dev->net->stats.rx_frame_errors++; if (status & MCS7830_RX_CRC_ERROR) dev->net->stats.rx_crc_errors++; } return skb->len > 0; } static void mcs7830_status(struct usbnet *dev, struct urb *urb) { u8 *buf = urb->transfer_buffer; bool link, link_changed; if (urb->actual_length < 16) return; link = !(buf[1] == 0x20); link_changed = netif_carrier_ok(dev->net) != link; if (link_changed) { usbnet_link_change(dev, link, 0); netdev_dbg(dev->net, "Link Status is: %d\n", link); } } static const struct driver_info moschip_info = { .description = "MOSCHIP 7830/7832/7730 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, .flags = FLAG_ETHER | FLAG_LINK_INTR, .status = mcs7830_status, .in = 1, .out = 2, }; static const struct driver_info sitecom_info = { .description = "Sitecom LN-30 usb-NET adapter", .bind = mcs7830_bind, .rx_fixup = mcs7830_rx_fixup, .flags = FLAG_ETHER | FLAG_LINK_INTR, .status = mcs7830_status, .in = 1, .out = 2, }; static const struct usb_device_id products[] = { { USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(MCS7830_VENDOR_ID, MCS7730_PRODUCT_ID), .driver_info = (unsigned long) &moschip_info, }, { USB_DEVICE(SITECOM_VENDOR_ID, LN_030_PRODUCT_ID), .driver_info = (unsigned long) &sitecom_info, }, {}, }; MODULE_DEVICE_TABLE(usb, products); static int mcs7830_reset_resume(struct usb_interface *intf) { /* YES, this function is successful enough that ethtool -d does show same output pre-/post-suspend */ struct usbnet *dev = usb_get_intfdata(intf); mcs7830_apply_base_config(dev); usbnet_resume(intf); return 0; } static struct usb_driver mcs7830_driver = { .name = driver_name, .id_table = products, .probe = usbnet_probe, .disconnect = usbnet_disconnect, .suspend = usbnet_suspend, .resume = usbnet_resume, .reset_resume = mcs7830_reset_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(mcs7830_driver); MODULE_DESCRIPTION("USB to network adapter MCS7830"); MODULE_LICENSE("GPL");
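The 64-bin multicast hash built in mcs7830_data_set_multicast() above (the top six bits of the Ethernet CRC select one bit in the 8-byte filter) can be reproduced outside the kernel. Below is an editor's standalone sketch; the bit-serial CRC is written to match the kernel's ether_crc() algorithm, and the sample address is an arbitrary multicast MAC.

#include <stdint.h>
#include <stdio.h>

/* Bit-serial Ethernet CRC-32, polynomial 0x04c11db7, as in ether_crc() */
static uint32_t ether_crc(int length, const unsigned char *data)
{
	uint32_t crc = 0xffffffff;

	while (--length >= 0) {
		unsigned char b = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, b >>= 1) {
			uint32_t msb = crc >> 31;

			crc <<= 1;
			if (msb ^ (b & 1))
				crc ^= 0x04c11db7;
		}
	}
	return crc;
}

int main(void)
{
	const unsigned char mac[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	uint8_t filter[8] = { 0 };
	uint32_t crc_bits = ether_crc(6, mac) >> 26;

	/* same byte/bit selection as the driver's hash filter update */
	filter[crc_bits >> 3] |= 1 << (crc_bits & 7);
	printf("hash bin %u -> filter[%u] bit %u\n",
	       crc_bits, crc_bits >> 3, crc_bits & 7);
	return 0;
}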
/* SPDX-License-Identifier: GPL-2.0-only */ /* * omap iommu: pagetable definitions * * Copyright (C) 2008-2010 Nokia Corporation * * Written by Hiroshi DOYU <[email protected]> */ #ifndef _OMAP_IOPGTABLE_H #define _OMAP_IOPGTABLE_H #include <linux/bitops.h> /* * "L2 table" address mask and size definitions. */ #define IOPGD_SHIFT 20 #define IOPGD_SIZE BIT(IOPGD_SHIFT) #define IOPGD_MASK (~(IOPGD_SIZE - 1)) /* * "section" address mask and size definitions. */ #define IOSECTION_SHIFT 20 #define IOSECTION_SIZE BIT(IOSECTION_SHIFT) #define IOSECTION_MASK (~(IOSECTION_SIZE - 1)) /* * "supersection" address mask and size definitions. */ #define IOSUPER_SHIFT 24 #define IOSUPER_SIZE BIT(IOSUPER_SHIFT) #define IOSUPER_MASK (~(IOSUPER_SIZE - 1)) #define PTRS_PER_IOPGD (1UL << (32 - IOPGD_SHIFT)) #define IOPGD_TABLE_SIZE (PTRS_PER_IOPGD * sizeof(u32)) /* * "small page" address mask and size definitions. */ #define IOPTE_SHIFT 12 #define IOPTE_SIZE BIT(IOPTE_SHIFT) #define IOPTE_MASK (~(IOPTE_SIZE - 1)) /* * "large page" address mask and size definitions. */ #define IOLARGE_SHIFT 16 #define IOLARGE_SIZE BIT(IOLARGE_SHIFT) #define IOLARGE_MASK (~(IOLARGE_SIZE - 1)) #define PTRS_PER_IOPTE (1UL << (IOPGD_SHIFT - IOPTE_SHIFT)) #define IOPTE_TABLE_SIZE (PTRS_PER_IOPTE * sizeof(u32)) #define IOPAGE_MASK IOPTE_MASK /** * omap_iommu_translate() - va to pa translation * @d: omap iommu descriptor * @va: virtual address * @mask: omap iommu descriptor mask * * va to pa translation */ static inline phys_addr_t omap_iommu_translate(unsigned long d, dma_addr_t va, dma_addr_t mask) { return (d & mask) | (va & (~mask)); } /* * some descriptor attributes. */ #define IOPGD_TABLE (1) #define IOPGD_SECTION (2) #define IOPGD_SUPER (BIT(18) | IOPGD_SECTION) #define iopgd_is_table(x) (((x) & 3) == IOPGD_TABLE) #define iopgd_is_section(x) (((x) & (1 << 18 | 3)) == IOPGD_SECTION) #define iopgd_is_super(x) (((x) & (1 << 18 | 3)) == IOPGD_SUPER) #define IOPTE_SMALL (2) #define IOPTE_LARGE (1) #define iopte_is_small(x) (((x) & 2) == IOPTE_SMALL) #define iopte_is_large(x) (((x) & 3) == IOPTE_LARGE) /* to find an entry in a page-table-directory */ #define iopgd_index(da) (((da) >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1)) #define iopgd_offset(obj, da) ((obj)->iopgd + iopgd_index(da)) #define iopgd_page_paddr(iopgd) (*iopgd & ~((1 << 10) - 1)) #define iopgd_page_vaddr(iopgd) ((u32 *)phys_to_virt(iopgd_page_paddr(iopgd))) /* to find an entry in the second-level page table. */ #define iopte_index(da) (((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1)) #define iopte_offset(iopgd, da) (iopgd_page_vaddr(iopgd) + iopte_index(da)) #endif /* _OMAP_IOPGTABLE_H */
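The index macros above implement the classic ARM-style two-level walk: bits 31:20 of a device address select a first-level (IOPGD) entry, bits 19:12 select a second-level (IOPTE) entry, and the low 12 bits remain the page offset. A quick standalone check of that arithmetic (an editor's addition, with a made-up address):

#include <stdint.h>
#include <stdio.h>

#define IOPGD_SHIFT	20
#define IOPTE_SHIFT	12
#define PTRS_PER_IOPGD	(1UL << (32 - IOPGD_SHIFT))		/* 4096 entries */
#define PTRS_PER_IOPTE	(1UL << (IOPGD_SHIFT - IOPTE_SHIFT))	/* 256 entries */

int main(void)
{
	uint32_t da = 0x12345678;	/* arbitrary example device address */

	printf("pgd index %lu, pte index %lu, page offset %#x\n",
	       (unsigned long)((da >> IOPGD_SHIFT) & (PTRS_PER_IOPGD - 1)),
	       (unsigned long)((da >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1)),
	       da & ((1u << IOPTE_SHIFT) - 1));
	/* prints: pgd index 291, pte index 69, page offset 0x678 */
	return 0;
}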
/* SPDX-License-Identifier: GPL-2.0-only */ /******************************************************************************* Copyright (C) 2007-2009 STMicroelectronics Ltd Author: Giuseppe Cavallaro <[email protected]> *******************************************************************************/ #ifndef __DWMAC1000_H__ #define __DWMAC1000_H__ #include <linux/phy.h> #include "common.h" #define GMAC_CONTROL 0x00000000 /* Configuration */ #define GMAC_FRAME_FILTER 0x00000004 /* Frame Filter */ #define GMAC_HASH_HIGH 0x00000008 /* Multicast Hash Table High */ #define GMAC_HASH_LOW 0x0000000c /* Multicast Hash Table Low */ #define GMAC_MII_ADDR 0x00000010 /* MII Address */ #define GMAC_MII_DATA 0x00000014 /* MII Data */ #define GMAC_FLOW_CTRL 0x00000018 /* Flow Control */ #define GMAC_VLAN_TAG 0x0000001c /* VLAN Tag */ #define GMAC_DEBUG 0x00000024 /* GMAC debug register */ #define GMAC_WAKEUP_FILTER 0x00000028 /* Wake-up Frame Filter */ #define GMAC_INT_STATUS 0x00000038 /* interrupt status register */ #define GMAC_INT_STATUS_PMT BIT(3) #define GMAC_INT_STATUS_MMCIS BIT(4) #define GMAC_INT_STATUS_MMCRIS BIT(5) #define GMAC_INT_STATUS_MMCTIS BIT(6) #define GMAC_INT_STATUS_MMCCSUM BIT(7) #define GMAC_INT_STATUS_TSTAMP BIT(9) #define GMAC_INT_STATUS_LPIIS BIT(10) /* interrupt mask register */ #define GMAC_INT_MASK 0x0000003c #define GMAC_INT_DISABLE_RGMII BIT(0) #define GMAC_INT_DISABLE_PCSLINK BIT(1) #define GMAC_INT_DISABLE_PCSAN BIT(2) #define GMAC_INT_DISABLE_PMT BIT(3) #define GMAC_INT_DISABLE_TIMESTAMP BIT(9) #define GMAC_INT_DISABLE_PCS (GMAC_INT_DISABLE_RGMII | \ GMAC_INT_DISABLE_PCSLINK | \ GMAC_INT_DISABLE_PCSAN) #define GMAC_INT_DEFAULT_MASK (GMAC_INT_DISABLE_TIMESTAMP | \ GMAC_INT_DISABLE_PCS) /* PMT Control and Status */ #define GMAC_PMT 0x0000002c enum power_event { pointer_reset = 0x80000000, global_unicast = 0x00000200, wake_up_rx_frame = 0x00000040, magic_frame = 0x00000020, wake_up_frame_en = 0x00000004, magic_pkt_en = 0x00000002, power_down = 0x00000001, }; /* Energy Efficient Ethernet (EEE) * * LPI status, timer and control register offset */ #define LPI_CTRL_STATUS 0x0030 #define LPI_TIMER_CTRL 0x0034 /* LPI control and status defines */ #define LPI_CTRL_STATUS_LPITXA 0x00080000 /* Enable LPI TX Automate */ #define LPI_CTRL_STATUS_PLSEN 0x00040000 /* Enable PHY Link Status */ #define LPI_CTRL_STATUS_PLS 0x00020000 /* PHY Link Status */ #define LPI_CTRL_STATUS_LPIEN 0x00010000 /* LPI Enable */ #define LPI_CTRL_STATUS_RLPIST 0x00000200 /* Receive LPI state */ #define LPI_CTRL_STATUS_TLPIST 0x00000100 /* Transmit LPI state */ #define LPI_CTRL_STATUS_RLPIEX 0x00000008 /* Receive LPI Exit */ #define LPI_CTRL_STATUS_RLPIEN 0x00000004 /* Receive LPI Entry */ #define LPI_CTRL_STATUS_TLPIEX 0x00000002 /* Transmit LPI Exit */ #define LPI_CTRL_STATUS_TLPIEN 0x00000001 /* Transmit LPI Entry */ /* GMAC HW ADDR regs */ #define GMAC_ADDR_HIGH(reg) ((reg > 15) ? 0x00000800 + (reg - 16) * 8 : \ 0x00000040 + (reg * 8)) #define GMAC_ADDR_LOW(reg) ((reg > 15) ? 
0x00000804 + (reg - 16) * 8 : \ 0x00000044 + (reg * 8)) #define GMAC_MAX_PERFECT_ADDRESSES 1 #define GMAC_PCS_BASE 0x000000c0 /* PCS register base */ #define GMAC_RGSMIIIS 0x000000d8 /* RGMII/SMII status */ /* SGMII/RGMII status register */ #define GMAC_RGSMIIIS_LNKMODE BIT(0) #define GMAC_RGSMIIIS_SPEED GENMASK(2, 1) #define GMAC_RGSMIIIS_SPEED_SHIFT 1 #define GMAC_RGSMIIIS_LNKSTS BIT(3) #define GMAC_RGSMIIIS_JABTO BIT(4) #define GMAC_RGSMIIIS_FALSECARDET BIT(5) #define GMAC_RGSMIIIS_SMIDRXS BIT(16) /* LNKMOD */ #define GMAC_RGSMIIIS_LNKMOD_MASK 0x1 /* LNKSPEED */ #define GMAC_RGSMIIIS_SPEED_125 0x2 #define GMAC_RGSMIIIS_SPEED_25 0x1 #define GMAC_RGSMIIIS_SPEED_2_5 0x0 /* GMAC Configuration defines */ #define GMAC_CONTROL_2K 0x08000000 /* IEEE 802.3as 2K packets */ #define GMAC_CONTROL_TC 0x01000000 /* Transmit Conf. in RGMII/SGMII */ #define GMAC_CONTROL_WD 0x00800000 /* Disable Watchdog on receive */ #define GMAC_CONTROL_JD 0x00400000 /* Jabber disable */ #define GMAC_CONTROL_BE 0x00200000 /* Frame Burst Enable */ #define GMAC_CONTROL_JE 0x00100000 /* Jumbo frame */ enum inter_frame_gap { GMAC_CONTROL_IFG_88 = 0x00040000, GMAC_CONTROL_IFG_80 = 0x00020000, GMAC_CONTROL_IFG_40 = 0x000e0000, }; #define GMAC_CONTROL_DCRS 0x00010000 /* Disable carrier sense */ #define GMAC_CONTROL_PS 0x00008000 /* Port Select 0:GMII 1:MII */ #define GMAC_CONTROL_FES 0x00004000 /* Speed 0:10 1:100 */ #define GMAC_CONTROL_DO 0x00002000 /* Disable Rx Own */ #define GMAC_CONTROL_LM 0x00001000 /* Loop-back mode */ #define GMAC_CONTROL_DM 0x00000800 /* Duplex Mode */ #define GMAC_CONTROL_IPC 0x00000400 /* Checksum Offload */ #define GMAC_CONTROL_DR 0x00000200 /* Disable Retry */ #define GMAC_CONTROL_LUD 0x00000100 /* Link up/down */ #define GMAC_CONTROL_ACS 0x00000080 /* Auto Pad/FCS Stripping */ #define GMAC_CONTROL_DC 0x00000010 /* Deferral Check */ #define GMAC_CONTROL_TE 0x00000008 /* Transmitter Enable */ #define GMAC_CONTROL_RE 0x00000004 /* Receiver Enable */ #define GMAC_CORE_INIT (GMAC_CONTROL_JD | GMAC_CONTROL_PS | \ GMAC_CONTROL_BE | GMAC_CONTROL_DCRS) /* GMAC Frame Filter defines */ #define GMAC_FRAME_FILTER_PR 0x00000001 /* Promiscuous Mode */ #define GMAC_FRAME_FILTER_HUC 0x00000002 /* Hash Unicast */ #define GMAC_FRAME_FILTER_HMC 0x00000004 /* Hash Multicast */ #define GMAC_FRAME_FILTER_DAIF 0x00000008 /* DA Inverse Filtering */ #define GMAC_FRAME_FILTER_PM 0x00000010 /* Pass all multicast */ #define GMAC_FRAME_FILTER_DBF 0x00000020 /* Disable Broadcast frames */ #define GMAC_FRAME_FILTER_PCF 0x00000080 /* Pass Control frames */ #define GMAC_FRAME_FILTER_SAIF 0x00000100 /* SA Inverse Filtering */ #define GMAC_FRAME_FILTER_SAF 0x00000200 /* Source Address Filter */ #define GMAC_FRAME_FILTER_HPF 0x00000400 /* Hash or perfect Filter */ #define GMAC_FRAME_FILTER_RA 0x80000000 /* Receive all mode */ /* GMII ADDR defines */ #define GMAC_MII_ADDR_WRITE 0x00000002 /* MII Write */ #define GMAC_MII_ADDR_BUSY 0x00000001 /* MII Busy */ /* GMAC FLOW CTRL defines */ #define GMAC_FLOW_CTRL_PT_MASK 0xffff0000 /* Pause Time Mask */ #define GMAC_FLOW_CTRL_PT_SHIFT 16 #define GMAC_FLOW_CTRL_UP 0x00000008 /* Unicast pause frame enable */ #define GMAC_FLOW_CTRL_RFE 0x00000004 /* Rx Flow Control Enable */ #define GMAC_FLOW_CTRL_TFE 0x00000002 /* Tx Flow Control Enable */ #define GMAC_FLOW_CTRL_FCB_BPA 0x00000001 /* Flow Control Busy ...
*/ /* DEBUG Register defines */ /* MTL TxStatus FIFO */ #define GMAC_DEBUG_TXSTSFSTS BIT(25) /* MTL TxStatus FIFO Full Status */ #define GMAC_DEBUG_TXFSTS BIT(24) /* MTL Tx FIFO Not Empty Status */ #define GMAC_DEBUG_TWCSTS BIT(22) /* MTL Tx FIFO Write Controller */ /* MTL Tx FIFO Read Controller Status */ #define GMAC_DEBUG_TRCSTS_MASK GENMASK(21, 20) #define GMAC_DEBUG_TRCSTS_SHIFT 20 #define GMAC_DEBUG_TRCSTS_IDLE 0 #define GMAC_DEBUG_TRCSTS_READ 1 #define GMAC_DEBUG_TRCSTS_TXW 2 #define GMAC_DEBUG_TRCSTS_WRITE 3 #define GMAC_DEBUG_TXPAUSED BIT(19) /* MAC Transmitter in PAUSE */ /* MAC Transmit Frame Controller Status */ #define GMAC_DEBUG_TFCSTS_MASK GENMASK(18, 17) #define GMAC_DEBUG_TFCSTS_SHIFT 17 #define GMAC_DEBUG_TFCSTS_IDLE 0 #define GMAC_DEBUG_TFCSTS_WAIT 1 #define GMAC_DEBUG_TFCSTS_GEN_PAUSE 2 #define GMAC_DEBUG_TFCSTS_XFER 3 /* MAC GMII or MII Transmit Protocol Engine Status */ #define GMAC_DEBUG_TPESTS BIT(16) #define GMAC_DEBUG_RXFSTS_MASK GENMASK(9, 8) /* MTL Rx FIFO Fill-level */ #define GMAC_DEBUG_RXFSTS_SHIFT 8 #define GMAC_DEBUG_RXFSTS_EMPTY 0 #define GMAC_DEBUG_RXFSTS_BT 1 #define GMAC_DEBUG_RXFSTS_AT 2 #define GMAC_DEBUG_RXFSTS_FULL 3 #define GMAC_DEBUG_RRCSTS_MASK GENMASK(6, 5) /* MTL Rx FIFO Read Controller */ #define GMAC_DEBUG_RRCSTS_SHIFT 5 #define GMAC_DEBUG_RRCSTS_IDLE 0 #define GMAC_DEBUG_RRCSTS_RDATA 1 #define GMAC_DEBUG_RRCSTS_RSTAT 2 #define GMAC_DEBUG_RRCSTS_FLUSH 3 #define GMAC_DEBUG_RWCSTS BIT(4) /* MTL Rx FIFO Write Controller Active */ /* MAC Receive Frame Controller FIFO Status */ #define GMAC_DEBUG_RFCFCSTS_MASK GENMASK(2, 1) #define GMAC_DEBUG_RFCFCSTS_SHIFT 1 /* MAC GMII or MII Receive Protocol Engine Status */ #define GMAC_DEBUG_RPESTS BIT(0) /*--- DMA BLOCK defines ---*/ /* DMA Bus Mode register defines */ #define DMA_BUS_MODE_DA 0x00000002 /* Arbitration scheme */ #define DMA_BUS_MODE_DSL_MASK 0x0000007c /* Descriptor Skip Length */ #define DMA_BUS_MODE_DSL_SHIFT 2 /* (in DWORDS) */ /* Programmable burst length (passed through platform) */ #define DMA_BUS_MODE_PBL_MASK 0x00003f00 /* Programmable Burst Len */ #define DMA_BUS_MODE_PBL_SHIFT 8 #define DMA_BUS_MODE_ATDS 0x00000080 /* Alternate Descriptor Size */ enum rx_tx_priority_ratio { double_ratio = 0x00004000, /* 2:1 */ triple_ratio = 0x00008000, /* 3:1 */ quadruple_ratio = 0x0000c000, /* 4:1 */ }; #define DMA_BUS_MODE_FB 0x00010000 /* Fixed burst */ #define DMA_BUS_MODE_MB 0x04000000 /* Mixed burst */ #define DMA_BUS_MODE_RPBL_MASK 0x007e0000 /* Rx-Programmable Burst Len */ #define DMA_BUS_MODE_RPBL_SHIFT 17 #define DMA_BUS_MODE_USP 0x00800000 #define DMA_BUS_MODE_MAXPBL 0x01000000 #define DMA_BUS_MODE_AAL 0x02000000 /* DMA CRS Control and Status Register Mapping */ #define DMA_HOST_TX_DESC 0x00001048 /* Current Host Tx descriptor */ #define DMA_HOST_RX_DESC 0x0000104c /* Current Host Rx descriptor */ /* DMA Bus Mode register defines */ #define DMA_BUS_PR_RATIO_MASK 0x0000c000 /* Rx/Tx priority ratio */ #define DMA_BUS_PR_RATIO_SHIFT 14 #define DMA_BUS_FB 0x00010000 /* Fixed Burst */ /* DMA operation mode defines (start/stop tx/rx are placed in common header)*/ /* Disable Drop TCP/IP csum error */ #define DMA_CONTROL_DT 0x04000000 #define DMA_CONTROL_RSF 0x02000000 /* Receive Store and Forward */ #define DMA_CONTROL_DFF 0x01000000 /* Disable flushing */ /* Threshold for Activating the FC */ enum rfa { act_full_minus_1 = 0x00800000, act_full_minus_2 = 0x00800200, act_full_minus_3 = 0x00800400, act_full_minus_4 = 0x00800600, }; /* Threshold for Deactivating the FC */ enum rfd {
deac_full_minus_1 = 0x00400000, deac_full_minus_2 = 0x00400800, deac_full_minus_3 = 0x00401000, deac_full_minus_4 = 0x00401800, }; #define DMA_CONTROL_TSF 0x00200000 /* Transmit Store and Forward */ enum ttc_control { DMA_CONTROL_TTC_64 = 0x00000000, DMA_CONTROL_TTC_128 = 0x00004000, DMA_CONTROL_TTC_192 = 0x00008000, DMA_CONTROL_TTC_256 = 0x0000c000, DMA_CONTROL_TTC_40 = 0x00010000, DMA_CONTROL_TTC_32 = 0x00014000, DMA_CONTROL_TTC_24 = 0x00018000, DMA_CONTROL_TTC_16 = 0x0001c000, }; #define DMA_CONTROL_TC_TX_MASK 0xfffe3fff #define DMA_CONTROL_EFC 0x00000100 #define DMA_CONTROL_FEF 0x00000080 #define DMA_CONTROL_FUF 0x00000040 /* Receive flow control activation field * RFA field in DMA control register, bits 23,10:9 */ #define DMA_CONTROL_RFA_MASK 0x00800600 /* Receive flow control deactivation field * RFD field in DMA control register, bits 22,12:11 */ #define DMA_CONTROL_RFD_MASK 0x00401800 /* RFD and RFA fields are encoded as follows * * Bit Field * 0,00 - Full minus 1KB (only valid when rxfifo >= 4KB and EFC enabled) * 0,01 - Full minus 2KB (only valid when rxfifo >= 4KB and EFC enabled) * 0,10 - Full minus 3KB (only valid when rxfifo >= 4KB and EFC enabled) * 0,11 - Full minus 4KB (only valid when rxfifo > 4KB and EFC enabled) * 1,00 - Full minus 5KB (only valid when rxfifo > 8KB and EFC enabled) * 1,01 - Full minus 6KB (only valid when rxfifo > 8KB and EFC enabled) * 1,10 - Full minus 7KB (only valid when rxfifo > 8KB and EFC enabled) * 1,11 - Reserved * * RFD should always be > RFA for a given FIFO size. RFD == RFA may work, * but packet throughput performance may not be as expected. * * Be sure that bit 3 in GMAC Register 6 is set for Unicast Pause frame * detection (IEEE Specification Requirement, Annex 31B, 31B.1, Pause * Description). * * Be sure that DZPA (bit 7 in Flow Control Register, GMAC Register 6), * is set to 0. This allows pause frames with a quanta of 0 to be sent * as an XOFF message to the link peer. */ #define RFA_FULL_MINUS_1K 0x00000000 #define RFA_FULL_MINUS_2K 0x00000200 #define RFA_FULL_MINUS_3K 0x00000400 #define RFA_FULL_MINUS_4K 0x00000600 #define RFA_FULL_MINUS_5K 0x00800000 #define RFA_FULL_MINUS_6K 0x00800200 #define RFA_FULL_MINUS_7K 0x00800400 #define RFD_FULL_MINUS_1K 0x00000000 #define RFD_FULL_MINUS_2K 0x00000800 #define RFD_FULL_MINUS_3K 0x00001000 #define RFD_FULL_MINUS_4K 0x00001800 #define RFD_FULL_MINUS_5K 0x00400000 #define RFD_FULL_MINUS_6K 0x00400800 #define RFD_FULL_MINUS_7K 0x00401000 enum rtc_control { DMA_CONTROL_RTC_64 = 0x00000000, DMA_CONTROL_RTC_32 = 0x00000008, DMA_CONTROL_RTC_96 = 0x00000010, DMA_CONTROL_RTC_128 = 0x00000018, }; #define DMA_CONTROL_TC_RX_MASK 0xffffffe7 #define DMA_CONTROL_OSF 0x00000004 /* Operate on second frame */ /* MMC registers offset */ #define GMAC_MMC_CTRL 0x100 #define GMAC_MMC_RX_INTR 0x104 #define GMAC_MMC_TX_INTR 0x108 #define GMAC_MMC_RX_CSUM_OFFLOAD 0x208 #define GMAC_EXTHASH_BASE 0x500 /* PTP and timestamping registers */ #define GMAC3_X_ATSNS GENMASK(19, 16) #define GMAC3_X_ATSNS_SHIFT 16 #define GMAC_PTP_TCR_ATSFC BIT(24) #define GMAC_PTP_TCR_ATSEN0 BIT(25) #define GMAC3_X_TIMESTAMP_STATUS 0x28 #define GMAC_PTP_ATNR 0x30 #define GMAC_PTP_ATSR 0x34 extern const struct stmmac_dma_ops dwmac1000_dma_ops; #endif /* __DWMAC1000_H__ */
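The perfect-filter layout encoded by GMAC_ADDR_HIGH()/GMAC_ADDR_LOW() above is worth seeing with numbers: the first sixteen MAC address register pairs sit at 0x40..0xbc, and entries 16 and up move to a second bank at 0x800. An editor's standalone sketch of the same offset rule:

#include <stdint.h>
#include <stdio.h>

/* Same layout rule as GMAC_ADDR_HIGH()/GMAC_ADDR_LOW() in the header */
static uint32_t gmac_addr_high(unsigned int reg)
{
	return reg > 15 ? 0x00000800 + (reg - 16) * 8 : 0x00000040 + reg * 8;
}

static uint32_t gmac_addr_low(unsigned int reg)
{
	return reg > 15 ? 0x00000804 + (reg - 16) * 8 : 0x00000044 + reg * 8;
}

int main(void)
{
	const unsigned int regs[] = { 0, 15, 16 };
	unsigned int i;

	for (i = 0; i < 3; i++)
		printf("entry %2u: high %#05x low %#05x\n", regs[i],
		       gmac_addr_high(regs[i]), gmac_addr_low(regs[i]));
	/* entry 0: 0x040/0x044, entry 15: 0x0b8/0x0bc, entry 16: 0x800/0x804 */
	return 0;
}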
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright(c) 2007 Yuri Tikhonov <[email protected]> * Copyright(c) 2009 Intel Corporation */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/raid/pq.h> #include <linux/async_tx.h> #include <linux/gfp.h> /* * struct pq_scribble_page - space to hold throwaway P or Q buffer for * synchronous gen_syndrome */ static struct page *pq_scribble_page; /* the struct page *blocks[] parameter passed to async_gen_syndrome() * and async_syndrome_val() contains the 'P' destination address at * blocks[disks-2] and the 'Q' destination address at blocks[disks-1] * * note: these are macros as they are used as lvalues */ #define P(b, d) (b[d-2]) #define Q(b, d) (b[d-1]) #define MAX_DISKS 255 /* * do_async_gen_syndrome - asynchronously calculate P and/or Q */ static __async_inline struct dma_async_tx_descriptor * do_async_gen_syndrome(struct dma_chan *chan, const unsigned char *scfs, int disks, struct dmaengine_unmap_data *unmap, enum dma_ctrl_flags dma_flags, struct async_submit_ctl *submit) { struct dma_async_tx_descriptor *tx = NULL; struct dma_device *dma = chan->device; enum async_tx_flags flags_orig = submit->flags; dma_async_tx_callback cb_fn_orig = submit->cb_fn; dma_async_tx_callback cb_param_orig = submit->cb_param; int src_cnt = disks - 2; unsigned short pq_src_cnt; dma_addr_t dma_dest[2]; int src_off = 0; while (src_cnt > 0) { submit->flags = flags_orig; pq_src_cnt = min(src_cnt, dma_maxpq(dma, dma_flags)); /* if we are submitting additional pqs, leave the chain open, * clear the callback parameters, and leave the destination * buffers mapped */ if (src_cnt > pq_src_cnt) { submit->flags &= ~ASYNC_TX_ACK; submit->flags |= ASYNC_TX_FENCE; submit->cb_fn = NULL; submit->cb_param = NULL; } else { submit->cb_fn = cb_fn_orig; submit->cb_param = cb_param_orig; if (cb_fn_orig) dma_flags |= DMA_PREP_INTERRUPT; } if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; /* Drivers force forward progress in case they can not provide * a descriptor */ for (;;) { dma_dest[0] = unmap->addr[disks - 2]; dma_dest[1] = unmap->addr[disks - 1]; tx = dma->device_prep_dma_pq(chan, dma_dest, &unmap->addr[src_off], pq_src_cnt, &scfs[src_off], unmap->len, dma_flags); if (likely(tx)) break; async_tx_quiesce(&submit->depend_tx); dma_async_issue_pending(chan); } dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); submit->depend_tx = tx; /* drop completed sources */ src_cnt -= pq_src_cnt; src_off += pq_src_cnt; dma_flags |= DMA_PREP_CONTINUE; } return tx; } /* * do_sync_gen_syndrome - synchronously calculate a raid6 syndrome */ static void do_sync_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) { void **srcs; int i; int start = -1, stop = disks - 3; if (submit->scribble) srcs = submit->scribble; else srcs = (void **) blocks; for (i = 0; i < disks; i++) { if (blocks[i] == NULL) { BUG_ON(i > disks - 3); /* P or Q can't be zero */ srcs[i] = (void*)raid6_empty_zero_page; } else { srcs[i] = page_address(blocks[i]) + offsets[i]; if (i < disks - 2) { stop = i; if (start == -1) start = i; } } } if (submit->flags & ASYNC_TX_PQ_XOR_DST) { BUG_ON(!raid6_call.xor_syndrome); if (start >= 0) raid6_call.xor_syndrome(disks, start, stop, len, srcs); } else raid6_call.gen_syndrome(disks, len, srcs); async_tx_sync_epilog(submit); } static inline bool is_dma_pq_aligned_offs(struct dma_device *dev, unsigned int *offs, int src_cnt, size_t 
len) { int i; for (i = 0; i < src_cnt; i++) { if (!is_dma_pq_aligned(dev, offs[i], 0, len)) return false; } return true; } /** * async_gen_syndrome - asynchronously calculate a raid6 syndrome * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 * @offsets: offset array into each block (src and dest) to start transaction * @disks: number of blocks (including missing P or Q, see below) * @len: length of operation in bytes * @submit: submission/completion modifiers * * General note: This routine assumes a field of GF(2^8) with a * primitive polynomial of 0x11d and a generator of {02}. * * 'disks' note: callers can optionally omit either P or Q (but not * both) from the calculation by setting blocks[disks-2] or * blocks[disks-1] to NULL. When P or Q is omitted 'len' must be <= * PAGE_SIZE as a temporary buffer of this size is used in the * synchronous path. 'disks' always accounts for both destination * buffers. If any source buffers (blocks[i] where i < disks - 2) are * set to NULL those buffers will be replaced with the raid6_zero_page * in the synchronous path and omitted in the hardware-asynchronous * path. */ struct dma_async_tx_descriptor * async_gen_syndrome(struct page **blocks, unsigned int *offsets, int disks, size_t len, struct async_submit_ctl *submit) { int src_cnt = disks - 2; struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ, &P(blocks, disks), 2, blocks, src_cnt, len); struct dma_device *device = chan ? chan->device : NULL; struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks > MAX_DISKS || !(P(blocks, disks) || Q(blocks, disks))); if (device) unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); /* XORing P/Q is only implemented in software */ if (unmap && !(submit->flags & ASYNC_TX_PQ_XOR_DST) && (src_cnt <= dma_maxpq(device, 0) || dma_maxpq(device, DMA_PREP_CONTINUE) > 0) && is_dma_pq_aligned_offs(device, offsets, disks, len)) { struct dma_async_tx_descriptor *tx; enum dma_ctrl_flags dma_flags = 0; unsigned char coefs[MAX_DISKS]; int i, j; /* run the p+q asynchronously */ pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); /* convert source addresses being careful to collapse 'empty' * sources and update the coefficients accordingly */ unmap->len = len; for (i = 0, j = 0; i < src_cnt; i++) { if (blocks[i] == NULL) continue; unmap->addr[j] = dma_map_page(device->dev, blocks[i], offsets[i], len, DMA_TO_DEVICE); coefs[j] = raid6_gfexp[i]; unmap->to_cnt++; j++; } /* * DMAs use destinations as sources, * so use BIDIRECTIONAL mapping */ unmap->bidi_cnt++; if (P(blocks, disks)) unmap->addr[j++] = dma_map_page(device->dev, P(blocks, disks), P(offsets, disks), len, DMA_BIDIRECTIONAL); else { unmap->addr[j++] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_P; } unmap->bidi_cnt++; if (Q(blocks, disks)) unmap->addr[j++] = dma_map_page(device->dev, Q(blocks, disks), Q(offsets, disks), len, DMA_BIDIRECTIONAL); else { unmap->addr[j++] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_Q; } tx = do_async_gen_syndrome(chan, coefs, j, unmap, dma_flags, submit); dmaengine_unmap_put(unmap); return tx; } dmaengine_unmap_put(unmap); /* run the pq synchronously */ pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); /* wait for any prerequisite operations */ async_tx_quiesce(&submit->depend_tx); if (!P(blocks, disks)) { P(blocks, disks) = pq_scribble_page; P(offsets, disks) = 0; } if (!Q(blocks, disks)) { Q(blocks, disks) = pq_scribble_page; Q(offsets, disks) = 0; } do_sync_gen_syndrome(blocks, offsets, disks, len, submit); return NULL; } 
EXPORT_SYMBOL_GPL(async_gen_syndrome); static inline struct dma_chan * pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len) { #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA return NULL; #endif return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0, blocks, disks, len); } /** * async_syndrome_val - asynchronously validate a raid6 syndrome * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1 * @offsets: common offset into each block (src and dest) to start transaction * @disks: number of blocks (including missing P or Q, see below) * @len: length of operation in bytes * @pqres: on val failure SUM_CHECK_P_RESULT and/or SUM_CHECK_Q_RESULT are set * @spare: temporary result buffer for the synchronous case * @s_off: spare buffer page offset * @submit: submission / completion modifiers * * The same notes from async_gen_syndrome apply to the 'blocks', * and 'disks' parameters of this routine. The synchronous path * requires a temporary result buffer and submit->scribble to be * specified. */ struct dma_async_tx_descriptor * async_syndrome_val(struct page **blocks, unsigned int *offsets, int disks, size_t len, enum sum_check_flags *pqres, struct page *spare, unsigned int s_off, struct async_submit_ctl *submit) { struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len); struct dma_device *device = chan ? chan->device : NULL; struct dma_async_tx_descriptor *tx; unsigned char coefs[MAX_DISKS]; enum dma_ctrl_flags dma_flags = submit->cb_fn ? DMA_PREP_INTERRUPT : 0; struct dmaengine_unmap_data *unmap = NULL; BUG_ON(disks < 4 || disks > MAX_DISKS); if (device) unmap = dmaengine_get_unmap_data(device->dev, disks, GFP_NOWAIT); if (unmap && disks <= dma_maxpq(device, 0) && is_dma_pq_aligned_offs(device, offsets, disks, len)) { struct device *dev = device->dev; dma_addr_t pq[2]; int i, j = 0, src_cnt = 0; pr_debug("%s: (async) disks: %d len: %zu\n", __func__, disks, len); unmap->len = len; for (i = 0; i < disks-2; i++) if (likely(blocks[i])) { unmap->addr[j] = dma_map_page(dev, blocks[i], offsets[i], len, DMA_TO_DEVICE); coefs[j] = raid6_gfexp[i]; unmap->to_cnt++; src_cnt++; j++; } if (!P(blocks, disks)) { pq[0] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_P; } else { pq[0] = dma_map_page(dev, P(blocks, disks), P(offsets, disks), len, DMA_TO_DEVICE); unmap->addr[j++] = pq[0]; unmap->to_cnt++; } if (!Q(blocks, disks)) { pq[1] = 0; dma_flags |= DMA_PREP_PQ_DISABLE_Q; } else { pq[1] = dma_map_page(dev, Q(blocks, disks), Q(offsets, disks), len, DMA_TO_DEVICE); unmap->addr[j++] = pq[1]; unmap->to_cnt++; } if (submit->flags & ASYNC_TX_FENCE) dma_flags |= DMA_PREP_FENCE; for (;;) { tx = device->device_prep_dma_pq_val(chan, pq, unmap->addr, src_cnt, coefs, len, pqres, dma_flags); if (likely(tx)) break; async_tx_quiesce(&submit->depend_tx); dma_async_issue_pending(chan); } dma_set_unmap(tx, unmap); async_tx_submit(chan, tx, submit); } else { struct page *p_src = P(blocks, disks); unsigned int p_off = P(offsets, disks); struct page *q_src = Q(blocks, disks); unsigned int q_off = Q(offsets, disks); enum async_tx_flags flags_orig = submit->flags; dma_async_tx_callback cb_fn_orig = submit->cb_fn; void *scribble = submit->scribble; void *cb_param_orig = submit->cb_param; void *p, *q, *s; pr_debug("%s: (sync) disks: %d len: %zu\n", __func__, disks, len); /* caller must provide a temporary result buffer and * allow the input parameters to be preserved */ BUG_ON(!spare || !scribble); /* wait for any prerequisite operations */ async_tx_quiesce(&submit->depend_tx); /* 
recompute p and/or q into the temporary buffer and then * check to see the result matches the current value */ tx = NULL; *pqres = 0; if (p_src) { init_async_submit(submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL, scribble); tx = async_xor_offs(spare, s_off, blocks, offsets, disks-2, len, submit); async_tx_quiesce(&tx); p = page_address(p_src) + p_off; s = page_address(spare) + s_off; *pqres |= !!memcmp(p, s, len) << SUM_CHECK_P; } if (q_src) { P(blocks, disks) = NULL; Q(blocks, disks) = spare; Q(offsets, disks) = s_off; init_async_submit(submit, 0, NULL, NULL, NULL, scribble); tx = async_gen_syndrome(blocks, offsets, disks, len, submit); async_tx_quiesce(&tx); q = page_address(q_src) + q_off; s = page_address(spare) + s_off; *pqres |= !!memcmp(q, s, len) << SUM_CHECK_Q; } /* restore P, Q and submit */ P(blocks, disks) = p_src; P(offsets, disks) = p_off; Q(blocks, disks) = q_src; Q(offsets, disks) = q_off; submit->cb_fn = cb_fn_orig; submit->cb_param = cb_param_orig; submit->flags = flags_orig; async_tx_sync_epilog(submit); tx = NULL; } dmaengine_unmap_put(unmap); return tx; } EXPORT_SYMBOL_GPL(async_syndrome_val); static int __init async_pq_init(void) { pq_scribble_page = alloc_page(GFP_KERNEL); if (pq_scribble_page) return 0; pr_err("%s: failed to allocate required spare page\n", __func__); return -ENOMEM; } static void __exit async_pq_exit(void) { __free_page(pq_scribble_page); } module_init(async_pq_init); module_exit(async_pq_exit); MODULE_DESCRIPTION("asynchronous raid6 syndrome generation/validation"); MODULE_LICENSE("GPL");
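The syndrome this file offloads (or computes via raid6_call) is standard RAID6 math over GF(2^8) with the 0x11d polynomial and generator {02}, as the kernel-doc above states: P is the plain XOR of the data blocks and Q is the sum of g^i * D_i, which is exactly what the raid6_gfexp[i] coefficients handed to the PQ engine encode. An editor's one-byte-per-disk sketch, evaluating Q in Horner form:

#include <stdint.h>
#include <stdio.h>

/* Multiply by the generator {02} in GF(2^8) modulo 0x11d */
static uint8_t gf_mul2(uint8_t v)
{
	return (uint8_t)((v << 1) ^ ((v & 0x80) ? 0x1d : 0));
}

int main(void)
{
	const uint8_t d[3] = { 0x01, 0x02, 0x03 }; /* one byte per data disk */
	uint8_t p = 0, q = 0;
	int i;

	for (i = 0; i < 3; i++)
		p ^= d[i];		/* P: plain parity */

	for (i = 2; i >= 0; i--)	/* Q = sum of g^i * d[i], Horner form */
		q = gf_mul2(q) ^ d[i];

	printf("P=%#04x Q=%#04x\n", p, q); /* prints P=0x00 Q=0x09 */
	return 0;
}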
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DAL_IRQ_SERVICE_H__ #define __DAL_IRQ_SERVICE_H__ #include "include/irq_service_interface.h" #include "irq_types.h" struct irq_service; struct irq_source_info; struct irq_source_info_funcs { bool (*set)( struct irq_service *irq_service, const struct irq_source_info *info, bool enable); bool (*ack)( struct irq_service *irq_service, const struct irq_source_info *info); }; struct irq_source_info { uint32_t src_id; uint32_t ext_id; uint32_t enable_reg; uint32_t enable_mask; uint32_t enable_value[2]; uint32_t ack_reg; uint32_t ack_mask; uint32_t ack_value; uint32_t status_reg; struct irq_source_info_funcs *funcs; }; struct irq_service_funcs { enum dc_irq_source (*to_dal_irq_source)( struct irq_service *irq_service, uint32_t src_id, uint32_t ext_id); }; struct irq_service { struct dc_context *ctx; const struct irq_source_info *info; const struct irq_service_funcs *funcs; }; void dal_irq_service_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data); void dal_irq_service_ack_generic( struct irq_service *irq_service, const struct irq_source_info *info); void dal_irq_service_set_generic( struct irq_service *irq_service, const struct irq_source_info *info, bool enable); #endif
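The per-ASIC irq_service_*.c files fill irq_source_info tables against the interfaces above; where a source needs no special handling, the set/ack callbacks can simply route through the generic register helpers declared at the end of the header. A minimal, hypothetical wiring (an editor's sketch, not taken from any real DCE/DCN table):

static bool example_irq_set(struct irq_service *irq_service,
			    const struct irq_source_info *info,
			    bool enable)
{
	/* delegate to the generic enable_reg update declared above */
	dal_irq_service_set_generic(irq_service, info, enable);
	return true;
}

static bool example_irq_ack(struct irq_service *irq_service,
			    const struct irq_source_info *info)
{
	/* delegate to the generic ack_reg update declared above */
	dal_irq_service_ack_generic(irq_service, info);
	return true;
}

static struct irq_source_info_funcs example_irq_funcs = {
	.set = example_irq_set,
	.ack = example_irq_ack,
};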
/* SPDX-License-Identifier: GPL-2.0 */ /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2010 - 2015, Intel Corporation. */ #ifndef _IA_CSS_EVENTQ_H #define _IA_CSS_EVENTQ_H #include "ia_css_queue.h" /* queue APIs */ /** * @brief HOST receives event from SP. * * @param[in] eventq_handle eventq_handle. * @param[out] payload The received event payload. * @return 0 - Successfully dequeued. * @return -EINVAL - Invalid argument. * @return -ENODATA - Queue is empty. */ int ia_css_eventq_recv( ia_css_queue_t *eventq_handle, u8 *payload); /** * @brief The Host sends the event to SP. * The caller of this API will be blocked until the event * is sent. * * @param[in] eventq_handle eventq_handle. * @param[in] evt_id The event ID. * @param[in] evt_payload_0 The event payload. * @param[in] evt_payload_1 The event payload. * @param[in] evt_payload_2 The event payload. * @return 0 - Successfully enqueued. * @return -EINVAL - Invalid argument. * @return -ENOBUFS - Queue is full. */ int ia_css_eventq_send( ia_css_queue_t *eventq_handle, u8 evt_id, u8 evt_payload_0, u8 evt_payload_1, u8 evt_payload_2); #endif /* _IA_CSS_EVENTQ_H */
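A typical host-side consumer keeps calling ia_css_eventq_recv() until it returns -ENODATA. The sketch below is an editor's usage illustration against the header above; the 4-byte payload shape (event ID plus three payload bytes, mirroring the send() signature) is an assumption, not something the header guarantees.

static int example_drain_events(ia_css_queue_t *eventq_handle)
{
	u8 payload[4];	/* assumed: evt_id plus three payload bytes */
	int drained = 0;

	while (ia_css_eventq_recv(eventq_handle, payload) == 0)
		drained++;	/* a real consumer would decode payload[] here */

	/* the loop ends with -ENODATA once the queue is empty */
	return drained;
}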
// SPDX-License-Identifier: GPL-2.0 /* * mlx90635.c - Melexis MLX90635 contactless IR temperature sensor * * Copyright (c) 2023 Melexis <[email protected]> * * Driver for the Melexis MLX90635 I2C 16-bit IR thermopile sensor */ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/iopoll.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/limits.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/math64.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/iio/iio.h> /* Memory sections addresses */ #define MLX90635_ADDR_RAM 0x0000 /* Start address of ram */ #define MLX90635_ADDR_EEPROM 0x0018 /* Start address of user eeprom */ /* EEPROM addresses - used at startup */ #define MLX90635_EE_I2C_CFG 0x0018 /* I2C address register initial value */ #define MLX90635_EE_CTRL1 0x001A /* Control register1 initial value */ #define MLX90635_EE_CTRL2 0x001C /* Control register2 initial value */ #define MLX90635_EE_Ha 0x001E /* Ha customer calib value reg 16bit */ #define MLX90635_EE_Hb 0x0020 /* Hb customer calib value reg 16bit */ #define MLX90635_EE_Fa 0x0026 /* Fa calibration register 32bit */ #define MLX90635_EE_FASCALE 0x002A /* Scaling coefficient for Fa register 16bit */ #define MLX90635_EE_Ga 0x002C /* Ga calibration register 16bit */ #define MLX90635_EE_Fb 0x002E /* Fb calibration register 16bit */ #define MLX90635_EE_Ea 0x0030 /* Ea calibration register 32bit */ #define MLX90635_EE_Eb 0x0034 /* Eb calibration register 32bit */ #define MLX90635_EE_P_G 0x0038 /* P_G calibration register 16bit */ #define MLX90635_EE_P_O 0x003A /* P_O calibration register 16bit */ #define MLX90635_EE_Aa 0x003C /* Aa calibration register 16bit */ #define MLX90635_EE_VERSION 0x003E /* Version bits 4:7 and 12:15 */ #define MLX90635_EE_Gb 0x0040 /* Gb calibration register 16bit */ /* Device status register - volatile */ #define MLX90635_REG_STATUS 0x0000 #define MLX90635_STAT_BUSY BIT(6) /* Device busy indicator */ #define MLX90635_STAT_BRST BIT(5) /* Brown out reset indicator */ #define MLX90635_STAT_CYCLE_POS GENMASK(4, 2) /* Data position */ #define MLX90635_STAT_END_CONV BIT(1) /* End of conversion indicator */ #define MLX90635_STAT_DATA_RDY BIT(0) /* Data ready indicator */ /* EEPROM control register address - volatile */ #define MLX90635_REG_EE 0x000C #define MLX90635_EE_ACTIVE BIT(4) /* Power-on EEPROM */ #define MLX90635_EE_BUSY_MASK BIT(15) #define MLX90635_REG_CMD 0x0010 /* Command register address */ /* Control register1 address - volatile */ #define MLX90635_REG_CTRL1 0x0014 #define MLX90635_CTRL1_REFRESH_RATE_MASK GENMASK(2, 0) #define MLX90635_CTRL1_RES_CTRL_MASK GENMASK(4, 3) #define MLX90635_CTRL1_TABLE_MASK BIT(15) /* Table select */ /* Control register2 address - volatile */ #define MLX90635_REG_CTRL2 0x0016 #define MLX90635_CTRL2_BURST_CNT_MASK GENMASK(10, 6) /* Burst count */ #define MLX90635_CTRL2_MODE_MASK GENMASK(12, 11) /* Power mode */ #define MLX90635_CTRL2_SOB_MASK BIT(15) /* PowerModes statuses */ #define MLX90635_PWR_STATUS_HALT 0 #define MLX90635_PWR_STATUS_SLEEP_STEP 1 #define MLX90635_PWR_STATUS_STEP 2 #define MLX90635_PWR_STATUS_CONTINUOUS 3 /* Measurement data addresses */ #define MLX90635_RESULT_1 0x0002 #define MLX90635_RESULT_2 0x0004 #define MLX90635_RESULT_3 0x0006 #define MLX90635_RESULT_4 0x0008 #define MLX90635_RESULT_5 0x000A /* Timings (ms) */ #define 
MLX90635_TIMING_RST_MIN 200 /* Minimum time after addressed reset command */ #define MLX90635_TIMING_RST_MAX 250 /* Maximum time after addressed reset command */ #define MLX90635_TIMING_POLLING 10000 /* Time between bit polling*/ #define MLX90635_TIMING_EE_ACTIVE_MIN 100 /* Minimum time after activating the EEPROM for read */ #define MLX90635_TIMING_EE_ACTIVE_MAX 150 /* Maximum time after activating the EEPROM for read */ /* Magic constants */ #define MLX90635_ID_DSPv1 0x01 /* EEPROM DSP version */ #define MLX90635_RESET_CMD 0x0006 /* Reset sensor (address or global) */ #define MLX90635_MAX_MEAS_NUM 31 /* Maximum number of measurements in list */ #define MLX90635_PTAT_DIV 12 /* Used to divide the PTAT value in pre-processing */ #define MLX90635_IR_DIV 24 /* Used to divide the IR value in pre-processing */ #define MLX90635_SLEEP_DELAY_MS 6000 /* Autosleep delay */ #define MLX90635_MEAS_MAX_TIME 2000 /* Max measurement time in ms for the lowest refresh rate */ #define MLX90635_READ_RETRIES 100 /* Number of read retries before quitting with timeout error */ #define MLX90635_VERSION_MASK (GENMASK(15, 12) | GENMASK(7, 4)) #define MLX90635_DSP_VERSION(reg) (((reg & GENMASK(14, 12)) >> 9) | ((reg & GENMASK(6, 4)) >> 4)) #define MLX90635_DSP_FIXED BIT(15) /** * struct mlx90635_data - private data for the MLX90635 device * @client: I2C client of the device * @lock: Internal mutex because multiple reads are needed for single triggered * measurement to ensure data consistency * @regmap: Regmap of the device registers * @regmap_ee: Regmap of the device EEPROM which can be cached * @emissivity: Object emissivity from 0 to 1000 where 1000 = 1 * @regulator: Regulator of the device * @powerstatus: Current POWER status of the device * @interaction_ts: Timestamp of the last temperature read that is used * for power management in jiffies */ struct mlx90635_data { struct i2c_client *client; struct mutex lock; struct regmap *regmap; struct regmap *regmap_ee; u16 emissivity; struct regulator *regulator; int powerstatus; unsigned long interaction_ts; }; static const struct regmap_range mlx90635_volatile_reg_range[] = { regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS), regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5), regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE), regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD), regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2), }; static const struct regmap_access_table mlx90635_volatile_regs_tbl = { .yes_ranges = mlx90635_volatile_reg_range, .n_yes_ranges = ARRAY_SIZE(mlx90635_volatile_reg_range), }; static const struct regmap_range mlx90635_read_reg_range[] = { regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS), regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5), regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE), regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD), regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2), }; static const struct regmap_access_table mlx90635_readable_regs_tbl = { .yes_ranges = mlx90635_read_reg_range, .n_yes_ranges = ARRAY_SIZE(mlx90635_read_reg_range), }; static const struct regmap_range mlx90635_no_write_reg_range[] = { regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5), }; static const struct regmap_access_table mlx90635_writeable_regs_tbl = { .no_ranges = mlx90635_no_write_reg_range, .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_reg_range), }; static const struct regmap_config mlx90635_regmap = { .name = "mlx90635-registers", .reg_stride = 1, .reg_bits = 16, .val_bits = 16, .volatile_table = 
&mlx90635_volatile_regs_tbl, .rd_table = &mlx90635_readable_regs_tbl, .wr_table = &mlx90635_writeable_regs_tbl, .use_single_read = true, .use_single_write = true, .can_multi_write = false, .reg_format_endian = REGMAP_ENDIAN_BIG, .val_format_endian = REGMAP_ENDIAN_BIG, .cache_type = REGCACHE_RBTREE, }; static const struct regmap_range mlx90635_read_ee_range[] = { regmap_reg_range(MLX90635_EE_I2C_CFG, MLX90635_EE_CTRL2), regmap_reg_range(MLX90635_EE_Ha, MLX90635_EE_Gb), }; static const struct regmap_access_table mlx90635_readable_ees_tbl = { .yes_ranges = mlx90635_read_ee_range, .n_yes_ranges = ARRAY_SIZE(mlx90635_read_ee_range), }; static const struct regmap_range mlx90635_no_write_ee_range[] = { regmap_reg_range(MLX90635_ADDR_EEPROM, MLX90635_EE_Gb), }; static const struct regmap_access_table mlx90635_writeable_ees_tbl = { .no_ranges = mlx90635_no_write_ee_range, .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_ee_range), }; static const struct regmap_config mlx90635_regmap_ee = { .name = "mlx90635-eeprom", .reg_stride = 1, .reg_bits = 16, .val_bits = 16, .volatile_table = NULL, .rd_table = &mlx90635_readable_ees_tbl, .wr_table = &mlx90635_writeable_ees_tbl, .use_single_read = true, .use_single_write = true, .can_multi_write = false, .reg_format_endian = REGMAP_ENDIAN_BIG, .val_format_endian = REGMAP_ENDIAN_BIG, .cache_type = REGCACHE_RBTREE, }; /** * mlx90635_reset_delay() - Give the mlx90635 some time to reset properly * If this is not done, the following I2C command(s) will not be accepted. */ static void mlx90635_reset_delay(void) { usleep_range(MLX90635_TIMING_RST_MIN, MLX90635_TIMING_RST_MAX); } static int mlx90635_pwr_sleep_step(struct mlx90635_data *data) { int ret; if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) return 0; ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK, FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_SLEEP_STEP)); if (ret < 0) return ret; data->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP; return 0; } static int mlx90635_pwr_continuous(struct mlx90635_data *data) { int ret; if (data->powerstatus == MLX90635_PWR_STATUS_CONTINUOUS) return 0; ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK, FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_CONTINUOUS)); if (ret < 0) return ret; data->powerstatus = MLX90635_PWR_STATUS_CONTINUOUS; return 0; } static int mlx90635_read_ee_register(struct regmap *regmap, u16 reg_lsb, s32 *reg_value) { unsigned int read; u32 value; int ret; ret = regmap_read(regmap, reg_lsb + 2, &read); if (ret < 0) return ret; value = read; ret = regmap_read(regmap, reg_lsb, &read); if (ret < 0) return ret; *reg_value = (read << 16) | (value & 0xffff); return 0; } static int mlx90635_read_ee_ambient(struct regmap *regmap, s16 *PG, s16 *PO, s16 *Gb) { unsigned int read_tmp; int ret; ret = regmap_read(regmap, MLX90635_EE_P_O, &read_tmp); if (ret < 0) return ret; *PO = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_P_G, &read_tmp); if (ret < 0) return ret; *PG = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp); if (ret < 0) return ret; *Gb = (u16)read_tmp; return 0; } static int mlx90635_read_ee_object(struct regmap *regmap, u32 *Ea, u32 *Eb, u32 *Fa, s16 *Fb, s16 *Ga, s16 *Gb, s16 *Ha, s16 *Hb, u16 *Fa_scale) { unsigned int read_tmp; int ret; ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Ea, Ea); if (ret < 0) return ret; ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Eb, Eb); if (ret < 0) return ret; ret = 
mlx90635_read_ee_register(regmap, MLX90635_EE_Fa, Fa); if (ret < 0) return ret; ret = regmap_read(regmap, MLX90635_EE_Ha, &read_tmp); if (ret < 0) return ret; *Ha = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_Hb, &read_tmp); if (ret < 0) return ret; *Hb = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_Ga, &read_tmp); if (ret < 0) return ret; *Ga = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp); if (ret < 0) return ret; *Gb = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_Fb, &read_tmp); if (ret < 0) return ret; *Fb = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_EE_FASCALE, &read_tmp); if (ret < 0) return ret; *Fa_scale = (u16)read_tmp; return 0; } static int mlx90635_calculate_dataset_ready_time(struct mlx90635_data *data, int *refresh_time) { unsigned int reg; int ret; ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, &reg); if (ret < 0) return ret; *refresh_time = 2 * (MLX90635_MEAS_MAX_TIME >> FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg)) + 80; return 0; } static int mlx90635_perform_measurement_burst(struct mlx90635_data *data) { unsigned int reg_status; int refresh_time; int ret; ret = regmap_write_bits(data->regmap, MLX90635_REG_STATUS, MLX90635_STAT_END_CONV, MLX90635_STAT_END_CONV); if (ret < 0) return ret; ret = mlx90635_calculate_dataset_ready_time(data, &refresh_time); if (ret < 0) return ret; ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1), FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1)); if (ret < 0) return ret; msleep(refresh_time); /* Wait minimum time for dataset to be ready */ ret = regmap_read_poll_timeout(data->regmap, MLX90635_REG_STATUS, reg_status, (!(reg_status & MLX90635_STAT_END_CONV)) == 0, MLX90635_TIMING_POLLING, MLX90635_READ_RETRIES * 10000); if (ret < 0) { dev_err(&data->client->dev, "data not ready"); return -ETIMEDOUT; } return 0; } static int mlx90635_read_ambient_raw(struct regmap *regmap, s16 *ambient_new_raw, s16 *ambient_old_raw) { unsigned int read_tmp; int ret; ret = regmap_read(regmap, MLX90635_RESULT_2, &read_tmp); if (ret < 0) return ret; *ambient_new_raw = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_RESULT_3, &read_tmp); if (ret < 0) return ret; *ambient_old_raw = (s16)read_tmp; return 0; } static int mlx90635_read_object_raw(struct regmap *regmap, s16 *object_raw) { unsigned int read_tmp; s16 read; int ret; ret = regmap_read(regmap, MLX90635_RESULT_1, &read_tmp); if (ret < 0) return ret; read = (s16)read_tmp; ret = regmap_read(regmap, MLX90635_RESULT_4, &read_tmp); if (ret < 0) return ret; *object_raw = (read - (s16)read_tmp) / 2; return 0; } static int mlx90635_read_all_channel(struct mlx90635_data *data, s16 *ambient_new_raw, s16 *ambient_old_raw, s16 *object_raw) { int ret; mutex_lock(&data->lock); if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) { /* Trigger measurement in Sleep Step mode */ ret = mlx90635_perform_measurement_burst(data); if (ret < 0) goto read_unlock; } ret = mlx90635_read_ambient_raw(data->regmap, ambient_new_raw, ambient_old_raw); if (ret < 0) goto read_unlock; ret = mlx90635_read_object_raw(data->regmap, object_raw); read_unlock: mutex_unlock(&data->lock); return ret; } static s64 mlx90635_preprocess_temp_amb(s16 ambient_new_raw, s16 ambient_old_raw, s16 Gb) { s64 VR_Ta, kGb, tmp; kGb = ((s64)Gb * 1000LL) >> 10ULL; VR_Ta = (s64)ambient_old_raw * 1000000LL + kGb * div64_s64(((s64)ambient_new_raw * 1000LL), (MLX90635_PTAT_DIV)); tmp = div64_s64( div64_s64(((s64)ambient_new_raw * 1000000000000LL), 
(MLX90635_PTAT_DIV)), VR_Ta); return div64_s64(tmp << 19ULL, 1000LL); } static s64 mlx90635_preprocess_temp_obj(s16 object_raw, s16 ambient_new_raw, s16 ambient_old_raw, s16 Gb) { s64 VR_IR, kGb, tmp; kGb = ((s64)Gb * 1000LL) >> 10ULL; VR_IR = (s64)ambient_old_raw * 1000000LL + kGb * (div64_s64((s64)ambient_new_raw * 1000LL, MLX90635_PTAT_DIV)); tmp = div64_s64( div64_s64((s64)(object_raw * 1000000LL), MLX90635_IR_DIV) * 1000000LL, VR_IR); return div64_s64((tmp << 19ULL), 1000LL); } static s32 mlx90635_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw, u16 P_G, u16 P_O, s16 Gb) { s64 kPG, kPO, AMB; AMB = mlx90635_preprocess_temp_amb(ambient_new_raw, ambient_old_raw, Gb); kPG = ((s64)P_G * 1000000LL) >> 9ULL; kPO = AMB - (((s64)P_O * 1000LL) >> 1ULL); return 30 * 1000LL + div64_s64(kPO * 1000000LL, kPG); } static s32 mlx90635_calc_temp_object_iteration(s32 prev_object_temp, s64 object, s64 TAdut, s64 TAdut4, s16 Ga, u32 Fa, u16 Fa_scale, s16 Fb, s16 Ha, s16 Hb, u16 emissivity) { s64 calcedGa, calcedGb, calcedFa, Alpha_corr; s64 Ha_customer, Hb_customer; Ha_customer = ((s64)Ha * 1000000LL) >> 14ULL; Hb_customer = ((s64)Hb * 100) >> 10ULL; calcedGa = ((s64)((s64)Ga * (prev_object_temp - 35 * 1000LL) * 1000LL)) >> 24LL; calcedGb = ((s64)(Fb * (TAdut - 30 * 1000000LL))) >> 24LL; Alpha_corr = ((s64)((s64)Fa * Ha_customer * 10000LL) >> Fa_scale); Alpha_corr *= ((s64)(1 * 1000000LL + calcedGa + calcedGb)); Alpha_corr = div64_s64(Alpha_corr, 1000LL); Alpha_corr *= emissivity; Alpha_corr = div64_s64(Alpha_corr, 100LL); calcedFa = div64_s64((s64)object * 100000000000LL, Alpha_corr); return (int_sqrt64(int_sqrt64(calcedFa * 100000000LL + TAdut4)) - 27315 - Hb_customer) * 10; } static s64 mlx90635_calc_ta4(s64 TAdut, s64 scale) { return (div64_s64(TAdut, scale) + 27315) * (div64_s64(TAdut, scale) + 27315) * (div64_s64(TAdut, scale) + 27315) * (div64_s64(TAdut, scale) + 27315); } static s32 mlx90635_calc_temp_object(s64 object, s64 ambient, u32 Ea, u32 Eb, s16 Ga, u32 Fa, u16 Fa_scale, s16 Fb, s16 Ha, s16 Hb, u16 tmp_emi) { s64 kTA, kTA0, TAdut, TAdut4; s64 temp = 35000; s8 i; kTA = (Ea * 1000LL) >> 16LL; kTA0 = (Eb * 1000LL) >> 8LL; TAdut = div64_s64(((ambient - kTA0) * 1000000LL), kTA) + 30 * 1000000LL; TAdut4 = mlx90635_calc_ta4(TAdut, 10000LL); /* Iterations of calculation as described in datasheet */ for (i = 0; i < 5; ++i) { temp = mlx90635_calc_temp_object_iteration(temp, object, TAdut, TAdut4, Ga, Fa, Fa_scale, Fb, Ha, Hb, tmp_emi); } return temp; } static int mlx90635_calc_object(struct mlx90635_data *data, int *val) { s16 ambient_new_raw, ambient_old_raw, object_raw; s16 Fb, Ga, Gb, Ha, Hb; s64 object, ambient; u32 Ea, Eb, Fa; u16 Fa_scale; int ret; ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale); if (ret < 0) return ret; ret = mlx90635_read_all_channel(data, &ambient_new_raw, &ambient_old_raw, &object_raw); if (ret < 0) return ret; ambient = mlx90635_preprocess_temp_amb(ambient_new_raw, ambient_old_raw, Gb); object = mlx90635_preprocess_temp_obj(object_raw, ambient_new_raw, ambient_old_raw, Gb); *val = mlx90635_calc_temp_object(object, ambient, Ea, Eb, Ga, Fa, Fa_scale, Fb, Ha, Hb, data->emissivity); return 0; } static int mlx90635_calc_ambient(struct mlx90635_data *data, int *val) { s16 ambient_new_raw, ambient_old_raw; s16 PG, PO, Gb; int ret; ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb); if (ret < 0) return ret; mutex_lock(&data->lock); if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) { ret = 
mlx90635_perform_measurement_burst(data); if (ret < 0) goto read_ambient_unlock; } ret = mlx90635_read_ambient_raw(data->regmap, &ambient_new_raw, &ambient_old_raw); read_ambient_unlock: mutex_unlock(&data->lock); if (ret < 0) return ret; *val = mlx90635_calc_temp_ambient(ambient_new_raw, ambient_old_raw, PG, PO, Gb); return ret; } static int mlx90635_get_refresh_rate(struct mlx90635_data *data, unsigned int *refresh_rate) { unsigned int reg; int ret; ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, &reg); if (ret < 0) return ret; *refresh_rate = FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg); return 0; } static const struct { int val; int val2; } mlx90635_freqs[] = { { 0, 200000 }, { 0, 500000 }, { 0, 900000 }, { 1, 700000 }, { 3, 0 }, { 4, 800000 }, { 6, 900000 }, { 8, 900000 } }; /** * mlx90635_pm_interaction_wakeup() - Measure time between user interactions to change powermode * @data: pointer to mlx90635_data object containing interaction_ts information * * Switch to continuous mode when interaction is faster than MLX90635_MEAS_MAX_TIME. Update the * interaction_ts for each function call with the jiffies to enable measurement between function * calls. Initial value of the interaction_ts needs to be set before this function call. */ static int mlx90635_pm_interaction_wakeup(struct mlx90635_data *data) { unsigned long now; int ret; now = jiffies; if (time_in_range(now, data->interaction_ts, data->interaction_ts + msecs_to_jiffies(MLX90635_MEAS_MAX_TIME + 100))) { ret = mlx90635_pwr_continuous(data); if (ret < 0) return ret; } data->interaction_ts = now; return 0; } static int mlx90635_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *channel, int *val, int *val2, long mask) { struct mlx90635_data *data = iio_priv(indio_dev); int ret; int cr; pm_runtime_get_sync(&data->client->dev); ret = mlx90635_pm_interaction_wakeup(data); if (ret < 0) goto mlx90635_read_raw_pm; switch (mask) { case IIO_CHAN_INFO_PROCESSED: switch (channel->channel2) { case IIO_MOD_TEMP_AMBIENT: ret = mlx90635_calc_ambient(data, val); if (ret < 0) goto mlx90635_read_raw_pm; ret = IIO_VAL_INT; break; case IIO_MOD_TEMP_OBJECT: ret = mlx90635_calc_object(data, val); if (ret < 0) goto mlx90635_read_raw_pm; ret = IIO_VAL_INT; break; default: ret = -EINVAL; break; } break; case IIO_CHAN_INFO_CALIBEMISSIVITY: if (data->emissivity == 1000) { *val = 1; *val2 = 0; } else { *val = 0; *val2 = data->emissivity * 1000; } ret = IIO_VAL_INT_PLUS_MICRO; break; case IIO_CHAN_INFO_SAMP_FREQ: ret = mlx90635_get_refresh_rate(data, &cr); if (ret < 0) goto mlx90635_read_raw_pm; *val = mlx90635_freqs[cr].val; *val2 = mlx90635_freqs[cr].val2; ret = IIO_VAL_INT_PLUS_MICRO; break; default: ret = -EINVAL; break; } mlx90635_read_raw_pm: pm_runtime_mark_last_busy(&data->client->dev); pm_runtime_put_autosuspend(&data->client->dev); return ret; } static int mlx90635_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *channel, int val, int val2, long mask) { struct mlx90635_data *data = iio_priv(indio_dev); int ret; int i; switch (mask) { case IIO_CHAN_INFO_CALIBEMISSIVITY: /* Confirm we are within 0 and 1.0 */ if (val < 0 || val2 < 0 || val > 1 || (val == 1 && val2 != 0)) return -EINVAL; data->emissivity = val * 1000 + val2 / 1000; return 0; case IIO_CHAN_INFO_SAMP_FREQ: for (i = 0; i < ARRAY_SIZE(mlx90635_freqs); i++) { if (val == mlx90635_freqs[i].val && val2 == mlx90635_freqs[i].val2) break; } if (i == ARRAY_SIZE(mlx90635_freqs)) return -EINVAL; ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL1, 
MLX90635_CTRL1_REFRESH_RATE_MASK, i); return ret; default: return -EINVAL; } } static int mlx90635_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, int *type, int *length, long mask) { switch (mask) { case IIO_CHAN_INFO_SAMP_FREQ: *vals = (int *)mlx90635_freqs; *type = IIO_VAL_INT_PLUS_MICRO; *length = 2 * ARRAY_SIZE(mlx90635_freqs); return IIO_AVAIL_LIST; default: return -EINVAL; } } static const struct iio_chan_spec mlx90635_channels[] = { { .type = IIO_TEMP, .modified = 1, .channel2 = IIO_MOD_TEMP_AMBIENT, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_TEMP, .modified = 1, .channel2 = IIO_MOD_TEMP_OBJECT, .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | BIT(IIO_CHAN_INFO_CALIBEMISSIVITY), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), }, }; static const struct iio_info mlx90635_info = { .read_raw = mlx90635_read_raw, .write_raw = mlx90635_write_raw, .read_avail = mlx90635_read_avail, }; static void mlx90635_sleep(void *_data) { struct mlx90635_data *data = _data; mlx90635_pwr_sleep_step(data); } static int mlx90635_suspend(struct mlx90635_data *data) { return mlx90635_pwr_sleep_step(data); } static int mlx90635_wakeup(struct mlx90635_data *data) { s16 Fb, Ga, Gb, Ha, Hb, PG, PO; unsigned int dsp_version; u32 Ea, Eb, Fa; u16 Fa_scale; int ret; regcache_cache_bypass(data->regmap_ee, false); regcache_cache_only(data->regmap_ee, false); regcache_cache_only(data->regmap, false); ret = mlx90635_pwr_continuous(data); if (ret < 0) { dev_err(&data->client->dev, "Switch to continuous mode failed\n"); return ret; } ret = regmap_write_bits(data->regmap, MLX90635_REG_EE, MLX90635_EE_ACTIVE, MLX90635_EE_ACTIVE); if (ret < 0) { dev_err(&data->client->dev, "Powering EEPROM failed\n"); return ret; } usleep_range(MLX90635_TIMING_EE_ACTIVE_MIN, MLX90635_TIMING_EE_ACTIVE_MAX); regcache_mark_dirty(data->regmap_ee); ret = regcache_sync(data->regmap_ee); if (ret < 0) { dev_err(&data->client->dev, "Failed to sync cache: %d\n", ret); return ret; } ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb); if (ret < 0) { dev_err(&data->client->dev, "Failed to read to cache Ambient coefficients EEPROM region: %d\n", ret); return ret; } ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale); if (ret < 0) { dev_err(&data->client->dev, "Failed to read to cache Object coefficients EEPROM region: %d\n", ret); return ret; } ret = regmap_read(data->regmap_ee, MLX90635_EE_VERSION, &dsp_version); if (ret < 0) { dev_err(&data->client->dev, "Failed to read to cache of EEPROM version: %d\n", ret); return ret; } regcache_cache_only(data->regmap_ee, true); return ret; } static void mlx90635_disable_regulator(void *_data) { struct mlx90635_data *data = _data; int ret; ret = regulator_disable(data->regulator); if (ret < 0) dev_err(regmap_get_device(data->regmap), "Failed to disable power regulator: %d\n", ret); } static int mlx90635_enable_regulator(struct mlx90635_data *data) { int ret; ret = regulator_enable(data->regulator); if (ret < 0) { dev_err(regmap_get_device(data->regmap), "Failed to enable power regulator!\n"); return ret; } mlx90635_reset_delay(); return ret; } static int mlx90635_probe(struct i2c_client *client) { struct mlx90635_data *mlx90635; struct iio_dev *indio_dev; unsigned int dsp_version; struct 
regmap *regmap; struct regmap *regmap_ee; int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90635)); if (!indio_dev) return dev_err_probe(&client->dev, -ENOMEM, "failed to allocate device\n"); regmap = devm_regmap_init_i2c(client, &mlx90635_regmap); if (IS_ERR(regmap)) return dev_err_probe(&client->dev, PTR_ERR(regmap), "failed to allocate regmap\n"); regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee); if (IS_ERR(regmap_ee)) return dev_err_probe(&client->dev, PTR_ERR(regmap_ee), "failed to allocate EEPROM regmap\n"); mlx90635 = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); mlx90635->client = client; mlx90635->regmap = regmap; mlx90635->regmap_ee = regmap_ee; mlx90635->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP; mutex_init(&mlx90635->lock); indio_dev->name = "mlx90635"; indio_dev->modes = INDIO_DIRECT_MODE; indio_dev->info = &mlx90635_info; indio_dev->channels = mlx90635_channels; indio_dev->num_channels = ARRAY_SIZE(mlx90635_channels); mlx90635->regulator = devm_regulator_get(&client->dev, "vdd"); if (IS_ERR(mlx90635->regulator)) return dev_err_probe(&client->dev, PTR_ERR(mlx90635->regulator), "failed to get vdd regulator"); ret = mlx90635_enable_regulator(mlx90635); if (ret < 0) return ret; ret = devm_add_action_or_reset(&client->dev, mlx90635_disable_regulator, mlx90635); if (ret < 0) return dev_err_probe(&client->dev, ret, "failed to setup regulator cleanup action\n"); ret = mlx90635_wakeup(mlx90635); if (ret < 0) return dev_err_probe(&client->dev, ret, "wakeup failed\n"); ret = devm_add_action_or_reset(&client->dev, mlx90635_sleep, mlx90635); if (ret < 0) return dev_err_probe(&client->dev, ret, "failed to setup low power cleanup\n"); ret = regmap_read(mlx90635->regmap_ee, MLX90635_EE_VERSION, &dsp_version); if (ret < 0) return dev_err_probe(&client->dev, ret, "read of version failed\n"); dsp_version = dsp_version & MLX90635_VERSION_MASK; if (FIELD_GET(MLX90635_DSP_FIXED, dsp_version)) { if (MLX90635_DSP_VERSION(dsp_version) == MLX90635_ID_DSPv1) { dev_dbg(&client->dev, "Detected DSP v1 calibration %x\n", dsp_version); } else { dev_dbg(&client->dev, "Detected Unknown EEPROM calibration %lx\n", MLX90635_DSP_VERSION(dsp_version)); } } else { return dev_err_probe(&client->dev, -EPROTONOSUPPORT, "Wrong fixed top bit %x (expected 0x8X0X)\n", dsp_version); } mlx90635->emissivity = 1000; mlx90635->interaction_ts = jiffies; /* Set initial value */ pm_runtime_get_noresume(&client->dev); pm_runtime_set_active(&client->dev); ret = devm_pm_runtime_enable(&client->dev); if (ret) return dev_err_probe(&client->dev, ret, "failed to enable powermanagement\n"); pm_runtime_set_autosuspend_delay(&client->dev, MLX90635_SLEEP_DELAY_MS); pm_runtime_use_autosuspend(&client->dev); pm_runtime_put_autosuspend(&client->dev); return devm_iio_device_register(&client->dev, indio_dev); } static const struct i2c_device_id mlx90635_id[] = { { "mlx90635" }, { } }; MODULE_DEVICE_TABLE(i2c, mlx90635_id); static const struct of_device_id mlx90635_of_match[] = { { .compatible = "melexis,mlx90635" }, { } }; MODULE_DEVICE_TABLE(of, mlx90635_of_match); static int mlx90635_pm_suspend(struct device *dev) { struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev)); int ret; ret = mlx90635_suspend(data); if (ret < 0) return ret; ret = regulator_disable(data->regulator); if (ret < 0) dev_err(regmap_get_device(data->regmap), "Failed to disable power regulator: %d\n", ret); return ret; } static int mlx90635_pm_resume(struct device *dev) { struct mlx90635_data *data = 
iio_priv(dev_get_drvdata(dev)); int ret; ret = mlx90635_enable_regulator(data); if (ret < 0) return ret; return mlx90635_wakeup(data); } static int mlx90635_pm_runtime_suspend(struct device *dev) { struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev)); return mlx90635_pwr_sleep_step(data); } static const struct dev_pm_ops mlx90635_pm_ops = { SYSTEM_SLEEP_PM_OPS(mlx90635_pm_suspend, mlx90635_pm_resume) RUNTIME_PM_OPS(mlx90635_pm_runtime_suspend, NULL, NULL) }; static struct i2c_driver mlx90635_driver = { .driver = { .name = "mlx90635", .of_match_table = mlx90635_of_match, .pm = pm_ptr(&mlx90635_pm_ops), }, .probe = mlx90635_probe, .id_table = mlx90635_id, }; module_i2c_driver(mlx90635_driver); MODULE_AUTHOR("Crt Mori <[email protected]>"); MODULE_DESCRIPTION("Melexis MLX90635 contactless Infra Red temperature sensor driver"); MODULE_LICENSE("GPL");
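/*
 * Illustrative sketch (not part of the driver above): a device tree node
 * that would bind this driver. The node name, parent bus label, regulator
 * phandle and the I2C address 0x3a are assumed placeholders; only the
 * compatible string and the "vdd" supply are taken from mlx90635_of_match
 * and mlx90635_probe().
 *
 *	&i2c1 {
 *		temperature-sensor@3a {
 *			compatible = "melexis,mlx90635";
 *			reg = <0x3a>;
 *			vdd-supply = <&vdd_sensor_reg>;
 *		};
 *	};
 */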
// SPDX-License-Identifier: GPL-2.0-only /* * pata_mpiix.c - Intel MPIIX PATA for new ATA layer * (C) 2005-2006 Red Hat Inc * Alan Cox <[email protected]> * * The MPIIX is different enough to the PIIX4 and friends that we give it * a separate driver. The old ide/pci code handles this by just not tuning * MPIIX at all. * * The MPIIX also differs in another important way from the majority of PIIX * devices. The chip is a bridge (pardon the pun) between the old world of * ISA IDE and PCI IDE. Although the ATA timings are PCI configured the actual * IDE controller is not decoded in PCI space and the chip does not claim to * be IDE class PCI. This requires slightly non-standard probe logic compared * with PCI IDE and also that we do not disable the device when our driver is * unloaded (as it has many other functions). * * The driver consciously keeps this logic internally to avoid pushing quirky * PATA history into the clean libata layer. * * Thinkpad specific note: If you boot an MPIIX using a thinkpad with a PCMCIA * hard disk present this driver will not detect it. This is not a bug. In this * configuration the secondary port of the MPIIX is disabled and the addresses * are decoded by the PCMCIA bridge and therefore are for a generic IDE driver * to operate. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <scsi/scsi_host.h> #include <linux/libata.h> #define DRV_NAME "pata_mpiix" #define DRV_VERSION "0.7.7" enum { IDETIM = 0x6C, /* IDE control register */ IORDY = (1 << 1), PPE = (1 << 2), FTIM = (1 << 0), ENABLED = (1 << 15), SECONDARY = (1 << 14) }; static int mpiix_pre_reset(struct ata_link *link, unsigned long deadline) { struct ata_port *ap = link->ap; struct pci_dev *pdev = to_pci_dev(ap->host->dev); static const struct pci_bits mpiix_enable_bits = { 0x6D, 1, 0x80, 0x80 }; if (!pci_test_config_bits(pdev, &mpiix_enable_bits)) return -ENOENT; return ata_sff_prereset(link, deadline); } /** * mpiix_set_piomode - set initial PIO mode data * @ap: ATA interface * @adev: ATA device * * Called to do the PIO mode setup. The MPIIX allows us to program the * IORDY sample point (2-5 clocks), recovery (1-4 clocks) and whether * prefetching or IORDY are used. * * This would get very ugly because we can only program timing for one * device at a time, the other gets PIO0. Fortunately libata calls * our qc_issue command before a command is issued so we can flip the * timings back and forth to reduce the pain. 
*/ static void mpiix_set_piomode(struct ata_port *ap, struct ata_device *adev) { int control = 0; int pio = adev->pio_mode - XFER_PIO_0; struct pci_dev *pdev = to_pci_dev(ap->host->dev); u16 idetim; static const /* ISP RTC */ u8 timings[][2] = { { 0, 0 }, { 0, 0 }, { 1, 0 }, { 2, 1 }, { 2, 3 }, }; pci_read_config_word(pdev, IDETIM, &idetim); /* Mask the IORDY/TIME/PPE for this device */ if (adev->class == ATA_DEV_ATA) control |= PPE; /* Enable prefetch/posting for disk */ if (ata_pio_need_iordy(adev)) control |= IORDY; if (pio > 1) control |= FTIM; /* This drive is on the fast timing bank */ /* Mask out timing and clear both TIME bank selects */ idetim &= 0xCCEE; idetim &= ~(0x07 << (4 * adev->devno)); idetim |= control << (4 * adev->devno); idetim |= (timings[pio][0] << 12) | (timings[pio][1] << 8); pci_write_config_word(pdev, IDETIM, idetim); /* We use ap->private_data as a pointer to the device currently loaded for timing */ ap->private_data = adev; } /** * mpiix_qc_issue - command issue * @qc: command pending * * Called when the libata layer is about to issue a command. We wrap * this interface so that we can load the correct ATA timings if * necessary. Our logic also clears TIME0/TIME1 for the other device so * that, even if we get this wrong, cycles to the other device will * be made PIO0. */ static unsigned int mpiix_qc_issue(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; struct ata_device *adev = qc->dev; /* If modes have been configured and the channel data is not loaded then load it. We have to check if pio_mode is set as the core code does not set adev->pio_mode to XFER_PIO_0 while probing as would be logical */ if (adev->pio_mode && adev != ap->private_data) mpiix_set_piomode(ap, adev); return ata_sff_qc_issue(qc); } static const struct scsi_host_template mpiix_sht = { ATA_PIO_SHT(DRV_NAME), }; static struct ata_port_operations mpiix_port_ops = { .inherits = &ata_sff_port_ops, .qc_issue = mpiix_qc_issue, .cable_detect = ata_cable_40wire, .set_piomode = mpiix_set_piomode, .prereset = mpiix_pre_reset, .sff_data_xfer = ata_sff_data_xfer32, }; static int mpiix_init_one(struct pci_dev *dev, const struct pci_device_id *id) { /* Single threaded by the PCI probe logic */ struct ata_host *host; struct ata_port *ap; void __iomem *cmd_addr, *ctl_addr; u16 idetim; int cmd, ctl, irq; ata_print_version_once(&dev->dev, DRV_VERSION); host = ata_host_alloc(&dev->dev, 1); if (!host) return -ENOMEM; ap = host->ports[0]; /* MPIIX has many functions which can be turned on or off according to other devices present. Make sure IDE is enabled before we try and use it */ pci_read_config_word(dev, IDETIM, &idetim); if (!(idetim & ENABLED)) return -ENODEV; /* See if it's primary or secondary channel... */ if (!(idetim & SECONDARY)) { cmd = 0x1F0; ctl = 0x3F6; irq = 14; } else { cmd = 0x170; ctl = 0x376; irq = 15; } cmd_addr = devm_ioport_map(&dev->dev, cmd, 8); ctl_addr = devm_ioport_map(&dev->dev, ctl, 1); if (!cmd_addr || !ctl_addr) return -ENOMEM; ata_port_desc(ap, "cmd 0x%x ctl 0x%x", cmd, ctl); /* We do our own plumbing to avoid leaking special cases for whacko ancient hardware into the core code. There are two issues to worry about. #1 The chip is a bridge so if in legacy mode and without BARs set fools the setup. 
#2 If you pci_disable_device the MPIIX your box goes castors up */ ap->ops = &mpiix_port_ops; ap->pio_mask = ATA_PIO4; ap->flags |= ATA_FLAG_SLAVE_POSS; ap->ioaddr.cmd_addr = cmd_addr; ap->ioaddr.ctl_addr = ctl_addr; ap->ioaddr.altstatus_addr = ctl_addr; /* Let libata fill in the port details */ ata_sff_std_ports(&ap->ioaddr); /* activate host */ return ata_host_activate(host, irq, ata_sff_interrupt, IRQF_SHARED, &mpiix_sht); } static const struct pci_device_id mpiix[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_82371MX), }, { }, }; static struct pci_driver mpiix_pci_driver = { .name = DRV_NAME, .id_table = mpiix, .probe = mpiix_init_one, .remove = ata_pci_remove_one, #ifdef CONFIG_PM_SLEEP .suspend = ata_pci_device_suspend, .resume = ata_pci_device_resume, #endif }; module_pci_driver(mpiix_pci_driver); MODULE_AUTHOR("Alan Cox"); MODULE_DESCRIPTION("low-level driver for Intel MPIIX"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, mpiix); MODULE_VERSION(DRV_VERSION);
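/*
 * Illustrative note: a worked example of the IDETIM value that
 * mpiix_set_piomode() computes, assuming an ATA disk as device 0
 * negotiated to PIO4 and all preserved register bits (other than
 * ENABLED) clear:
 *
 *	control = PPE | IORDY | FTIM		-> 0x7 (bits 2:0 for devno 0)
 *	timings[4] = { ISP = 2, RTC = 3 }	-> (2 << 12) | (3 << 8) = 0x2300
 *
 * so the register is written as ENABLED | 0x2300 | 0x0007 = 0xA307. Both
 * devices share the single ISP/RTC field, which is why mpiix_qc_issue()
 * reloads the timing whenever the active device changes.
 */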
/* * Defines, structures, APIs for edac_mc module * * (C) 2007 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * * Written by Thayne Harbaugh * Based on work by Dan Hollis <goemon at anime dot net> and others. * http://www.anime.net/~goemon/linux-ecc/ * * NMI handling support added by * Dave Peterson <[email protected]> <[email protected]> * * Refactored for multi-source files: * Doug Thompson <[email protected]> * * Please look at Documentation/driver-api/edac.rst for more info about * EDAC core structs and functions. */ #ifndef _EDAC_MC_H_ #define _EDAC_MC_H_ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/smp.h> #include <linux/pci.h> #include <linux/time.h> #include <linux/nmi.h> #include <linux/rcupdate.h> #include <linux/completion.h> #include <linux/kobject.h> #include <linux/platform_device.h> #include <linux/workqueue.h> #include <linux/edac.h> #if PAGE_SHIFT < 20 #define PAGES_TO_MiB(pages) ((pages) >> (20 - PAGE_SHIFT)) #define MiB_TO_PAGES(mb) ((mb) << (20 - PAGE_SHIFT)) #else /* PAGE_SHIFT >= 20 */ #define PAGES_TO_MiB(pages) ((pages) << (PAGE_SHIFT - 20)) #define MiB_TO_PAGES(mb) ((mb) >> (PAGE_SHIFT - 20)) #endif #define edac_printk(level, prefix, fmt, arg...) \ printk(level "EDAC " prefix ": " fmt, ##arg) #define edac_mc_printk(mci, level, fmt, arg...) \ printk(level "EDAC MC%d: " fmt, mci->mc_idx, ##arg) #define edac_mc_chipset_printk(mci, level, prefix, fmt, arg...) \ printk(level "EDAC " prefix " MC%d: " fmt, mci->mc_idx, ##arg) #define edac_device_printk(ctl, level, fmt, arg...) \ printk(level "EDAC DEVICE%d: " fmt, ctl->dev_idx, ##arg) #define edac_pci_printk(ctl, level, fmt, arg...) \ printk(level "EDAC PCI%d: " fmt, ctl->pci_idx, ##arg) /* prefixes for edac_printk() and edac_mc_printk() */ #define EDAC_MC "MC" #define EDAC_PCI "PCI" #define EDAC_DEBUG "DEBUG" extern const char * const edac_mem_types[]; #ifdef CONFIG_EDAC_DEBUG extern int edac_debug_level; #define edac_dbg(level, fmt, ...) \ do { \ if (level <= edac_debug_level) \ edac_printk(KERN_DEBUG, EDAC_DEBUG, \ "%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) #else /* !CONFIG_EDAC_DEBUG */ #define edac_dbg(level, fmt, ...) \ do { \ if (0) \ edac_printk(KERN_DEBUG, EDAC_DEBUG, \ "%s: " fmt, __func__, ##__VA_ARGS__); \ } while (0) #endif /* !CONFIG_EDAC_DEBUG */ #define PCI_VEND_DEV(vend, dev) PCI_VENDOR_ID_ ## vend, \ PCI_DEVICE_ID_ ## vend ## _ ## dev #define edac_dev_name(dev) (dev)->dev_name #define to_mci(k) container_of(k, struct mem_ctl_info, dev) /** * edac_mc_alloc() - Allocate and partially fill a struct &mem_ctl_info. * * @mc_num: Memory controller number * @n_layers: Number of MC hierarchy layers * @layers: Describes each layer as seen by the Memory Controller * @sz_pvt: size of private storage needed * * * Everything is kmalloc'ed as one big chunk - more efficient. * It can only be used if all structures have the same lifetime - otherwise * you have to allocate and initialize your own structures. * * Use edac_mc_free() to free mc structures allocated by this function. * * .. note:: * * drivers handle multi-rank memories in different ways: in some * drivers, one multi-rank memory stick is mapped as one entry, while, in * others, a single multi-rank memory stick would be mapped into several * entries. Currently, this function will allocate multiple struct dimm_info * in such scenarios, as grouping the multiple ranks would require a driver change.
* * Returns: * On success, returns a pointer to a struct mem_ctl_info; * %NULL otherwise */ struct mem_ctl_info *edac_mc_alloc(unsigned int mc_num, unsigned int n_layers, struct edac_mc_layer *layers, unsigned int sz_pvt); /** * edac_get_owner - Return the owner's mod_name of EDAC MC * * Returns: * Pointer to mod_name string when EDAC MC is owned. NULL otherwise. */ extern const char *edac_get_owner(void); /* * edac_mc_add_mc_with_groups() - Insert the @mci structure into the mci * global list and create sysfs entries associated with @mci structure. * * @mci: pointer to the mci structure to be added to the list * @groups: optional attribute groups for the driver-specific sysfs entries * * Returns: * 0 on success, or an error code on failure */ extern int edac_mc_add_mc_with_groups(struct mem_ctl_info *mci, const struct attribute_group **groups); #define edac_mc_add_mc(mci) edac_mc_add_mc_with_groups(mci, NULL) /** * edac_mc_free() - Frees a previously allocated @mci structure * * @mci: pointer to a struct mem_ctl_info structure */ extern void edac_mc_free(struct mem_ctl_info *mci); /** * edac_has_mcs() - Check if any MCs have been allocated. * * Returns: * True if MC instances have been registered successfully. * False otherwise. */ extern bool edac_has_mcs(void); /** * edac_mc_find() - Search for a mem_ctl_info structure whose index is @idx. * * @idx: index to seek * * If found, return a pointer to the structure. * Else return NULL. */ extern struct mem_ctl_info *edac_mc_find(int idx); /** * find_mci_by_dev() - Scan list of controllers looking for the one that * manages the @dev device. * * @dev: pointer to a struct device related to the MCI * * Returns: on success, returns a pointer to struct &mem_ctl_info; * %NULL otherwise. */ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev); /** * edac_mc_del_mc() - Remove sysfs entries for mci structure associated with * @dev and remove mci structure from global list. * * @dev: Pointer to struct &device representing mci structure to remove. * * Returns: pointer to removed mci structure, or %NULL if device not found. */ extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); /** * edac_mc_find_csrow_by_page() - Ancillary routine to identify what csrow * contains a memory page. * * @mci: pointer to a struct mem_ctl_info structure * @page: memory page to find * * Returns: on success, returns the csrow. -1 if not found. */ extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page); /** * edac_raw_mc_handle_error() - Reports a memory event to userspace without * doing anything to discover the error location. * * @e: error description * * This raw function is used internally by edac_mc_handle_error(). It should * only be called directly when the hardware error comes directly from the BIOS, * like in the case of the APEI GHES driver. */ void edac_raw_mc_handle_error(struct edac_raw_error_desc *e); /** * edac_mc_handle_error() - Reports a memory event to userspace.
* * @type: severity of the error (CE/UE/Fatal) * @mci: a struct mem_ctl_info pointer * @error_count: Number of errors of the same type * @page_frame_number: mem page where the error occurred * @offset_in_page: offset of the error inside the page * @syndrome: ECC syndrome * @top_layer: Memory layer[0] position * @mid_layer: Memory layer[1] position * @low_layer: Memory layer[2] position * @msg: Message meaningful to the end users that * explains the event * @other_detail: Technical details about the event that * may help hardware manufacturers and * EDAC developers to analyse the event */ void edac_mc_handle_error(const enum hw_event_mc_err_type type, struct mem_ctl_info *mci, const u16 error_count, const unsigned long page_frame_number, const unsigned long offset_in_page, const unsigned long syndrome, const int top_layer, const int mid_layer, const int low_layer, const char *msg, const char *other_detail); /* * edac misc APIs */ extern char *edac_op_state_to_string(int op_state); #endif /* _EDAC_MC_H_ */
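/*
 * Illustrative sketch (not from the kernel tree, guarded so it is never
 * compiled): the minimal call flow a memory-controller driver follows with
 * the API declared above. The layer sizes, the parent device and the error
 * details are made-up placeholders; see existing drivers/edac drivers for
 * real usage.
 */
#if 0
static int example_mc_probe(struct device *dev)
{
	struct edac_mc_layer layers[2];
	struct mem_ctl_info *mci;

	layers[0].type = EDAC_MC_LAYER_CHANNEL;
	layers[0].size = 2;			/* placeholder */
	layers[0].is_virt_csrow = false;
	layers[1].type = EDAC_MC_LAYER_SLOT;
	layers[1].size = 4;			/* placeholder */
	layers[1].is_virt_csrow = true;

	mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
	if (!mci)
		return -ENOMEM;

	mci->pdev = dev;
	/* ... fill mtype_cap, edac_ctl_cap, ctl_name, dimm info ... */

	if (edac_mc_add_mc(mci)) {		/* creates the sysfs entries */
		edac_mc_free(mci);
		return -ENODEV;
	}
	return 0;
}

/* On a decoded corrected error, a driver would then report it with: */
static void example_report_ce(struct mem_ctl_info *mci, unsigned long pfn)
{
	edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, pfn, 0, 0,
			     0, 0, -1, "example CE", "");
}
#endif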
// SPDX-License-Identifier: GPL-2.0-or-later /* * taskstats.c - Export per-task statistics to userland * * Copyright (C) Shailabh Nagar, IBM Corp. 2006 * (C) Balbir Singh, IBM Corp. 2006 */ #include <linux/kernel.h> #include <linux/taskstats_kern.h> #include <linux/tsacct_kern.h> #include <linux/acct.h> #include <linux/delayacct.h> #include <linux/cpumask.h> #include <linux/percpu.h> #include <linux/slab.h> #include <linux/cgroupstats.h> #include <linux/cgroup.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/pid_namespace.h> #include <net/genetlink.h> #include <linux/atomic.h> #include <linux/sched/cputime.h> /* * Maximum length of a cpumask that can be specified in * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute */ #define TASKSTATS_CPUMASK_MAXLEN (100+6*NR_CPUS) static DEFINE_PER_CPU(__u32, taskstats_seqnum); static int family_registered; struct kmem_cache *taskstats_cache; static struct genl_family family; static const struct nla_policy taskstats_cmd_get_policy[] = { [TASKSTATS_CMD_ATTR_PID] = { .type = NLA_U32 }, [TASKSTATS_CMD_ATTR_TGID] = { .type = NLA_U32 }, [TASKSTATS_CMD_ATTR_REGISTER_CPUMASK] = { .type = NLA_STRING }, [TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK] = { .type = NLA_STRING },}; static const struct nla_policy cgroupstats_cmd_get_policy[] = { [CGROUPSTATS_CMD_ATTR_FD] = { .type = NLA_U32 }, }; struct listener { struct list_head list; pid_t pid; char valid; }; struct listener_list { struct rw_semaphore sem; struct list_head list; }; static DEFINE_PER_CPU(struct listener_list, listener_array); enum actions { REGISTER, DEREGISTER, CPU_DONT_CARE }; static int prepare_reply(struct genl_info *info, u8 cmd, struct sk_buff **skbp, size_t size) { struct sk_buff *skb; void *reply; /* * If new attributes are added, please revisit this allocation */ skb = genlmsg_new(size, GFP_KERNEL); if (!skb) return -ENOMEM; if (!info) { int seq = this_cpu_inc_return(taskstats_seqnum) - 1; reply = genlmsg_put(skb, 0, seq, &family, 0, cmd); } else reply = genlmsg_put_reply(skb, info, &family, 0, cmd); if (reply == NULL) { nlmsg_free(skb); return -EINVAL; } *skbp = skb; return 0; } /* * Send taskstats data in @skb to listener with nl_pid @pid */ static int send_reply(struct sk_buff *skb, struct genl_info *info) { struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb)); void *reply = genlmsg_data(genlhdr); genlmsg_end(skb, reply); return genlmsg_reply(skb, info); } /* * Send taskstats data in @skb to listeners registered for @cpu's exit data */ static void send_cpu_listeners(struct sk_buff *skb, struct listener_list *listeners) { struct genlmsghdr *genlhdr = nlmsg_data(nlmsg_hdr(skb)); struct listener *s, *tmp; struct sk_buff *skb_next, *skb_cur = skb; void *reply = genlmsg_data(genlhdr); int delcount = 0; genlmsg_end(skb, reply); down_read(&listeners->sem); list_for_each_entry(s, &listeners->list, list) { int rc; skb_next = NULL; if (!list_is_last(&s->list, &listeners->list)) { skb_next = skb_clone(skb_cur, GFP_KERNEL); if (!skb_next) break; } rc = genlmsg_unicast(&init_net, skb_cur, s->pid); if (rc == -ECONNREFUSED) { s->valid = 0; delcount++; } skb_cur = skb_next; } up_read(&listeners->sem); if (skb_cur) nlmsg_free(skb_cur); if (!delcount) return; /* Delete invalidated entries */ down_write(&listeners->sem); list_for_each_entry_safe(s, tmp, &listeners->list, list) { if (!s->valid) { list_del(&s->list); kfree(s); } } up_write(&listeners->sem); } static void exe_add_tsk(struct taskstats *stats, struct task_struct *tsk) { /* No idea if I'm allowed to access that here, now. 
*/ struct file *exe_file = get_task_exe_file(tsk); if (exe_file) { /* Following cp_new_stat64() in stat.c. */ stats->ac_exe_dev = huge_encode_dev(exe_file->f_inode->i_sb->s_dev); stats->ac_exe_inode = exe_file->f_inode->i_ino; fput(exe_file); } else { stats->ac_exe_dev = 0; stats->ac_exe_inode = 0; } } static void fill_stats(struct user_namespace *user_ns, struct pid_namespace *pid_ns, struct task_struct *tsk, struct taskstats *stats) { memset(stats, 0, sizeof(*stats)); /* * Each accounting subsystem adds calls to its functions to * fill in relevant parts of struct taskstats as follows * * per-task-foo(stats, tsk); */ delayacct_add_tsk(stats, tsk); /* fill in basic acct fields */ stats->version = TASKSTATS_VERSION; stats->nvcsw = tsk->nvcsw; stats->nivcsw = tsk->nivcsw; bacct_add_tsk(user_ns, pid_ns, stats, tsk); /* fill in extended acct fields */ xacct_add_tsk(stats, tsk); /* add executable info */ exe_add_tsk(stats, tsk); } static int fill_stats_for_pid(pid_t pid, struct taskstats *stats) { struct task_struct *tsk; tsk = find_get_task_by_vpid(pid); if (!tsk) return -ESRCH; fill_stats(current_user_ns(), task_active_pid_ns(current), tsk, stats); put_task_struct(tsk); return 0; } static int fill_stats_for_tgid(pid_t tgid, struct taskstats *stats) { struct task_struct *tsk, *first; unsigned long flags; int rc = -ESRCH; u64 delta, utime, stime; u64 start_time; /* * Add additional stats from live tasks except zombie thread group * leaders who are already counted with the dead tasks */ rcu_read_lock(); first = find_task_by_vpid(tgid); if (!first || !lock_task_sighand(first, &flags)) goto out; if (first->signal->stats) memcpy(stats, first->signal->stats, sizeof(*stats)); else memset(stats, 0, sizeof(*stats)); start_time = ktime_get_ns(); for_each_thread(first, tsk) { if (tsk->exit_state) continue; /* * Accounting subsystems can call their functions here to * fill in relevant parts of struct taskstats as follows * * per-task-foo(stats, tsk); */ delayacct_add_tsk(stats, tsk); /* calculate task elapsed time in nsec */ delta = start_time - tsk->start_time; /* Convert to microseconds */ do_div(delta, NSEC_PER_USEC); stats->ac_etime += delta; task_cputime(tsk, &utime, &stime); stats->ac_utime += div_u64(utime, NSEC_PER_USEC); stats->ac_stime += div_u64(stime, NSEC_PER_USEC); stats->nvcsw += tsk->nvcsw; stats->nivcsw += tsk->nivcsw; } unlock_task_sighand(first, &flags); rc = 0; out: rcu_read_unlock(); stats->version = TASKSTATS_VERSION; /* * Accounting subsystems can also add calls here to modify * fields of taskstats.
*/ return rc; } static void fill_tgid_exit(struct task_struct *tsk) { unsigned long flags; spin_lock_irqsave(&tsk->sighand->siglock, flags); if (!tsk->signal->stats) goto ret; /* * Each accounting subsystem calls its functions here to * accumulate its per-task stats for tsk into the per-tgid structure * * per-task-foo(tsk->signal->stats, tsk); */ delayacct_add_tsk(tsk->signal->stats, tsk); ret: spin_unlock_irqrestore(&tsk->sighand->siglock, flags); return; } static int add_del_listener(pid_t pid, const struct cpumask *mask, int isadd) { struct listener_list *listeners; struct listener *s, *tmp, *s2; unsigned int cpu; int ret = 0; if (!cpumask_subset(mask, cpu_possible_mask)) return -EINVAL; if (current_user_ns() != &init_user_ns) return -EINVAL; if (task_active_pid_ns(current) != &init_pid_ns) return -EINVAL; if (isadd == REGISTER) { for_each_cpu(cpu, mask) { s = kmalloc_node(sizeof(struct listener), GFP_KERNEL, cpu_to_node(cpu)); if (!s) { ret = -ENOMEM; goto cleanup; } s->pid = pid; s->valid = 1; listeners = &per_cpu(listener_array, cpu); down_write(&listeners->sem); list_for_each_entry(s2, &listeners->list, list) { if (s2->pid == pid && s2->valid) goto exists; } list_add(&s->list, &listeners->list); s = NULL; exists: up_write(&listeners->sem); kfree(s); /* nop if NULL */ } return 0; } /* Deregister or cleanup */ cleanup: for_each_cpu(cpu, mask) { listeners = &per_cpu(listener_array, cpu); down_write(&listeners->sem); list_for_each_entry_safe(s, tmp, &listeners->list, list) { if (s->pid == pid) { list_del(&s->list); kfree(s); break; } } up_write(&listeners->sem); } return ret; } static int parse(struct nlattr *na, struct cpumask *mask) { char *data; int len; int ret; if (na == NULL) return 1; len = nla_len(na); if (len > TASKSTATS_CPUMASK_MAXLEN) return -E2BIG; if (len < 1) return -EINVAL; data = kmalloc(len, GFP_KERNEL); if (!data) return -ENOMEM; nla_strscpy(data, na, len); ret = cpulist_parse(data, mask); kfree(data); return ret; } static struct taskstats *mk_reply(struct sk_buff *skb, int type, u32 pid) { struct nlattr *na, *ret; int aggr; aggr = (type == TASKSTATS_TYPE_PID) ?
TASKSTATS_TYPE_AGGR_PID : TASKSTATS_TYPE_AGGR_TGID; na = nla_nest_start_noflag(skb, aggr); if (!na) goto err; if (nla_put(skb, type, sizeof(pid), &pid) < 0) { nla_nest_cancel(skb, na); goto err; } ret = nla_reserve_64bit(skb, TASKSTATS_TYPE_STATS, sizeof(struct taskstats), TASKSTATS_TYPE_NULL); if (!ret) { nla_nest_cancel(skb, na); goto err; } nla_nest_end(skb, na); return nla_data(ret); err: return NULL; } static int cgroupstats_user_cmd(struct sk_buff *skb, struct genl_info *info) { int rc = 0; struct sk_buff *rep_skb; struct cgroupstats *stats; struct nlattr *na; size_t size; u32 fd; na = info->attrs[CGROUPSTATS_CMD_ATTR_FD]; if (!na) return -EINVAL; fd = nla_get_u32(info->attrs[CGROUPSTATS_CMD_ATTR_FD]); CLASS(fd, f)(fd); if (fd_empty(f)) return 0; size = nla_total_size(sizeof(struct cgroupstats)); rc = prepare_reply(info, CGROUPSTATS_CMD_NEW, &rep_skb, size); if (rc < 0) return rc; na = nla_reserve(rep_skb, CGROUPSTATS_TYPE_CGROUP_STATS, sizeof(struct cgroupstats)); if (na == NULL) { nlmsg_free(rep_skb); return -EMSGSIZE; } stats = nla_data(na); memset(stats, 0, sizeof(*stats)); rc = cgroupstats_build(stats, fd_file(f)->f_path.dentry); if (rc < 0) { nlmsg_free(rep_skb); return rc; } return send_reply(rep_skb, info); } static int cmd_attr_register_cpumask(struct genl_info *info) { cpumask_var_t mask; int rc; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; rc = parse(info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK], mask); if (rc < 0) goto out; rc = add_del_listener(info->snd_portid, mask, REGISTER); out: free_cpumask_var(mask); return rc; } static int cmd_attr_deregister_cpumask(struct genl_info *info) { cpumask_var_t mask; int rc; if (!alloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; rc = parse(info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK], mask); if (rc < 0) goto out; rc = add_del_listener(info->snd_portid, mask, DEREGISTER); out: free_cpumask_var(mask); return rc; } static size_t taskstats_packet_size(void) { size_t size; size = nla_total_size(sizeof(u32)) + nla_total_size_64bit(sizeof(struct taskstats)) + nla_total_size(0); return size; } static int cmd_attr_pid(struct genl_info *info) { struct taskstats *stats; struct sk_buff *rep_skb; size_t size; u32 pid; int rc; size = taskstats_packet_size(); rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); if (rc < 0) return rc; rc = -EINVAL; pid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_PID]); stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, pid); if (!stats) goto err; rc = fill_stats_for_pid(pid, stats); if (rc < 0) goto err; return send_reply(rep_skb, info); err: nlmsg_free(rep_skb); return rc; } static int cmd_attr_tgid(struct genl_info *info) { struct taskstats *stats; struct sk_buff *rep_skb; size_t size; u32 tgid; int rc; size = taskstats_packet_size(); rc = prepare_reply(info, TASKSTATS_CMD_NEW, &rep_skb, size); if (rc < 0) return rc; rc = -EINVAL; tgid = nla_get_u32(info->attrs[TASKSTATS_CMD_ATTR_TGID]); stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, tgid); if (!stats) goto err; rc = fill_stats_for_tgid(tgid, stats); if (rc < 0) goto err; return send_reply(rep_skb, info); err: nlmsg_free(rep_skb); return rc; } static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info) { if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK]) return cmd_attr_register_cpumask(info); else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK]) return cmd_attr_deregister_cpumask(info); else if (info->attrs[TASKSTATS_CMD_ATTR_PID]) return cmd_attr_pid(info); else if (info->attrs[TASKSTATS_CMD_ATTR_TGID]) 
return cmd_attr_tgid(info); else return -EINVAL; } static struct taskstats *taskstats_tgid_alloc(struct task_struct *tsk) { struct signal_struct *sig = tsk->signal; struct taskstats *stats_new, *stats; /* Pairs with smp_store_release() below. */ stats = smp_load_acquire(&sig->stats); if (stats || thread_group_empty(tsk)) return stats; /* No problem if kmem_cache_zalloc() fails */ stats_new = kmem_cache_zalloc(taskstats_cache, GFP_KERNEL); spin_lock_irq(&tsk->sighand->siglock); stats = sig->stats; if (!stats) { /* * Pairs with smp_store_release() above and order the * kmem_cache_zalloc(). */ smp_store_release(&sig->stats, stats_new); stats = stats_new; stats_new = NULL; } spin_unlock_irq(&tsk->sighand->siglock); if (stats_new) kmem_cache_free(taskstats_cache, stats_new); return stats; } /* Send pid data out on exit */ void taskstats_exit(struct task_struct *tsk, int group_dead) { int rc; struct listener_list *listeners; struct taskstats *stats; struct sk_buff *rep_skb; size_t size; int is_thread_group; if (!family_registered) return; /* * Size includes space for nested attributes */ size = taskstats_packet_size(); is_thread_group = !!taskstats_tgid_alloc(tsk); if (is_thread_group) { /* PID + STATS + TGID + STATS */ size = 2 * size; /* fill the tsk->signal->stats structure */ fill_tgid_exit(tsk); } listeners = raw_cpu_ptr(&listener_array); if (list_empty(&listeners->list)) return; rc = prepare_reply(NULL, TASKSTATS_CMD_NEW, &rep_skb, size); if (rc < 0) return; stats = mk_reply(rep_skb, TASKSTATS_TYPE_PID, task_pid_nr_ns(tsk, &init_pid_ns)); if (!stats) goto err; fill_stats(&init_user_ns, &init_pid_ns, tsk, stats); if (group_dead) stats->ac_flag |= AGROUP; /* * Doesn't matter if tsk is the leader or the last group member leaving */ if (!is_thread_group || !group_dead) goto send; stats = mk_reply(rep_skb, TASKSTATS_TYPE_TGID, task_tgid_nr_ns(tsk, &init_pid_ns)); if (!stats) goto err; memcpy(stats, tsk->signal->stats, sizeof(*stats)); send: send_cpu_listeners(rep_skb, listeners); return; err: nlmsg_free(rep_skb); } static const struct genl_ops taskstats_ops[] = { { .cmd = TASKSTATS_CMD_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = taskstats_user_cmd, .policy = taskstats_cmd_get_policy, .maxattr = ARRAY_SIZE(taskstats_cmd_get_policy) - 1, .flags = GENL_ADMIN_PERM, }, { .cmd = CGROUPSTATS_CMD_GET, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = cgroupstats_user_cmd, .policy = cgroupstats_cmd_get_policy, .maxattr = ARRAY_SIZE(cgroupstats_cmd_get_policy) - 1, }, }; static struct genl_family family __ro_after_init = { .name = TASKSTATS_GENL_NAME, .version = TASKSTATS_GENL_VERSION, .module = THIS_MODULE, .ops = taskstats_ops, .n_ops = ARRAY_SIZE(taskstats_ops), .resv_start_op = CGROUPSTATS_CMD_GET + 1, .netnsok = true, }; /* Needed early in initialization */ void __init taskstats_init_early(void) { unsigned int i; taskstats_cache = KMEM_CACHE(taskstats, SLAB_PANIC); for_each_possible_cpu(i) { INIT_LIST_HEAD(&(per_cpu(listener_array, i).list)); init_rwsem(&(per_cpu(listener_array, i).sem)); } } static int __init taskstats_init(void) { int rc; rc = genl_register_family(&family); if (rc) return rc; family_registered = 1; pr_info("registered taskstats version %d\n", TASKSTATS_GENL_VERSION); return 0; } /* * late initcall ensures initialization of statistics collection * mechanisms precedes initialization of the taskstats interface */ late_initcall(taskstats_init);
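/*
 * Illustrative userspace sketch (not kernel code, guarded so it is never
 * compiled here): how a client builds the TASKSTATS_CMD_GET request that
 * taskstats_user_cmd() handles. It assumes the genetlink family id was
 * already resolved through the CTRL_CMD_GETFAMILY handshake and elides
 * socket setup and error checks; tools/accounting/getdelays.c is the
 * complete reference client.
 */
#if 0
#include <string.h>
#include <linux/netlink.h>
#include <linux/genetlink.h>
#include <linux/taskstats.h>

struct ts_request {
	struct nlmsghdr n;
	struct genlmsghdr g;
	char buf[64];
};

static void build_pid_request(struct ts_request *req, __u16 family_id, __u32 pid)
{
	struct nlattr *na;

	memset(req, 0, sizeof(*req));
	req->n.nlmsg_len = NLMSG_LENGTH(GENL_HDRLEN);
	req->n.nlmsg_type = family_id;
	req->n.nlmsg_flags = NLM_F_REQUEST;
	req->g.cmd = TASKSTATS_CMD_GET;
	req->g.version = TASKSTATS_GENL_VERSION;

	/* a single TASKSTATS_CMD_ATTR_PID attribute carrying a u32 */
	na = (struct nlattr *)((char *)req + req->n.nlmsg_len);
	na->nla_type = TASKSTATS_CMD_ATTR_PID;
	na->nla_len = NLA_HDRLEN + sizeof(pid);
	memcpy((char *)na + NLA_HDRLEN, &pid, sizeof(pid));
	req->n.nlmsg_len += NLMSG_ALIGN(na->nla_len);
}
#endif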
/* SPDX-License-Identifier: GPL-2.0 */ /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. */ #ifndef __IA_CSS_CTC2_PARAM_H #define __IA_CSS_CTC2_PARAM_H #define IA_CSS_CTC_COEF_SHIFT 13 #include "vmem.h" /* needed for VMEM_ARRAY */ /* CTC (Chroma Tone Control) ISP parameters */ /* VMEM luma params */ struct ia_css_isp_ctc2_vmem_params { /** Gains by Y (luma) at Y = 0.0, y_x1, y_x2, y_x3, y_x4 */ VMEM_ARRAY(y_x, ISP_VEC_NELEMS); /* Kneepoints by Y (luma): 0.0, y_x1, y_x2, y_x3, y_x4 */ VMEM_ARRAY(y_y, ISP_VEC_NELEMS); /* Slopes of lines interconnecting * 0.0 -> y_x1 -> y_x2 -> y_x3 -> y_x4 -> 1.0 */ VMEM_ARRAY(e_y_slope, ISP_VEC_NELEMS); }; /* DMEM chroma params */ struct ia_css_isp_ctc2_dmem_params { /* Gains by UV (chroma) under kneepoints uv_x0 and uv_x1 */ s32 uv_y0; s32 uv_y1; /* Kneepoints by UV (chroma): uv_x0 and uv_x1 */ s32 uv_x0; s32 uv_x1; /* Slope of line interconnecting uv_x0 -> uv_x1 */ s32 uv_dydx; }; #endif /* __IA_CSS_CTC2_PARAM_H */
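/*
 * Illustrative fixed-point example, assuming the slope uses the Q-format
 * implied by IA_CSS_CTC_COEF_SHIFT (13 fractional bits):
 *
 *	uv_dydx = ((uv_y1 - uv_y0) << IA_CSS_CTC_COEF_SHIFT) / (uv_x1 - uv_x0);
 *
 * e.g. chroma gains falling from 8192 to 4096 (1.0 -> 0.5 in Q13) across
 * kneepoints 1024 apart give uv_dydx = (-4096 << 13) / 1024 = -32768,
 * i.e. -4.0 in Q13.
 */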