code
stringlengths
0
23.9M
// SPDX-License-Identifier: GPL-2.0-only
/*
 * stdlib functions
 *
 * Author: Scott Wood <[email protected]>
 *
 * Copyright (c) 2007 Freescale Semiconductor, Inc.
 */

#include "stdlib.h"

/*
 * Return the numeric value of character @c interpreted as a digit in
 * @base, or -1 when @c is not a valid digit for that base.
 */
static int digit_value(char c, int base)
{
	int v = -1;

	if (c >= '0' && c <= '9')
		v = c - '0';
	else if (c >= 'A' && c <= 'Z')
		v = c - 'A' + 10;
	else if (c >= 'a' && c <= 'z')
		v = c - 'a' + 10;

	return (v >= 0 && v < base) ? v : -1;
}

/*
 * Minimal strtoull for the boot wrapper: accumulate digits in @base
 * until the first non-digit, reporting the stop position via @end.
 *
 * Not currently supported: leading whitespace, sign, 0x prefix, zero base.
 * An out-of-range base (> 36) consumes nothing and returns 0.
 */
unsigned long long int strtoull(const char *ptr, char **end, int base)
{
	unsigned long long val = 0;
	int d;

	if (base <= 36) {
		while ((d = digit_value(*ptr, base)) >= 0) {
			val = val * base + d;
			ptr++;
		}
	}

	if (end)
		*end = (char *)ptr;

	return val;
}
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Device Tree Source for the H3ULCB Kingfisher board with R-Car H3e-2G
 *
 * Copyright (C) 2021 Glider bv
 *
 * Based on r8a77951-ulcb-kf.dts
 * Copyright (C) 2017 Renesas Electronics Corp.
 * Copyright (C) 2017 Cogent Embedded, Inc.
 */

/* Base ULCB board DT for the R8A779M1 SoC, plus the Kingfisher
 * expansion-board additions. */
#include "r8a779m1-ulcb.dts"
#include "ulcb-kf.dtsi"

/ {
	model = "Renesas H3ULCB Kingfisher board based on r8a779m1";
	/* Most-specific first: Kingfisher carrier, ULCB board, then SoC. */
	compatible = "shimafuji,kingfisher", "renesas,h3ulcb",
		     "renesas,r8a779m1", "renesas,r8a7795";
};
/******************************************************************* * This file is part of the Emulex Linux Device Driver for * * Fibre Channel Host Bus Adapters. * * Copyright (C) 2017-2024 Broadcom. All Rights Reserved. The term * * “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. * * Copyright (C) 2004-2016 Emulex. All rights reserved. * * EMULEX and SLI are trademarks of Emulex. * * www.broadcom.com * * * * This program is free software; you can redistribute it and/or * * modify it under the terms of version 2 of the GNU General * * Public License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful. * * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * * TO BE LEGALLY INVALID. See the GNU General Public License for * * more details, a copy of which can be found in the file COPYING * * included with this package. 
* *******************************************************************/ /* * Fibre Channel SCSI LAN Device Driver CT support: FC Generic Services FC-GS */ #include <linux/blkdev.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/utsname.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_transport_fc.h> #include <scsi/fc/fc_fs.h> #include "lpfc_hw4.h" #include "lpfc_hw.h" #include "lpfc_sli.h" #include "lpfc_sli4.h" #include "lpfc_nl.h" #include "lpfc_disc.h" #include "lpfc.h" #include "lpfc_scsi.h" #include "lpfc_logmsg.h" #include "lpfc_crtn.h" #include "lpfc_version.h" #include "lpfc_vport.h" #include "lpfc_debugfs.h" /* FDMI Port Speed definitions - FC-GS-7 */ #define HBA_PORTSPEED_1GFC 0x00000001 /* 1G FC */ #define HBA_PORTSPEED_2GFC 0x00000002 /* 2G FC */ #define HBA_PORTSPEED_4GFC 0x00000008 /* 4G FC */ #define HBA_PORTSPEED_10GFC 0x00000004 /* 10G FC */ #define HBA_PORTSPEED_8GFC 0x00000010 /* 8G FC */ #define HBA_PORTSPEED_16GFC 0x00000020 /* 16G FC */ #define HBA_PORTSPEED_32GFC 0x00000040 /* 32G FC */ #define HBA_PORTSPEED_20GFC 0x00000080 /* 20G FC */ #define HBA_PORTSPEED_40GFC 0x00000100 /* 40G FC */ #define HBA_PORTSPEED_128GFC 0x00000200 /* 128G FC */ #define HBA_PORTSPEED_64GFC 0x00000400 /* 64G FC */ #define HBA_PORTSPEED_256GFC 0x00000800 /* 256G FC */ #define HBA_PORTSPEED_UNKNOWN 0x00008000 /* Unknown */ #define HBA_PORTSPEED_10GE 0x00010000 /* 10G E */ #define HBA_PORTSPEED_40GE 0x00020000 /* 40G E */ #define HBA_PORTSPEED_100GE 0x00040000 /* 100G E */ #define HBA_PORTSPEED_25GE 0x00080000 /* 25G E */ #define HBA_PORTSPEED_50GE 0x00100000 /* 50G E */ #define HBA_PORTSPEED_400GE 0x00200000 /* 400G E */ #define FOURBYTES 4 static char *lpfc_release_version = LPFC_DRIVER_VERSION; static void lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb); static void lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, 
struct lpfc_iocbq *piocbq, struct lpfc_dmabuf *mp, uint32_t size) { if (!mp) { lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0146 Ignoring unsolicited CT No HBQ " "status = x%x\n", get_job_ulpstatus(phba, piocbq)); } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, "0145 Ignoring unsolicited CT HBQ Size:%d " "status = x%x\n", size, get_job_ulpstatus(phba, piocbq)); } static void lpfc_ct_unsol_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, struct lpfc_dmabuf *mp, uint32_t size) { lpfc_ct_ignore_hbq_buffer(phba, piocbq, mp, size); } /** * lpfc_ct_unsol_cmpl : Completion callback function for unsol ct commands * @phba : pointer to lpfc hba data structure. * @cmdiocb : pointer to lpfc command iocb data structure. * @rspiocb : pointer to lpfc response iocb data structure. * * This routine is the callback function for issuing unsol ct reject command. * The memory allocated in the reject command path is freed up here. **/ static void lpfc_ct_unsol_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_nodelist *ndlp; struct lpfc_dmabuf *mp, *bmp; ndlp = cmdiocb->ndlp; if (ndlp) lpfc_nlp_put(ndlp); mp = cmdiocb->rsp_dmabuf; bmp = cmdiocb->bpl_dmabuf; if (mp) { lpfc_mbuf_free(phba, mp->virt, mp->phys); kfree(mp); cmdiocb->rsp_dmabuf = NULL; } if (bmp) { lpfc_mbuf_free(phba, bmp->virt, bmp->phys); kfree(bmp); cmdiocb->bpl_dmabuf = NULL; } lpfc_sli_release_iocbq(phba, cmdiocb); } /** * lpfc_ct_reject_event - Issue reject for unhandled CT MIB commands * @ndlp: pointer to a node-list data structure. * @ct_req: pointer to the CT request data structure. * @ulp_context: context of received UNSOL CT command * @ox_id: ox_id of the UNSOL CT command * * This routine is invoked by the lpfc_ct_handle_mibreq routine for sending * a reject response. Reject response is sent for the unhandled commands. 
 **/
static void
lpfc_ct_reject_event(struct lpfc_nodelist *ndlp,
		     struct lpfc_sli_ct_request *ct_req,
		     u16 ulp_context, u16 ox_id)
{
	struct lpfc_vport *vport = ndlp->vport;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_sli_ct_request *ct_rsp;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_dmabuf *bmp = NULL;
	struct lpfc_dmabuf *mp = NULL;
	struct ulp_bde64 *bpl;
	/* rc doubles as a step marker for the 6440 error log below */
	u8 rc = 0;
	u32 tmo;

	/* fill in BDEs for command */
	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp) {
		rc = 1;
		goto ct_exit;
	}

	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &mp->phys);
	if (!mp->virt) {
		rc = 2;
		goto ct_free_mp;
	}

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp) {
		rc = 3;
		goto ct_free_mpvirt;
	}

	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &bmp->phys);
	if (!bmp->virt) {
		rc = 4;
		goto ct_free_bmp;
	}

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	/* Single BDE pointing at the response payload buffer */
	bpl = (struct ulp_bde64 *)bmp->virt;
	memset(bpl, 0, sizeof(struct ulp_bde64));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
	bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
	bpl->tus.f.bdeSize = (LPFC_CT_PREAMBLE - 4);
	bpl->tus.w = le32_to_cpu(bpl->tus.w);

	/* Build an FS_RJT response echoing the request's FsType/FsSubType */
	ct_rsp = (struct lpfc_sli_ct_request *)mp->virt;
	memset(ct_rsp, 0, sizeof(struct lpfc_sli_ct_request));

	ct_rsp->RevisionId.bits.Revision = SLI_CT_REVISION;
	ct_rsp->RevisionId.bits.InId = 0;
	ct_rsp->FsType = ct_req->FsType;
	ct_rsp->FsSubType = ct_req->FsSubType;
	ct_rsp->CommandResponse.bits.Size = 0;
	ct_rsp->CommandResponse.bits.CmdRsp =
		cpu_to_be16(SLI_CT_RESPONSE_FS_RJT);
	ct_rsp->ReasonCode = SLI_CT_REQ_NOT_SUPPORTED;
	ct_rsp->Explanation = SLI_CT_NO_ADDITIONAL_EXPL;

	cmdiocbq = lpfc_sli_get_iocbq(phba);
	if (!cmdiocbq) {
		rc = 5;
		goto ct_free_bmpvirt;
	}

	/* SLI4 addresses the exchange by RPI/ox_id, SLI3 by ulp_context */
	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp,
					 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi],
					 ox_id, 1, FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_WQE);
	} else {
		lpfc_sli_prep_xmit_seq64(phba, cmdiocbq, bmp, 0, ulp_context, 1,
					 FC_RCTL_DD_SOL_CTL, 1,
					 CMD_XMIT_SEQUENCE64_CX);
	}

	/* Save for completion so we can release these resources */
	cmdiocbq->rsp_dmabuf = mp;
	cmdiocbq->bpl_dmabuf = bmp;
	cmdiocbq->cmd_cmpl = lpfc_ct_unsol_cmpl;
	tmo = (3 * phba->fc_ratov);
	cmdiocbq->retry = 0;
	cmdiocbq->vport = vport;
	cmdiocbq->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;

	cmdiocbq->ndlp = lpfc_nlp_get(ndlp);
	if (!cmdiocbq->ndlp)
		goto ct_no_ndlp;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
	if (rc) {
		lpfc_nlp_put(ndlp);
		goto ct_no_ndlp;
	}
	return;

	/* Fall-through cleanup: each label releases what was acquired
	 * after the previous one, in reverse order of allocation.
	 */
ct_no_ndlp:
	rc = 6;
	lpfc_sli_release_iocbq(phba, cmdiocbq);
ct_free_bmpvirt:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ct_free_bmp:
	kfree(bmp);
ct_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
ct_free_mp:
	kfree(mp);
ct_exit:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
			 "6440 Unsol CT: Rsp err %d Data: x%lx\n",
			 rc, vport->fc_flag);
}

/**
 * lpfc_ct_handle_mibreq - Process an unsolicited CT MIB request data buffer
 * @phba: pointer to lpfc hba data structure.
 * @ctiocbq: pointer to lpfc CT command iocb data structure.
 *
 * This routine is used for processing the IOCB associated with a unsolicited
 * CT MIB request. It first determines whether there is an existing ndlp that
 * matches the DID from the unsolicited IOCB. If not, it will return.
 **/
static void
lpfc_ct_handle_mibreq(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocbq)
{
	struct lpfc_sli_ct_request *ct_req;
	struct lpfc_nodelist *ndlp = NULL;
	struct lpfc_vport *vport = ctiocbq->vport;
	u32 ulp_status = get_job_ulpstatus(phba, ctiocbq);
	u32 ulp_word4 = get_job_word4(phba, ctiocbq);
	u32 did;
	u16 mi_cmd;

	did = bf_get(els_rsp64_sid, &ctiocbq->wqe.xmit_els_rsp);
	if (ulp_status) {
		lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
			      "6438 Unsol CT: status:x%x/x%x did : x%x\n",
			      ulp_status, ulp_word4, did);
		return;
	}

	/* Ignore traffic received during vport shutdown */
	if (test_bit(FC_UNLOADING, &vport->load_flag))
		return;

	ndlp = lpfc_findnode_did(vport, did);
	if (!ndlp) {
		lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
			      "6439 Unsol CT: NDLP Not Found for DID : x%x",
			      did);
		return;
	}

	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

	/* No MIB commands are handled here; every request is logged and
	 * answered with an FS_RJT via lpfc_ct_reject_event().
	 */
	mi_cmd = be16_to_cpu(ct_req->CommandResponse.bits.CmdRsp);
	lpfc_vlog_msg(vport, KERN_WARNING, LOG_ELS,
		      "6442 MI Cmd : x%x Not Supported\n", mi_cmd);
	lpfc_ct_reject_event(ndlp, ct_req,
			     bf_get(wqe_ctxt_tag,
				    &ctiocbq->wqe.xmit_els_rsp.wqe_com),
			     bf_get(wqe_rcvoxid,
				    &ctiocbq->wqe.xmit_els_rsp.wqe_com));
}

/**
 * lpfc_ct_unsol_event - Process an unsolicited event from a ct sli ring
 * @phba: pointer to lpfc hba data structure.
 * @pring: pointer to a SLI ring.
 * @ctiocbq: pointer to lpfc ct iocb data structure.
 *
 * This routine is used to process an unsolicited event received from a SLI
 * (Service Level Interface) ring. The actual processing of the data buffer
 * associated with the unsolicited event is done by invoking appropriate routine
 * after properly set up the iocb buffer from the SLI ring on which the
 * unsolicited event was received.
 **/
void
lpfc_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *ctiocbq)
{
	struct lpfc_dmabuf *mp = NULL;
	IOCB_t *icmd = &ctiocbq->iocb;
	int i;
	struct lpfc_iocbq *iocbq;
	struct lpfc_iocbq *iocb;
	dma_addr_t dma_addr;
	uint32_t size;
	struct list_head head;
	struct lpfc_sli_ct_request *ct_req;
	struct lpfc_dmabuf *bdeBuf1 = ctiocbq->cmd_dmabuf;
	struct lpfc_dmabuf *bdeBuf2 = ctiocbq->bpl_dmabuf;
	u32 status, parameter, bde_count = 0;
	struct lpfc_wcqe_complete *wcqe_cmpl = NULL;

	/* Detach the receive buffers from the iocb; they are re-attached
	 * below only when there is something to process.
	 */
	ctiocbq->cmd_dmabuf = NULL;
	ctiocbq->rsp_dmabuf = NULL;
	ctiocbq->bpl_dmabuf = NULL;

	wcqe_cmpl = &ctiocbq->wcqe_cmpl;
	status = get_job_ulpstatus(phba, ctiocbq);
	parameter = get_job_word4(phba, ctiocbq);
	if (phba->sli_rev == LPFC_SLI_REV4)
		bde_count = wcqe_cmpl->word3;
	else
		bde_count = icmd->ulpBdeCount;

	if (unlikely(status == IOSTAT_NEED_BUFFER)) {
		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
	} else if ((status == IOSTAT_LOCAL_REJECT) &&
		   ((parameter & IOERR_PARAM_MASK) ==
		   IOERR_RCV_BUFFER_WAITING)) {
		/* Not enough posted buffers; Try posting more buffers */
		phba->fc_stat.NoRcvBuf++;
		if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
			lpfc_sli3_post_buffer(phba, pring, 2);
		return;
	}

	/* If there are no BDEs associated
	 * with this IOCB, there is nothing to do.
	 */
	if (bde_count == 0)
		return;

	ctiocbq->cmd_dmabuf = bdeBuf1;
	if (bde_count == 2)
		ctiocbq->bpl_dmabuf = bdeBuf2;

	ct_req = (struct lpfc_sli_ct_request *)ctiocbq->cmd_dmabuf->virt;

	/* MIB requests are handled in-driver; everything else goes to BSG.
	 * A non-zero BSG return falls through to the buffer cleanup below.
	 */
	if (ct_req->FsType == SLI_CT_MANAGEMENT_SERVICE &&
	    ct_req->FsSubType == SLI_CT_MIB_Subtypes) {
		lpfc_ct_handle_mibreq(phba, ctiocbq);
	} else {
		if (!lpfc_bsg_ct_unsol_event(phba, pring, ctiocbq))
			return;
	}

	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
		/* HBQ path: walk the iocb chain and free each HBQ buffer */
		INIT_LIST_HEAD(&head);
		list_add_tail(&head, &ctiocbq->list);
		list_for_each_entry(iocb, &head, list) {
			if (phba->sli_rev == LPFC_SLI_REV4)
				bde_count = iocb->wcqe_cmpl.word3;
			else
				bde_count = iocb->iocb.ulpBdeCount;

			if (!bde_count)
				continue;
			bdeBuf1 = iocb->cmd_dmabuf;
			iocb->cmd_dmabuf = NULL;
			if (phba->sli_rev == LPFC_SLI_REV4)
				size = iocb->wqe.gen_req.bde.tus.f.bdeSize;
			else
				size = iocb->iocb.un.cont64[0].tus.f.bdeSize;
			lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf1, size);
			lpfc_in_buf_free(phba, bdeBuf1);
			if (bde_count == 2) {
				bdeBuf2 = iocb->bpl_dmabuf;
				iocb->bpl_dmabuf = NULL;
				if (phba->sli_rev == LPFC_SLI_REV4)
					size = iocb->unsol_rcv_len;
				else
					size = iocb->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize;
				lpfc_ct_unsol_buffer(phba, ctiocbq, bdeBuf2,
						     size);
				lpfc_in_buf_free(phba, bdeBuf2);
			}
		}
		list_del(&head);
	} else {
		/* Non-HBQ path: reclaim posted buffers by DMA address and
		 * repost the same number back to the ring.
		 */
		INIT_LIST_HEAD(&head);
		list_add_tail(&head, &ctiocbq->list);
		list_for_each_entry(iocbq, &head, list) {
			icmd = &iocbq->iocb;
			if (icmd->ulpBdeCount == 0)
				lpfc_ct_unsol_buffer(phba, iocbq, NULL, 0);
			for (i = 0; i < icmd->ulpBdeCount; i++) {
				dma_addr = getPaddr(icmd->un.cont64[i].addrHigh,
						    icmd->un.cont64[i].addrLow);
				mp = lpfc_sli_ringpostbuf_get(phba, pring,
							      dma_addr);
				size = icmd->un.cont64[i].tus.f.bdeSize;
				lpfc_ct_unsol_buffer(phba, iocbq, mp, size);
				lpfc_in_buf_free(phba, mp);
			}
			lpfc_sli3_post_buffer(phba, pring, i);
		}
		list_del(&head);
	}
}

/**
 * lpfc_ct_handle_unsol_abort - ct upper level protocol abort handler
 * @phba: Pointer to HBA context object.
 * @dmabuf: pointer to a dmabuf that describes the FC sequence
 *
 * This function serves as the upper level protocol abort handler for CT
 * protocol.
 *
 * Return 1 if abort has been handled, 0 otherwise.
 **/
int
lpfc_ct_handle_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
{
	int handled;

	/* CT upper level goes through BSG */
	handled = lpfc_bsg_ct_unsol_abort(phba, dmabuf);

	return handled;
}

/* Free a CT response buffer chain built by lpfc_alloc_ct_rsp(), including
 * the list head itself.
 */
static void
lpfc_free_ct_rsp(struct lpfc_hba *phba, struct lpfc_dmabuf *mlist)
{
	struct lpfc_dmabuf *mlast, *next_mlast;

	list_for_each_entry_safe(mlast, next_mlast, &mlist->list, list) {
		list_del(&mlast->list);
		lpfc_mbuf_free(phba, mlast->virt, mlast->phys);
		kfree(mlast);
	}
	lpfc_mbuf_free(phba, mlist->virt, mlist->phys);
	kfree(mlist);
	return;
}

/* Allocate a chain of mbufs to hold a CT response of @size bytes and
 * describe each chunk in the caller-supplied BPL.  Returns the list head
 * (or NULL on allocation failure) and sets *entries to the number of
 * BDEs written.
 */
static struct lpfc_dmabuf *
lpfc_alloc_ct_rsp(struct lpfc_hba *phba, __be16 cmdcode, struct ulp_bde64 *bpl,
		  uint32_t size, int *entries)
{
	struct lpfc_dmabuf *mlist = NULL;
	struct lpfc_dmabuf *mp;
	int cnt, i = 0;

	/* We get chunks of FCELSSIZE */
	cnt = size > FCELSSIZE ? FCELSSIZE : size;

	while (size) {
		/* Allocate buffer for rsp payload */
		mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
		if (!mp) {
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		INIT_LIST_HEAD(&mp->list);

		/* GID_FT/GFF_ID responses get priority-pool buffers */
		if (be16_to_cpu(cmdcode) == SLI_CTNS_GID_FT ||
		    be16_to_cpu(cmdcode) == SLI_CTNS_GFF_ID)
			mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
		else
			mp->virt = lpfc_mbuf_alloc(phba, 0, &(mp->phys));

		if (!mp->virt) {
			kfree(mp);
			if (mlist)
				lpfc_free_ct_rsp(phba, mlist);
			return NULL;
		}

		/* Queue it to a linked list */
		if (!mlist)
			mlist = mp;
		else
			list_add_tail(&mp->list, &mlist->list);

		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
		/* build buffer ptr list for IOCB */
		bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys) );
		bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys) );
		bpl->tus.f.bdeSize = (uint16_t) cnt;
		bpl->tus.w = le32_to_cpu(bpl->tus.w);
		bpl++;
		i++;
		size -= cnt;
	}

	*entries = i;
	return mlist;
}

/* Release every DMA buffer attached to a CT iocb, then the iocb itself.
 * Always returns 0.
 */
int
lpfc_ct_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *ctiocb)
{
	struct lpfc_dmabuf *buf_ptr;

	/* IOCBQ job structure gets cleaned during release.  Just release
	 * the dma buffers here.
	 */
	if (ctiocb->cmd_dmabuf) {
		buf_ptr = ctiocb->cmd_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->cmd_dmabuf = NULL;
	}

	/* rsp_dmabuf is a chain from lpfc_alloc_ct_rsp, not a single buf */
	if (ctiocb->rsp_dmabuf) {
		lpfc_free_ct_rsp(phba, ctiocb->rsp_dmabuf);
		ctiocb->rsp_dmabuf = NULL;
	}

	if (ctiocb->bpl_dmabuf) {
		buf_ptr = ctiocb->bpl_dmabuf;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
		ctiocb->bpl_dmabuf = NULL;
	}
	lpfc_sli_release_iocbq(phba, ctiocb);
	return 0;
}

/*
 * lpfc_gen_req - Build and issue a GEN_REQUEST command to the SLI Layer
 * @vport: pointer to a host virtual N_Port data structure.
 * @bmp: Pointer to BPL for SLI command
 * @inp: Pointer to data buffer for response data.
 * @outp: Pointer to data buffer that hold the CT command.
 * @cmpl: completion routine to call when command completes
 * @ndlp: Destination NPort nodelist entry
 *
 * This function is the final part for issuing a CT command.
 */
static int
lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
	     struct lpfc_dmabuf *inp, struct lpfc_dmabuf *outp,
	     void (*cmpl)(struct lpfc_hba *, struct lpfc_iocbq *,
			  struct lpfc_iocbq *),
	     struct lpfc_nodelist *ndlp, uint32_t event_tag, uint32_t num_entry,
	     uint32_t tmo, uint8_t retry)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_iocbq *geniocb;
	int rc;
	u16 ulp_context;

	/* Allocate buffer for command iocb */
	geniocb = lpfc_sli_get_iocbq(phba);

	if (geniocb == NULL)
		return 1;

	/* Update the num_entry bde count */
	geniocb->num_bdes = num_entry;

	geniocb->bpl_dmabuf = bmp;

	/* Save for completion so we can release these resources */
	geniocb->cmd_dmabuf = inp;
	geniocb->rsp_dmabuf = outp;

	/* event_tag lets the completion detect a link flip in between */
	geniocb->event_tag = event_tag;

	if (!tmo) {
		/* FC spec states we need 3 * ratov for CT requests */
		tmo = (3 * phba->fc_ratov);
	}

	if (phba->sli_rev == LPFC_SLI_REV4)
		ulp_context = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
	else
		ulp_context = ndlp->nlp_rpi;

	lpfc_sli_prep_gen_req(phba, geniocb, bmp, ulp_context, num_entry, tmo);

	/* Issue GEN REQ IOCB for NPORT <did> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
			 "0119 Issue GEN REQ IOCB to NPORT x%x "
			 "Data: x%x x%x\n",
			 ndlp->nlp_DID, geniocb->iotag,
			 vport->port_state);
	geniocb->cmd_cmpl = cmpl;
	geniocb->drvrTimeout = tmo + LPFC_DRVR_TIMEOUT;
	geniocb->vport = vport;
	geniocb->retry = retry;
	geniocb->ndlp = lpfc_nlp_get(ndlp);
	if (!geniocb->ndlp)
		goto out;

	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
	if (rc == IOCB_ERROR) {
		/* Drop the reference taken above before bailing out */
		lpfc_nlp_put(ndlp);
		goto out;
	}

	return 0;
out:
	lpfc_sli_release_iocbq(phba, geniocb);
	return 1;
}

/*
 * lpfc_ct_cmd - Build and issue a CT command
 * @vport: pointer to a host virtual N_Port data structure.
 * @inmp: Pointer to data buffer for response data.
* @bmp: Pointer to BPL for SLI command * @ndlp: Destination NPort nodelist entry * @cmpl: completion routine to call when command completes * * This function is called for issuing a CT command. */ static int lpfc_ct_cmd(struct lpfc_vport *vport, struct lpfc_dmabuf *inmp, struct lpfc_dmabuf *bmp, struct lpfc_nodelist *ndlp, void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *), uint32_t rsp_size, uint8_t retry) { struct lpfc_hba *phba = vport->phba; struct ulp_bde64 *bpl = (struct ulp_bde64 *) bmp->virt; struct lpfc_dmabuf *outmp; int cnt = 0, status; __be16 cmdcode = ((struct lpfc_sli_ct_request *)inmp->virt)-> CommandResponse.bits.CmdRsp; bpl++; /* Skip past ct request */ /* Put buffer(s) for ct rsp in bpl */ outmp = lpfc_alloc_ct_rsp(phba, cmdcode, bpl, rsp_size, &cnt); if (!outmp) return -ENOMEM; /* * Form the CT IOCB. The total number of BDEs in this IOCB * is the single command plus response count from * lpfc_alloc_ct_rsp. */ cnt += 1; status = lpfc_gen_req(vport, bmp, inmp, outmp, cmpl, ndlp, phba->fc_eventTag, cnt, 0, retry); if (status) { lpfc_free_ct_rsp(phba, outmp); return -ENOMEM; } return 0; } struct lpfc_vport * lpfc_find_vport_by_did(struct lpfc_hba *phba, uint32_t did) { struct lpfc_vport *vport_curr; unsigned long flags; spin_lock_irqsave(&phba->port_list_lock, flags); list_for_each_entry(vport_curr, &phba->port_list, listentry) { if ((vport_curr->fc_myDID) && (vport_curr->fc_myDID == did)) { spin_unlock_irqrestore(&phba->port_list_lock, flags); return vport_curr; } } spin_unlock_irqrestore(&phba->port_list_lock, flags); return NULL; } static void lpfc_prep_node_fc4type(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) { struct lpfc_nodelist *ndlp; if ((vport->port_type != LPFC_NPIV_PORT) || !(vport->ct_flags & FC_CT_RFF_ID) || !vport->cfg_restrict_login) { ndlp = lpfc_setup_disc_node(vport, Did); if (ndlp) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Parse GID_FTrsp: did:x%x flg:x%lx x%x", Did, ndlp->nlp_flag, 
vport->fc_flag); /* By default, the driver expects to support FCP FC4 */ if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (fc4_type == FC_TYPE_NVME) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0238 Process x%06x NameServer Rsp " "Data: x%lx x%x x%x x%lx x%x\n", Did, ndlp->nlp_flag, ndlp->nlp_fc4_type, ndlp->nlp_state, vport->fc_flag, vport->fc_rscn_id_cnt); /* if ndlp needs to be discovered and prior * state of ndlp hit devloss, change state to * allow rediscovery. */ if (test_bit(NLP_NPR_2B_DISC, &ndlp->nlp_flag) && ndlp->nlp_state == NLP_STE_UNUSED_NODE) { lpfc_nlp_set_state(vport, ndlp, NLP_STE_NPR_NODE); } } else { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Skip1 GID_FTrsp: did:x%x flg:x%lx cnt:%d", Did, vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0239 Skip x%06x NameServer Rsp " "Data: x%lx x%x x%px\n", Did, vport->fc_flag, vport->fc_rscn_id_cnt, ndlp); } } else { if (!test_bit(FC_RSCN_MODE, &vport->fc_flag) || lpfc_rscn_payload_check(vport, Did)) { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Query GID_FTrsp: did:x%x flg:x%lx cnt:%d", Did, vport->fc_flag, vport->fc_rscn_id_cnt); /* * This NPortID was previously a FCP/NVMe target, * Don't even bother to send GFF_ID. 
*/ ndlp = lpfc_findnode_did(vport, Did); if (ndlp && (ndlp->nlp_type & (NLP_FCP_TARGET | NLP_NVME_TARGET))) { if (fc4_type == FC_TYPE_FCP) ndlp->nlp_fc4_type |= NLP_FC4_FCP; if (fc4_type == FC_TYPE_NVME) ndlp->nlp_fc4_type |= NLP_FC4_NVME; lpfc_setup_disc_node(vport, Did); } else if (lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, 0, Did) == 0) vport->num_disc_nodes++; else lpfc_setup_disc_node(vport, Did); } else { lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "Skip2 GID_FTrsp: did:x%x flg:x%lx cnt:%d", Did, vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0245 Skip x%06x NameServer Rsp " "Data: x%lx x%x\n", Did, vport->fc_flag, vport->fc_rscn_id_cnt); } } } static void lpfc_ns_rsp_audit_did(struct lpfc_vport *vport, uint32_t Did, uint8_t fc4_type) { struct lpfc_hba *phba = vport->phba; struct lpfc_nodelist *ndlp = NULL; char *str; if (phba->cfg_ns_query == LPFC_NS_QUERY_GID_FT) str = "GID_FT"; else str = "GID_PT"; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6430 Process %s rsp for %08x type %x %s %s\n", str, Did, fc4_type, (fc4_type == FC_TYPE_FCP) ? "FCP" : " ", (fc4_type == FC_TYPE_NVME) ? "NVME" : " "); /* * To conserve rpi's, filter out addresses for other * vports on the same physical HBAs. */ if (Did != vport->fc_myDID && (!lpfc_find_vport_by_did(phba, Did) || vport->cfg_peer_port_login)) { if (!phba->nvmet_support) { /* FCPI/NVMEI path. Process Did */ lpfc_prep_node_fc4type(vport, Did, fc4_type); return; } /* NVMET path. NVMET only cares about NVMEI nodes. 
*/ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (ndlp->nlp_type != NLP_NVME_INITIATOR || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) continue; if (ndlp->nlp_DID == Did) clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); else set_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } } static int lpfc_ns_rsp(struct lpfc_vport *vport, struct lpfc_dmabuf *mp, uint8_t fc4_type, uint32_t Size) { struct lpfc_sli_ct_request *Response = (struct lpfc_sli_ct_request *) mp->virt; struct lpfc_dmabuf *mlast, *next_mp; uint32_t *ctptr = (uint32_t *) & Response->un.gid.PortType; uint32_t Did, CTentry; int Cnt; struct list_head head; struct lpfc_nodelist *ndlp = NULL; lpfc_set_disctmo(vport); vport->num_disc_nodes = 0; vport->fc_ns_retry = 0; list_add_tail(&head, &mp->list); list_for_each_entry_safe(mp, next_mp, &head, list) { mlast = mp; Cnt = Size > FCELSSIZE ? FCELSSIZE : Size; Size -= Cnt; if (!ctptr) { ctptr = (uint32_t *) mlast->virt; } else Cnt -= 16; /* subtract length of CT header */ /* Loop through entire NameServer list of DIDs */ while (Cnt >= sizeof(uint32_t)) { /* Get next DID from NameServer List */ CTentry = *ctptr++; Did = ((be32_to_cpu(CTentry)) & Mask_DID); lpfc_ns_rsp_audit_did(vport, Did, fc4_type); if (CTentry & (cpu_to_be32(SLI_CT_LAST_ENTRY))) goto nsout1; Cnt -= sizeof(uint32_t); } ctptr = NULL; } /* All GID_FT entries processed. If the driver is running in * in target mode, put impacted nodes into recovery and drop * the RPI to flush outstanding IO. 
*/ if (vport->phba->nvmet_support) { list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) { if (!test_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag)) continue; lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RECOVERY); clear_bit(NLP_NVMET_RECOV, &ndlp->nlp_flag); } } nsout1: list_del(&head); return 0; } static void lpfc_cmpl_ct_cmd_gid_ft(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *outp; struct lpfc_dmabuf *inp; struct lpfc_sli_ct_request *CTrsp; struct lpfc_sli_ct_request *CTreq; struct lpfc_nodelist *ndlp; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); int rc, type; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; inp = cmdiocb->cmd_dmabuf; outp = cmdiocb->rsp_dmabuf; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT cmpl: status:x%x/x%x rtry:%d", ulp_status, ulp_word4, vport->fc_ns_retry); /* Ignore response if link flipped after this request was made */ if (cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9043 Event tag mismatch. 
Ignoring NS rsp\n"); goto out; } /* Skip processing response on pport if unloading */ if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) { if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); goto out; } if (lpfc_els_chk_latt(vport)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0216 Link event during NS query\n"); if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out; } if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0226 NS query failed due to link event: " "ulp_status x%x ulp_word4 x%x fc_flag x%lx " "port_state x%x gidft_inp x%x\n", ulp_status, ulp_word4, vport->fc_flag, vport->port_state, vport->gidft_inp); if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); if (vport->gidft_inp) vport->gidft_inp--; goto out; } if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) { /* This is a GID_FT completing so the gidft_inp counter was * incremented before the GID_FT was issued to the wire. 
*/ if (vport->gidft_inp) vport->gidft_inp--; /* * Skip processing the NS response * Re-issue the NS cmd */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "0151 Process Deferred RSCN Data: x%lx x%x\n", vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_els_handle_rscn(vport); goto out; } if (ulp_status) { /* Check for retry */ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { if (ulp_status != IOSTAT_LOCAL_REJECT || (ulp_word4 & IOERR_PARAM_MASK) != IOERR_NO_RESOURCES) vport->fc_ns_retry++; type = lpfc_get_gidft_type(vport, cmdiocb); if (type == 0) goto out; /* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_FT, vport->fc_ns_retry, type); if (rc == 0) goto out; else { /* Unable to send NS cmd */ if (vport->gidft_inp) vport->gidft_inp--; } } if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0257 GID_FT Query error: 0x%x 0x%x\n", ulp_status, vport->fc_ns_retry); } else { /* Good status, continue checking */ CTreq = (struct lpfc_sli_ct_request *) inp->virt; CTrsp = (struct lpfc_sli_ct_request *) outp->virt; if (CTrsp->CommandResponse.bits.CmdRsp == cpu_to_be16(SLI_CT_RESPONSE_FS_ACC)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0208 NameServer Rsp Data: x%lx x%x " "x%x x%x sz x%x\n", vport->fc_flag, CTreq->un.gid.Fc4Type, vport->num_disc_nodes, vport->gidft_inp, get_job_data_placed(phba, rspiocb)); lpfc_ns_rsp(vport, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0269 No NameServer Entries " "Data: x%x x%x x%x x%lx\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); 
lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT no entry cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0240 NameServer Rsp Error " "Data: x%x x%x x%x x%lx\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err1 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } } else { /* NameServer Rsp Error */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0241 NameServer Rsp Error " "Data: x%x x%x x%x x%lx\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_FT rsp err2 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t) CTrsp->ReasonCode, (uint32_t) CTrsp->Explanation); } if (vport->gidft_inp) vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4216 GID_FT cmpl inp %d disc %d\n", vport->gidft_inp, vport->num_disc_nodes); /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { /* * The driver has cycled through all Nports in the RSCN payload. * Complete the handling by cleaning up and marking the * current driver state. 
*/ if (vport->port_state >= LPFC_DISC_AUTH) { if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { lpfc_els_flush_rscn(vport); /* RSCN still */ set_bit(FC_RSCN_MODE, &vport->fc_flag); } else { lpfc_els_flush_rscn(vport); } } lpfc_disc_start(vport); } out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); return; } static void lpfc_cmpl_ct_cmd_gid_pt(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *outp; struct lpfc_dmabuf *inp; struct lpfc_sli_ct_request *CTrsp; struct lpfc_sli_ct_request *CTreq; struct lpfc_nodelist *ndlp; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); int rc; /* First save ndlp, before we overwrite it */ ndlp = cmdiocb->ndlp; /* we pass cmdiocb to state machine which needs rspiocb as well */ cmdiocb->rsp_iocb = rspiocb; inp = cmdiocb->cmd_dmabuf; outp = cmdiocb->rsp_dmabuf; lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GID_PT cmpl: status:x%x/x%x rtry:%d", ulp_status, ulp_word4, vport->fc_ns_retry); /* Ignore response if link flipped after this request was made */ if (cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9044 Event tag mismatch. 
Ignoring NS rsp\n"); goto out; } /* Skip processing response on pport if unloading */ if (vport == phba->pport && test_bit(FC_UNLOADING, &vport->load_flag)) { if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); goto out; } if (lpfc_els_chk_latt(vport)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4108 Link event during NS query\n"); if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); goto out; } if (lpfc_error_lost_link(vport, ulp_status, ulp_word4)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4166 NS query failed due to link event: " "ulp_status x%x ulp_word4 x%x fc_flag x%lx " "port_state x%x gidft_inp x%x\n", ulp_status, ulp_word4, vport->fc_flag, vport->port_state, vport->gidft_inp); if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); if (vport->gidft_inp) vport->gidft_inp--; goto out; } if (test_and_clear_bit(FC_RSCN_DEFERRED, &vport->fc_flag)) { /* This is a GID_PT completing so the gidft_inp counter was * incremented before the GID_PT was issued to the wire. 
*/ if (vport->gidft_inp) vport->gidft_inp--; /* * Skip processing the NS response * Re-issue the NS cmd */ lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS, "4167 Process Deferred RSCN Data: x%lx x%x\n", vport->fc_flag, vport->fc_rscn_id_cnt); lpfc_els_handle_rscn(vport); goto out; } if (ulp_status) { /* Check for retry */ if (vport->fc_ns_retry < LPFC_MAX_NS_RETRY) { if (ulp_status != IOSTAT_LOCAL_REJECT || (ulp_word4 & IOERR_PARAM_MASK) != IOERR_NO_RESOURCES) vport->fc_ns_retry++; /* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GID_PT, vport->fc_ns_retry, GID_PT_N_PORT); if (rc == 0) goto out; else { /* Unable to send NS cmd */ if (vport->gidft_inp) vport->gidft_inp--; } } if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) lpfc_els_flush_rscn(vport); lpfc_vport_set_state(vport, FC_VPORT_FAILED); lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4103 GID_FT Query error: 0x%x 0x%x\n", ulp_status, vport->fc_ns_retry); } else { /* Good status, continue checking */ CTreq = (struct lpfc_sli_ct_request *)inp->virt; CTrsp = (struct lpfc_sli_ct_request *)outp->virt; if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "4105 NameServer Rsp Data: x%lx x%x " "x%x x%x sz x%x\n", vport->fc_flag, CTreq->un.gid.Fc4Type, vport->num_disc_nodes, vport->gidft_inp, get_job_data_placed(phba, rspiocb)); lpfc_ns_rsp(vport, outp, CTreq->un.gid.Fc4Type, get_job_data_placed(phba, rspiocb)); } else if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_RJT) { /* NameServer Rsp Error */ if ((CTrsp->ReasonCode == SLI_CT_UNABLE_TO_PERFORM_REQ) && (CTrsp->Explanation == SLI_CT_NO_FC4_TYPES)) { lpfc_printf_vlog( vport, KERN_INFO, LOG_DISCOVERY, "4106 No NameServer Entries " "Data: x%x x%x x%x x%lx\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT no entry 
cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } else { lpfc_printf_vlog( vport, KERN_INFO, LOG_DISCOVERY, "4107 NameServer Rsp Error " "Data: x%x x%x x%x x%lx\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err1 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } } else { /* NameServer Rsp Error */ lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "4109 NameServer Rsp Error " "Data: x%x x%x x%x x%lx\n", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation, vport->fc_flag); lpfc_debugfs_disc_trc( vport, LPFC_DISC_TRC_CT, "GID_PT rsp err2 cmd:x%x rsn:x%x exp:x%x", be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp), (uint32_t)CTrsp->ReasonCode, (uint32_t)CTrsp->Explanation); } if (vport->gidft_inp) vport->gidft_inp--; } lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6450 GID_PT cmpl inp %d disc %d\n", vport->gidft_inp, vport->num_disc_nodes); /* Link up / RSCN discovery */ if ((vport->num_disc_nodes == 0) && (vport->gidft_inp == 0)) { /* * The driver has cycled through all Nports in the RSCN payload. * Complete the handling by cleaning up and marking the * current driver state. 
*/ if (vport->port_state >= LPFC_DISC_AUTH) { if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { lpfc_els_flush_rscn(vport); /* RSCN still */ set_bit(FC_RSCN_MODE, &vport->fc_flag); } else { lpfc_els_flush_rscn(vport); } } lpfc_disc_start(vport); } out: lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(ndlp); } static void lpfc_cmpl_ct_cmd_gff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTrsp; int did, rc, retry; uint8_t fbits; struct lpfc_nodelist *ndlp = NULL, *free_ndlp = NULL; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); did = ((struct lpfc_sli_ct_request *) inp->virt)->un.gff.PortId; did = be32_to_cpu(did); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GFF_ID cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* Ignore response if link flipped after this request was made */ if (cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9045 Event tag mismatch. Ignoring NS rsp\n"); goto iocb_free; } if (ulp_status == IOSTAT_SUCCESS) { /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *) outp->virt; fbits = CTrsp->un.gff_acc.fbits[FCP_TYPE_FEATURE_OFFSET]; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6431 Process GFF_ID rsp for %08x " "fbits %02x %s %s\n", did, fbits, (fbits & FC4_FEATURE_INIT) ? "Initiator" : " ", (fbits & FC4_FEATURE_TARGET) ? 
"Target" : " "); if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) == SLI_CT_RESPONSE_FS_ACC) { if ((fbits & FC4_FEATURE_INIT) && !(fbits & FC4_FEATURE_TARGET)) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0270 Skip x%x GFF " "NameServer Rsp Data: (init) " "x%x x%x\n", did, fbits, vport->fc_rscn_id_cnt); goto out; } } } else { /* Check for retry */ if (cmdiocb->retry < LPFC_MAX_NS_RETRY) { retry = 1; if (ulp_status == IOSTAT_LOCAL_REJECT) { switch ((ulp_word4 & IOERR_PARAM_MASK)) { case IOERR_NO_RESOURCES: /* We don't increment the retry * count for this case. */ break; case IOERR_LINK_DOWN: case IOERR_SLI_ABORTED: case IOERR_SLI_DOWN: retry = 0; break; default: cmdiocb->retry++; } } else cmdiocb->retry++; if (retry) { /* CT command is being retried */ rc = lpfc_ns_cmd(vport, SLI_CTNS_GFF_ID, cmdiocb->retry, did); if (rc == 0) { /* success */ free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); return; } } } lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT, "0267 NameServer GFF Rsp " "x%x Error (%d %d) Data: x%lx x%x\n", did, ulp_status, ulp_word4, vport->fc_flag, vport->fc_rscn_id_cnt); } /* This is a target port, unregistered port, or the GFF_ID failed */ ndlp = lpfc_setup_disc_node(vport, did); if (ndlp) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0242 Process x%x GFF " "NameServer Rsp Data: x%lx x%lx x%x\n", did, ndlp->nlp_flag, vport->fc_flag, vport->fc_rscn_id_cnt); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "0243 Skip x%x GFF " "NameServer Rsp Data: x%lx x%x\n", did, vport->fc_flag, vport->fc_rscn_id_cnt); } out: /* Link up / RSCN discovery */ if (vport->num_disc_nodes) vport->num_disc_nodes--; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6451 GFF_ID cmpl inp %d disc %d\n", vport->gidft_inp, vport->num_disc_nodes); if (vport->num_disc_nodes == 0) { /* * The driver has cycled through all Nports in the RSCN payload. 
* Complete the handling by cleaning up and marking the * current driver state. */ if (vport->port_state >= LPFC_DISC_AUTH) { if (test_bit(FC_RSCN_MODE, &vport->fc_flag)) { lpfc_els_flush_rscn(vport); /* RSCN still */ set_bit(FC_RSCN_MODE, &vport->fc_flag); } else { lpfc_els_flush_rscn(vport); } } lpfc_disc_start(vport); } iocb_free: free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); return; } static void lpfc_cmpl_ct_cmd_gft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTrsp; int did; struct lpfc_nodelist *ndlp = NULL; struct lpfc_nodelist *ns_ndlp = cmdiocb->ndlp; uint32_t fc4_data_0, fc4_data_1; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); did = ((struct lpfc_sli_ct_request *)inp->virt)->un.gft.PortId; did = be32_to_cpu(did); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "GFT_ID cmpl: status:x%x/x%x did:x%x", ulp_status, ulp_word4, did); /* Ignore response if link flipped after this request was made */ if ((uint32_t)cmdiocb->event_tag != phba->fc_eventTag) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "9046 Event tag mismatch. Ignoring NS rsp\n"); goto out; } if (ulp_status == IOSTAT_SUCCESS) { /* Good status, continue checking */ CTrsp = (struct lpfc_sli_ct_request *)outp->virt; fc4_data_0 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[0]); fc4_data_1 = be32_to_cpu(CTrsp->un.gft_acc.fc4_types[1]); lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6432 Process GFT_ID rsp for %08x " "Data %08x %08x %s %s\n", did, fc4_data_0, fc4_data_1, (fc4_data_0 & LPFC_FC4_TYPE_BITMASK) ? "FCP" : " ", (fc4_data_1 & LPFC_FC4_TYPE_BITMASK) ? "NVME" : " "); /* Lookup the NPort_ID queried in the GFT_ID and find the * driver's local node. It's an error if the driver * doesn't have one. 
 */
		ndlp = lpfc_findnode_did(vport, did);
		if (ndlp) {
			/* The bitmask value for FCP and NVME FCP types is
			 * the same because they are 32 bits distant from
			 * each other in word0 and word0.
			 */
			if (fc4_data_0 & LPFC_FC4_TYPE_BITMASK)
				ndlp->nlp_fc4_type |= NLP_FC4_FCP;
			if (fc4_data_1 & LPFC_FC4_TYPE_BITMASK)
				ndlp->nlp_fc4_type |= NLP_FC4_NVME;
			lpfc_printf_vlog(vport, KERN_INFO,
					 LOG_DISCOVERY | LOG_NODE,
					 "3064 Setting ndlp x%px, DID x%06x "
					 "with FC4 x%08x, Data: x%08x x%08x "
					 "%d\n",
					 ndlp, did, ndlp->nlp_fc4_type,
					 FC_TYPE_FCP, FC_TYPE_NVME,
					 ndlp->nlp_state);

			if (ndlp->nlp_state == NLP_STE_REG_LOGIN_ISSUE &&
			    ndlp->nlp_fc4_type) {
				/* FC4 type resolved - advance the node to
				 * PRLI to negotiate that type.
				 */
				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_PRLI_ISSUE);
				lpfc_issue_els_prli(vport, ndlp, 0);
			} else if (!ndlp->nlp_fc4_type) {
				/* If fc4 type is still unknown, then LOGO */
				lpfc_printf_vlog(vport, KERN_INFO,
						 LOG_DISCOVERY | LOG_NODE,
						 "6443 Sending LOGO ndlp x%px, "
						 "DID x%06x with fc4_type: "
						 "x%08x, state: %d\n",
						 ndlp, did,
						 ndlp->nlp_fc4_type,
						 ndlp->nlp_state);
				lpfc_issue_els_logo(vport, ndlp, 0);
				ndlp->nlp_prev_state = NLP_STE_REG_LOGIN_ISSUE;
				lpfc_nlp_set_state(vport, ndlp,
						   NLP_STE_NPR_NODE);
			}
		}
	} else
		lpfc_vlog_msg(vport, KERN_WARNING, LOG_DISCOVERY,
			      "3065 GFT_ID status x%08x\n", ulp_status);

out:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ns_ndlp);
}

/* Common completion handler for simple NameServer CT registrations
 * (RFT_ID, RNN_ID, RSPN_ID, RSNN_NN, DA_ID, RFF_ID).  Logs the result,
 * retries the command on failure up to LPFC_MAX_NS_RETRY (skipping
 * terminal SLI-down/aborted errors), wakes any synchronous DA_ID
 * waiter, then frees the iocb and drops the ndlp reference.
 */
static void
lpfc_cmpl_ct(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
	     struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp;
	struct lpfc_dmabuf *outp;
	struct lpfc_sli_ct_request *CTrsp;
	struct lpfc_nodelist *ndlp;
	int cmdcode, rc;
	uint8_t retry;
	uint32_t latt;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);
	u32 ulp_word4 = get_job_word4(phba, rspiocb);

	/* First save ndlp, before we overwrite it */
	ndlp = cmdiocb->ndlp;

	/* we pass cmdiocb to state machine which needs rspiocb as well */
	cmdiocb->rsp_iocb = rspiocb;

	inp = cmdiocb->cmd_dmabuf;
	outp = cmdiocb->rsp_dmabuf;

	/* Recover the CT command code from the original request payload */
	cmdcode = be16_to_cpu(((struct lpfc_sli_ct_request *) inp->virt)->
			      CommandResponse.bits.CmdRsp);
	CTrsp = (struct lpfc_sli_ct_request *) outp->virt;

	latt = lpfc_els_chk_latt(vport);

	/* RFT request completes status <ulp_status> CmdRsp <CmdRsp> */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0209 CT Request completes, latt %d, "
			 "ulp_status x%x CmdRsp x%x, Context x%x, Tag x%x\n",
			 latt, ulp_status,
			 be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp),
			 get_job_ulpcontext(phba, cmdiocb), cmdiocb->iotag);

	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
		"CT cmd cmpl: status:x%x/x%x cmd:x%x",
		ulp_status, ulp_word4, cmdcode);

	if (ulp_status) {
		lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
				 "0268 NS cmd x%x Error (x%x x%x)\n",
				 cmdcode, ulp_status, ulp_word4);

		/* SLI down or driver-initiated abort are terminal -
		 * do not retry the command.
		 */
		if (ulp_status == IOSTAT_LOCAL_REJECT &&
		    (((ulp_word4 & IOERR_PARAM_MASK) ==
		      IOERR_SLI_DOWN) ||
		     ((ulp_word4 & IOERR_PARAM_MASK) ==
		      IOERR_SLI_ABORTED)))
			goto out;

		retry = cmdiocb->retry;
		if (retry >= LPFC_MAX_NS_RETRY)
			goto out;

		retry++;
		lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
				 "0250 Retrying NS cmd %x\n", cmdcode);
		rc = lpfc_ns_cmd(vport, cmdcode, retry, 0);
		if (rc == 0)
			goto out;
	}

out:
	/* If the caller wanted a synchronous DA_ID completion, signal the
	 * wait obj and clear flag to reset the vport.
 */
	if (ndlp->save_flags & NLP_WAIT_FOR_DA_ID) {
		if (ndlp->da_id_waitq)
			wake_up(ndlp->da_id_waitq);
	}

	/* Clear the DA_ID wait flag under the node lock so a future
	 * synchronous DA_ID starts from a clean state.
	 */
	spin_lock_irq(&ndlp->lock);
	ndlp->save_flags &= ~NLP_WAIT_FOR_DA_ID;
	spin_unlock_irq(&ndlp->lock);

	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
	return;
}

/* Completion for RFT_ID (register FC-4 types): on an FS_ACC response
 * mark the registration done in ct_flags, then run common CT completion.
 */
static void
lpfc_cmpl_ct_cmd_rft_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RFT_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
	return;
}

/* Completion for RNN_ID (register node name): on FS_ACC record the
 * registration in ct_flags, then run common CT completion.
 */
static void
lpfc_cmpl_ct_cmd_rnn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RNN_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
	return;
}

/* Completion for RSPN_ID (register symbolic port name): on FS_ACC
 * record the registration in ct_flags, then run common CT completion.
 */
static void
lpfc_cmpl_ct_cmd_rspn_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RSPN_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
	return;
}

/* Completion for RSNN_NN (register symbolic node name) */
static void
lpfc_cmpl_ct_cmd_rsnn_nn(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			 struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *) outp->virt;
		/* Record a successful symbolic node name registration */
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RSNN_NN;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
	return;
}

/* Completion for DA_ID (deregister all FC-4 types).  Treated as
 * best-effort: all CT registration flags are cleared regardless of
 * the response status.
 */
static void
lpfc_cmpl_ct_cmd_da_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		       struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;

	/* even if it fails we will act as though it succeeded. */
	vport->ct_flags = 0;
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
	return;
}

/* Completion for RFF_ID (register FC-4 features): on FS_ACC record the
 * registration in ct_flags, then run common CT completion.
 */
static void
lpfc_cmpl_ct_cmd_rff_id(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
			struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	u32 ulp_status = get_job_ulpstatus(phba, rspiocb);

	if (ulp_status == IOSTAT_SUCCESS) {
		struct lpfc_dmabuf *outp;
		struct lpfc_sli_ct_request *CTrsp;

		outp = cmdiocb->rsp_dmabuf;
		CTrsp = (struct lpfc_sli_ct_request *)outp->virt;
		if (be16_to_cpu(CTrsp->CommandResponse.bits.CmdRsp) ==
		    SLI_CT_RESPONSE_FS_ACC)
			vport->ct_flags |= FC_CT_RFF_ID;
	}
	lpfc_cmpl_ct(phba, cmdiocb, rspiocb);
	return;
}

/*
 * Although the symbolic port name is thought to be an integer
 * as of January 18, 2016, leave it as a string until more of
 * the record state becomes defined.
 */
int
lpfc_vport_symbolic_port_name(struct lpfc_vport *vport, char *symbol,
			      size_t size)
{
	int n;

	/*
	 * Use the lpfc board number as the Symbolic Port
	 * Name object. NPIV is not in play so this integer
	 * value is sufficient and unique per FC-ID.
	 */
	n = scnprintf(symbol, size, "%d", vport->phba->brd_no);
	return n;
}

/* Build the symbolic node name string ("Emulex <model> FV<fw> DV<drv>
 * HN:<host> OS:<sysname>") into @symbol, truncating safely at @size.
 * Returns the resulting string length (bounded by @size).
 */
int
lpfc_vport_symbolic_node_name(struct lpfc_vport *vport, char *symbol,
			      size_t size)
{
	char fwrev[FW_REV_STR_SIZE] = {0};
	char tmp[MAXHOSTNAMELEN] = {0};

	memset(symbol, 0, size);

	scnprintf(tmp, sizeof(tmp), "Emulex %s", vport->phba->ModelName);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	lpfc_decode_firmware_rev(vport->phba, fwrev, 0);
	scnprintf(tmp, sizeof(tmp), " FV%s", fwrev);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	scnprintf(tmp, sizeof(tmp), " DV%s", lpfc_release_version);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	scnprintf(tmp, sizeof(tmp), " HN:%s", vport->phba->os_host_name);
	if (strlcat(symbol, tmp, size) >= size)
		goto buffer_done;

	/* Note :- OS name is "Linux" */
	scnprintf(tmp, sizeof(tmp), " OS:%s", init_utsname()->sysname);
	strlcat(symbol, tmp, size);

buffer_done:
	return strnlen(symbol, size);
}

/* Count the non-fabric nodes on this vport that are in MAPPED or
 * UNMAPPED state, under the fc_nodes list lock.
 */
static uint32_t
lpfc_find_map_node(struct lpfc_vport *vport)
{
	struct lpfc_nodelist *ndlp, *next_ndlp;
	unsigned long iflags;
	uint32_t cnt = 0;

	spin_lock_irqsave(&vport->fc_nodes_list_lock, iflags);
	list_for_each_entry_safe(ndlp, next_ndlp, &vport->fc_nodes, nlp_listp) {
		if (ndlp->nlp_type & NLP_FABRIC)
			continue;
		if ((ndlp->nlp_state == NLP_STE_MAPPED_NODE) ||
		    (ndlp->nlp_state == NLP_STE_UNMAPPED_NODE))
			cnt++;
	}
	spin_unlock_irqrestore(&vport->fc_nodes_list_lock, iflags);
	return cnt;
}

/*
 * This routine will return the FC4 Type associated with the CT
 * GID_FT command.
 */
int
lpfc_get_gidft_type(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb)
{
	struct lpfc_sli_ct_request *CtReq;
	struct lpfc_dmabuf *mp;
	uint32_t type;

	mp = cmdiocb->cmd_dmabuf;
	if (mp == NULL)
		return 0;

	/* Pull the FC4 type out of the original GID_FT request payload;
	 * only FCP and NVME are meaningful here.
	 */
	CtReq = (struct lpfc_sli_ct_request *)mp->virt;
	type = (uint32_t)CtReq->un.gid.Fc4Type;
	if ((type != SLI_CTPT_FCP) && (type != SLI_CTPT_NVME))
		return 0;
	return type;
}

/*
 * lpfc_ns_cmd
 * Description:
 *    Issue Cmd to NameServer
 *       SLI_CTNS_GID_FT
 *       LI_CTNS_RFT_ID
 */
int
lpfc_ns_cmd(struct lpfc_vport *vport, int cmdcode,
	    uint8_t retry, uint32_t context)
{
	struct lpfc_nodelist * ndlp;
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *mp, *bmp;
	struct lpfc_sli_ct_request *CtReq;
	struct ulp_bde64 *bpl;
	void (*cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
		      struct lpfc_iocbq *) = NULL;
	uint32_t *ptr;
	uint32_t rsp_size = 1024;
	size_t size;
	int rc = 0;

	/* NameServer node must already be logged in (UNMAPPED) */
	ndlp = lpfc_findnode_did(vport, NameServer_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) {
		rc = 1;
		goto ns_cmd_exit;
	}

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!mp) {
		rc = 2;
		goto ns_cmd_exit;
	}

	INIT_LIST_HEAD(&mp->list);
	mp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(mp->phys));
	if (!mp->virt) {
		rc = 3;
		goto ns_cmd_free_mp;
	}

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
	if (!bmp) {
		rc = 4;
		goto ns_cmd_free_mpvirt;
	}

	INIT_LIST_HEAD(&bmp->list);
	bmp->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &(bmp->phys));
	if (!bmp->virt) {
		rc = 5;
		goto ns_cmd_free_bmp;
	}

	/* NameServer Req */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0236 NameServer Req Data: x%x x%lx x%x x%x\n",
			 cmdcode, vport->fc_flag, vport->fc_rscn_id_cnt,
			 context);

	/* Build a single-entry BDE list pointing at the payload buffer.
	 * NOTE(review): le32_to_cpu is used where cpu_to_le32 reads more
	 * naturally; both are the same byte swap - confirm before changing.
	 */
	bpl = (struct ulp_bde64 *) bmp->virt;
	memset(bpl, 0, sizeof(struct ulp_bde64));
	bpl->addrHigh = le32_to_cpu(putPaddrHigh(mp->phys));
	bpl->addrLow = le32_to_cpu(putPaddrLow(mp->phys));
	bpl->tus.f.bdeFlags = 0;
	if (cmdcode == SLI_CTNS_GID_FT)
bpl->tus.f.bdeSize = GID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GID_PT) bpl->tus.f.bdeSize = GID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GFF_ID) bpl->tus.f.bdeSize = GFF_REQUEST_SZ; else if (cmdcode == SLI_CTNS_GFT_ID) bpl->tus.f.bdeSize = GFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFT_ID) bpl->tus.f.bdeSize = RFT_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RNN_ID) bpl->tus.f.bdeSize = RNN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSPN_ID) bpl->tus.f.bdeSize = RSPN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RSNN_NN) bpl->tus.f.bdeSize = RSNN_REQUEST_SZ; else if (cmdcode == SLI_CTNS_DA_ID) bpl->tus.f.bdeSize = DA_ID_REQUEST_SZ; else if (cmdcode == SLI_CTNS_RFF_ID) bpl->tus.f.bdeSize = RFF_REQUEST_SZ; else bpl->tus.f.bdeSize = 0; bpl->tus.w = le32_to_cpu(bpl->tus.w); CtReq = (struct lpfc_sli_ct_request *) mp->virt; memset(CtReq, 0, sizeof(struct lpfc_sli_ct_request)); CtReq->RevisionId.bits.Revision = SLI_CT_REVISION; CtReq->RevisionId.bits.InId = 0; CtReq->FsType = SLI_CT_DIRECTORY_SERVICE; CtReq->FsSubType = SLI_CT_DIRECTORY_NAME_SERVER; CtReq->CommandResponse.bits.Size = 0; switch (cmdcode) { case SLI_CTNS_GID_FT: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GID_FT); CtReq->un.gid.Fc4Type = context; if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; lpfc_set_disctmo(vport); cmpl = lpfc_cmpl_ct_cmd_gid_ft; rsp_size = FC_MAX_NS_RSP; break; case SLI_CTNS_GID_PT: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GID_PT); CtReq->un.gid.PortType = context; if (vport->port_state < LPFC_NS_QRY) vport->port_state = LPFC_NS_QRY; lpfc_set_disctmo(vport); cmpl = lpfc_cmpl_ct_cmd_gid_pt; rsp_size = FC_MAX_NS_RSP; break; case SLI_CTNS_GFF_ID: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GFF_ID); CtReq->un.gff.PortId = cpu_to_be32(context); cmpl = lpfc_cmpl_ct_cmd_gff_id; break; case SLI_CTNS_GFT_ID: CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_GFT_ID); CtReq->un.gft.PortId = cpu_to_be32(context); cmpl = 
lpfc_cmpl_ct_cmd_gft_id; break; case SLI_CTNS_RFT_ID: vport->ct_flags &= ~FC_CT_RFT_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RFT_ID); CtReq->un.rft.port_id = cpu_to_be32(vport->fc_myDID); /* Register Application Services type if vmid enabled. */ if (phba->cfg_vmid_app_header) CtReq->un.rft.app_serv_reg = cpu_to_be32(RFT_APP_SERV_REG); /* Register FC4 FCP type if enabled. */ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP) CtReq->un.rft.fcp_reg = cpu_to_be32(RFT_FCP_REG); /* Register NVME type if enabled. */ if (vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH || vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME) CtReq->un.rft.nvme_reg = cpu_to_be32(RFT_NVME_REG); ptr = (uint32_t *)CtReq; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6433 Issue RFT (%s %s %s): %08x %08x %08x " "%08x %08x %08x %08x %08x\n", CtReq->un.rft.fcp_reg ? "FCP" : " ", CtReq->un.rft.nvme_reg ? "NVME" : " ", CtReq->un.rft.app_serv_reg ? "APPS" : " ", *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); cmpl = lpfc_cmpl_ct_cmd_rft_id; break; case SLI_CTNS_RNN_ID: vport->ct_flags &= ~FC_CT_RNN_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RNN_ID); CtReq->un.rnn.PortId = cpu_to_be32(vport->fc_myDID); memcpy(CtReq->un.rnn.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); cmpl = lpfc_cmpl_ct_cmd_rnn_id; break; case SLI_CTNS_RSPN_ID: vport->ct_flags &= ~FC_CT_RSPN_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RSPN_ID); CtReq->un.rspn.PortId = cpu_to_be32(vport->fc_myDID); size = sizeof(CtReq->un.rspn.symbname); CtReq->un.rspn.len = lpfc_vport_symbolic_port_name(vport, CtReq->un.rspn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rspn_id; break; case SLI_CTNS_RSNN_NN: vport->ct_flags &= ~FC_CT_RSNN_NN; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RSNN_NN); memcpy(CtReq->un.rsnn.wwnn, &vport->fc_nodename, sizeof(struct lpfc_name)); size = 
sizeof(CtReq->un.rsnn.symbname); CtReq->un.rsnn.len = lpfc_vport_symbolic_node_name(vport, CtReq->un.rsnn.symbname, size); cmpl = lpfc_cmpl_ct_cmd_rsnn_nn; break; case SLI_CTNS_DA_ID: /* Implement DA_ID Nameserver request */ CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_DA_ID); CtReq->un.da_id.port_id = cpu_to_be32(vport->fc_myDID); cmpl = lpfc_cmpl_ct_cmd_da_id; break; case SLI_CTNS_RFF_ID: vport->ct_flags &= ~FC_CT_RFF_ID; CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(SLI_CTNS_RFF_ID); CtReq->un.rff.PortId = cpu_to_be32(vport->fc_myDID); CtReq->un.rff.fbits = FC4_FEATURE_INIT; /* The driver always supports FC_TYPE_FCP. However, the * caller can specify NVME (type x28) as well. But only * these that FC4 type is supported. */ if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_NVME)) && (context == FC_TYPE_NVME)) { if ((vport == phba->pport) && phba->nvmet_support) { CtReq->un.rff.fbits = (FC4_FEATURE_TARGET | FC4_FEATURE_NVME_DISC); lpfc_nvmet_update_targetport(phba); } else { lpfc_nvme_update_localport(vport); } CtReq->un.rff.type_code = context; } else if (((vport->cfg_enable_fc4_type == LPFC_ENABLE_BOTH) || (vport->cfg_enable_fc4_type == LPFC_ENABLE_FCP)) && (context == FC_TYPE_FCP)) CtReq->un.rff.type_code = context; else goto ns_cmd_free_bmpvirt; ptr = (uint32_t *)CtReq; lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6434 Issue RFF (%s): %08x %08x %08x %08x " "%08x %08x %08x %08x\n", (context == FC_TYPE_NVME) ? "NVME" : "FCP", *ptr, *(ptr + 1), *(ptr + 2), *(ptr + 3), *(ptr + 4), *(ptr + 5), *(ptr + 6), *(ptr + 7)); cmpl = lpfc_cmpl_ct_cmd_rff_id; break; } /* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count * to hold ndlp reference for the corresponding callback function. 
 */
	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry)) {
		/* On success, The cmpl function will free the buffers */
		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT,
			"Issue CT cmd:    cmd:x%x did:x%x",
			cmdcode, ndlp->nlp_DID, 0);
		return 0;
	}
	rc = 6;

	/* Unwind the allocations in reverse order of acquisition */
ns_cmd_free_bmpvirt:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
ns_cmd_free_bmp:
	kfree(bmp);
ns_cmd_free_mpvirt:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
ns_cmd_free_mp:
	kfree(mp);
ns_cmd_exit:
	lpfc_printf_vlog(vport, KERN_ERR, LOG_TRACE_EVENT,
			 "0266 Issue NameServer Req x%x err %d Data: x%lx "
			 "x%x\n",
			 cmdcode, rc, vport->fc_flag, vport->fc_rscn_id_cnt);
	return 1;
}

/**
 * lpfc_fdmi_rprt_defer - Check for any deferred FDMI RPRT commands
 * @phba: Pointer to HBA context object.
 * @mask: Initial port attributes mask
 *
 * This function checks to see if any vports have deferred their FDMI RPRT.
 * A vports RPRT may be deferred if it is issued before the primary ports
 * RHBA completes.
 */
static void
lpfc_fdmi_rprt_defer(struct lpfc_hba *phba, uint32_t mask)
{
	struct lpfc_vport **vports;
	struct lpfc_vport *vport;
	struct lpfc_nodelist *ndlp;
	int i;

	/* RHBA on the physical port is now complete */
	set_bit(HBA_RHBA_CMPL, &phba->hba_flag);

	vports = lpfc_create_vport_work_array(phba);
	if (vports) {
		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
			vport = vports[i];
			/* RPRT is always sent to the FDMI node on the
			 * physical port.
			 */
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				continue;
			if (vport->ct_flags & FC_CT_RPRT_DEFER) {
				vport->ct_flags &= ~FC_CT_RPRT_DEFER;
				vport->fdmi_port_mask = mask;
				lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0);
			}
		}
	}
	lpfc_destroy_vport_work_array(phba, vports);
}

/**
 * lpfc_cmpl_ct_disc_fdmi - Handle a discovery FDMI completion
 * @phba: Pointer to HBA context object.
 * @cmdiocb: Pointer to the command IOCBQ.
 * @rspiocb: Pointer to the response IOCBQ.
 *
 * This function to handle the completion of a driver initiated FDMI
 * CT command issued during discovery.
 */
*/ static void lpfc_cmpl_ct_disc_fdmi(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb, struct lpfc_iocbq *rspiocb) { struct lpfc_vport *vport = cmdiocb->vport; struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf; struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf; struct lpfc_sli_ct_request *CTcmd = inp->virt; struct lpfc_sli_ct_request *CTrsp = outp->virt; __be16 fdmi_cmd = CTcmd->CommandResponse.bits.CmdRsp; __be16 fdmi_rsp = CTrsp->CommandResponse.bits.CmdRsp; struct lpfc_nodelist *ndlp, *free_ndlp = NULL; uint32_t latt, cmd, err; u32 ulp_status = get_job_ulpstatus(phba, rspiocb); u32 ulp_word4 = get_job_word4(phba, rspiocb); latt = lpfc_els_chk_latt(vport); lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_CT, "FDMI cmpl: status:x%x/x%x latt:%d", ulp_status, ulp_word4, latt); if (latt || ulp_status) { lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY, "0229 FDMI cmd %04x failed, latt = %d " "ulp_status: (x%x/x%x), sli_flag x%x\n", be16_to_cpu(fdmi_cmd), latt, ulp_status, ulp_word4, phba->sli.sli_flag); /* Look for a retryable error */ if (ulp_status == IOSTAT_LOCAL_REJECT) { switch ((ulp_word4 & IOERR_PARAM_MASK)) { case IOERR_SLI_ABORTED: case IOERR_SLI_DOWN: /* Driver aborted this IO. No retry as error * is likely Offline->Online or some adapter * error. Recovery will try again, but if port * is not active there's no point to continue * issuing follow up FDMI commands. 
*/ if (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)) { free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); return; } break; case IOERR_ABORT_IN_PROGRESS: case IOERR_SEQUENCE_TIMEOUT: case IOERR_ILLEGAL_FRAME: case IOERR_NO_RESOURCES: case IOERR_ILLEGAL_COMMAND: cmdiocb->retry++; if (cmdiocb->retry >= LPFC_FDMI_MAX_RETRY) break; /* Retry the same FDMI command */ err = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocb, 0); if (err == IOCB_ERROR) break; return; default: break; } } } free_ndlp = cmdiocb->ndlp; lpfc_ct_free_iocb(phba, cmdiocb); lpfc_nlp_put(free_ndlp); ndlp = lpfc_findnode_did(vport, FDMI_DID); if (!ndlp) return; /* Check for a CT LS_RJT response */ cmd = be16_to_cpu(fdmi_cmd); if (be16_to_cpu(fdmi_rsp) == SLI_CT_RESPONSE_FS_RJT) { /* Log FDMI reject */ lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "0220 FDMI cmd FS_RJT Data: x%x", cmd); /* Should we fallback to FDMI-2 / FDMI-1 ? */ switch (cmd) { case SLI_MGMT_RHBA: if (vport->fdmi_hba_mask == LPFC_FDMI2_HBA_ATTR) { /* Fallback to FDMI-1 for HBA attributes */ vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; /* If HBA attributes are FDMI1, so should * port attributes be for consistency. 
*/ vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; /* Start over */ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); } return; case SLI_MGMT_RPRT: if (vport->port_type != LPFC_PHYSICAL_PORT) { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return; } if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { /* Fallback to FDMI-1 */ vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; /* Start over */ lpfc_fdmi_cmd(vport, ndlp, cmd, 0); return; } if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; /* Retry the same command */ lpfc_fdmi_cmd(vport, ndlp, cmd, 0); } return; case SLI_MGMT_RPA: /* No retry on Vendor, RPA only done on physical port */ if (phba->link_flag & LS_CT_VEN_RPA) { phba->link_flag &= ~LS_CT_VEN_RPA; if (phba->cmf_active_mode == LPFC_CFG_OFF) return; lpfc_printf_log(phba, KERN_WARNING, LOG_DISCOVERY | LOG_ELS, "6460 VEN FDMI RPA RJT\n"); return; } if (vport->fdmi_port_mask == LPFC_FDMI2_PORT_ATTR) { /* Fallback to FDMI-1 */ vport->fdmi_hba_mask = LPFC_FDMI1_HBA_ATTR; vport->fdmi_port_mask = LPFC_FDMI1_PORT_ATTR; /* Start over */ lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0); return; } if (vport->fdmi_port_mask == LPFC_FDMI2_SMART_ATTR) { vport->fdmi_port_mask = LPFC_FDMI2_PORT_ATTR; /* Retry the same command */ lpfc_fdmi_cmd(vport, ndlp, cmd, 0); } return; } } /* * On success, need to cycle thru FDMI registration for discovery * DHBA -> DPRT -> RHBA -> RPA (physical port) * DPRT -> RPRT (vports) */ switch (cmd) { case SLI_MGMT_RHBA: /* Check for any RPRTs deferred till after RHBA completes */ lpfc_fdmi_rprt_defer(phba, vport->fdmi_port_mask); lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA, 0); break; case SLI_MGMT_DHBA: lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0); break; case SLI_MGMT_DPRT: if (vport->port_type == LPFC_PHYSICAL_PORT) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RHBA, 0); } else { ndlp = lpfc_findnode_did(phba->pport, FDMI_DID); if (!ndlp) return; /* Only issue a RPRT for the vport if the RHBA * for the 
physical port completes successfully. * We may have to defer the RPRT accordingly. */ if (test_bit(HBA_RHBA_CMPL, &phba->hba_flag)) { lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT, 0); } else { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY, "6078 RPRT deferred\n"); vport->ct_flags |= FC_CT_RPRT_DEFER; } } break; case SLI_MGMT_RPA: if (vport->port_type == LPFC_PHYSICAL_PORT && phba->sli4_hba.pc_sli4_params.mi_ver) { /* mi is only for the phyical port, no vports */ if (phba->link_flag & LS_CT_VEN_RPA) { lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY | LOG_ELS | LOG_CGN_MGMT, "6449 VEN RPA FDMI Success\n"); phba->link_flag &= ~LS_CT_VEN_RPA; break; } lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_CGN_MGMT, "6210 Issue Vendor MI FDMI %x\n", phba->sli4_hba.pc_sli4_params.mi_ver); /* CGN is only for the physical port, no vports */ if (lpfc_fdmi_cmd(vport, ndlp, cmd, LPFC_FDMI_VENDOR_ATTR_mi) == 0) phba->link_flag |= LS_CT_VEN_RPA; lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "6458 Send MI FDMI:%x Flag x%x\n", phba->sli4_hba.pc_sli4_params.mi_ver, phba->link_flag); } else { lpfc_printf_log(phba, KERN_INFO, LOG_DISCOVERY | LOG_ELS, "6459 No FDMI VEN MI support - " "RPA Success\n"); } break; } return; } /** * lpfc_fdmi_change_check - Check for changed FDMI parameters * @vport: pointer to a host virtual N_Port data structure. * * Check how many mapped NPorts we are connected to * Check if our hostname changed * Called from hbeat timeout routine to check if any FDMI parameters * changed. If so, re-register those Attributes. 
 */
void
lpfc_fdmi_change_check(struct lpfc_vport *vport)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_nodelist *ndlp;
	uint16_t cnt;

	/* Nothing to refresh unless the link is up */
	if (!lpfc_is_link_up(phba))
		return;

	/* Must be connected to a Fabric */
	if (!test_bit(FC_FABRIC, &vport->fc_flag))
		return;

	/* FDMI requests are sent to the well-known management server DID */
	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp)
		return;

	/* Check if system hostname changed */
	if (strcmp(phba->os_host_name, init_utsname()->nodename)) {
		memset(phba->os_host_name, 0, sizeof(phba->os_host_name));
		scnprintf(phba->os_host_name, sizeof(phba->os_host_name), "%s",
			  init_utsname()->nodename);
		/* Refresh the symbolic node name with the name server too */
		lpfc_ns_cmd(vport, SLI_CTNS_RSNN_NN, 0, 0);

		/* Since this effects multiple HBA and PORT attributes, we need
		 * de-register and go thru the whole FDMI registration cycle.
		 * DHBA -> DPRT -> RHBA -> RPA (physical port)
		 * DPRT -> RPRT (vports)
		 */
		if (vport->port_type == LPFC_PHYSICAL_PORT) {
			/* For extra Vendor RPA */
			phba->link_flag &= ~LS_CT_VEN_RPA;
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DHBA, 0);
		} else {
			/* vports deregister against the physical port's
			 * management-server node
			 */
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return;
			lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_DPRT, 0);
		}

		/* Since this code path registers all the port attributes
		 * we can just return without further checking.
		 */
		return;
	}

	/* Only continue if the discovered-port-count attribute is enabled */
	if (!(vport->fdmi_port_mask & LPFC_FDMI_PORT_ATTR_num_disc))
		return;

	/* Check if the number of mapped NPorts changed */
	cnt = lpfc_find_map_node(vport);
	if (cnt == vport->fdmi_num_disc)
		return;

	/* Re-register just the num_disc attribute (RPA for the physical
	 * port, RPRT for vports)
	 */
	if (vport->port_type == LPFC_PHYSICAL_PORT) {
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPA,
			      LPFC_FDMI_PORT_ATTR_num_disc);
	} else {
		ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
		if (!ndlp)
			return;
		lpfc_fdmi_cmd(vport, ndlp, SLI_MGMT_RPRT,
			      LPFC_FDMI_PORT_ATTR_num_disc);
	}
}

/* Emit a 32-bit FDMI attribute (big-endian Type/Len header + value)
 * into @attr.  Returns the number of bytes written.
 */
static inline int
lpfc_fdmi_set_attr_u32(void *attr, uint16_t attrtype, uint32_t attrval)
{
	struct lpfc_fdmi_attr_u32 *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	ae->value_u32 = cpu_to_be32(attrval);

	return size;
}

/* Emit a single-WWN FDMI attribute into @attr.
 * Returns the number of bytes written.
 */
static inline int
lpfc_fdmi_set_attr_wwn(void *attr, uint16_t attrtype, struct lpfc_name *wwn)
{
	struct lpfc_fdmi_attr_wwn *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	/* WWN's assumed to be bytestreams - Big Endian presentation */
	memcpy(ae->name, wwn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));

	return size;
}

/* Emit a node-name + port-name pair FDMI attribute into @attr.
 * Returns the number of bytes written.
 */
static inline int
lpfc_fdmi_set_attr_fullwwn(void *attr, uint16_t attrtype,
			   struct lpfc_name *wwnn, struct lpfc_name *wwpn)
{
	struct lpfc_fdmi_attr_fullwwn *ae = attr;
	u8 *nname = ae->nname;
	u8 *pname = ae->pname;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);
	/* WWN's assumed to be bytestreams - Big Endian presentation */
	memcpy(nname, wwnn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));
	memcpy(pname, wwpn,
	       min_t(size_t, sizeof(struct lpfc_name), sizeof(__be64)));

	return size;
}

/* Emit a string FDMI attribute into @attr, NUL-padded up to the next
 * 32-bit boundary.  Returns the number of bytes written.
 */
static inline int
lpfc_fdmi_set_attr_string(void *attr, uint16_t attrtype, char *attrstring)
{
	struct lpfc_fdmi_attr_string *ae = attr;
	int len, size;

	/*
	 * We are trusting the caller that if a fdmi string field
	 * is capped at 64 bytes, the caller passes in a string of
	 * 64 bytes or less.
	 */
	strscpy(ae->value_string, attrstring, sizeof(ae->value_string));
	len = strnlen(ae->value_string, sizeof(ae->value_string));
	/* round string length to a 32bit boundary */
	len += (len & 3) ? (4 - (len & 3)) : 4;
	/* size is Type/Len (4 bytes) plus string length */
	size = FOURBYTES + len;

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);

	return size;
}

/* Bitfields for FC4 Types that can be reported */
#define ATTR_FC4_CT	0x00000001
#define ATTR_FC4_FCP	0x00000002
#define ATTR_FC4_NVME	0x00000004

/* Emit an FC4-types bitmap attribute into @attr, setting one bit per
 * supported FC-4 protocol type.  Returns the number of bytes written.
 */
static inline int
lpfc_fdmi_set_attr_fc4types(void *attr, uint16_t attrtype, uint32_t typemask)
{
	struct lpfc_fdmi_attr_fc4types *ae = attr;
	int size = sizeof(*ae);

	ae->type = cpu_to_be16(attrtype);
	ae->len = cpu_to_be16(size);

	if (typemask & ATTR_FC4_FCP)
		ae->value_types[2] = 0x01; /* Type 0x8 - FCP */

	if (typemask & ATTR_FC4_CT)
		ae->value_types[7] = 0x01; /* Type 0x20 - CT */

	if (typemask & ATTR_FC4_NVME)
		ae->value_types[6] = 0x01; /* Type 0x28 - NVME */

	return size;
}

/* Routines for all individual HBA attributes */

/* RHBA_NODENAME: HBA node WWN from the service parameters */
static int
lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RHBA_NODENAME,
			&vport->fc_sparam.nodeName);
}

/* RHBA_MANUFACTURER: fixed manufacturer string */
static int
lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport, void *attr)
{
	/* This string MUST be consistent with other FC platforms
	 * supported by Broadcom.
	 */
	return lpfc_fdmi_set_attr_string(attr, RHBA_MANUFACTURER,
			"Emulex Corporation");
}

/* RHBA_SERIAL_NUMBER: adapter serial number from VPD */
static int
lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_SERIAL_NUMBER,
			phba->SerialNumber);
}

/* RHBA_MODEL: adapter model name */
static int
lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL,
			phba->ModelName);
}

/* RHBA_MODEL_DESCRIPTION: adapter model description */
static int
lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_MODEL_DESCRIPTION,
			phba->ModelDesc);
}

/* RHBA_HARDWARE_VERSION: BIU revision formatted as 8 hex digits */
static int
lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	lpfc_vpd_t *vp = &phba->vpd;
	char buf[16] = { 0 };

	snprintf(buf, sizeof(buf), "%08x", vp->rev.biuRev);

	return lpfc_fdmi_set_attr_string(attr, RHBA_HARDWARE_VERSION, buf);
}

/* RHBA_DRIVER_VERSION: lpfc driver release string */
static int
lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RHBA_DRIVER_VERSION,
			lpfc_release_version);
}

/* RHBA_OPTION_ROM_VERSION: firmware rev on SLI4, option ROM string
 * otherwise
 */
static int
lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[64] = { 0 };

	if (phba->sli_rev == LPFC_SLI_REV4) {
		lpfc_decode_firmware_rev(phba, buf, 1);

		return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
				buf);
	}

	return lpfc_fdmi_set_attr_string(attr, RHBA_OPTION_ROM_VERSION,
			phba->OptionROMVersion);
}

/* RHBA_FIRMWARE_VERSION: decoded firmware revision string */
static int
lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[64] = { 0 };

	lpfc_decode_firmware_rev(phba, buf, 1);

	return lpfc_fdmi_set_attr_string(attr, RHBA_FIRMWARE_VERSION, buf);
}

/* RHBA_OS_NAME_VERSION: "sysname release version" from utsname */
static int
lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	snprintf(buf, sizeof(buf), "%s %s %s",
		 init_utsname()->sysname,
		 init_utsname()->release,
		 init_utsname()->version);

	return lpfc_fdmi_set_attr_string(attr,
			RHBA_OS_NAME_VERSION, buf);
}

/* RHBA_MAX_CT_PAYLOAD_LEN: largest CT payload this HBA accepts */
static int
lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RHBA_MAX_CT_PAYLOAD_LEN,
			LPFC_MAX_CT_SIZE);
}

/* RHBA_SYM_NODENAME: symbolic node name built by the discovery code */
static int
lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	lpfc_vport_symbolic_node_name(vport, buf, sizeof(buf));

	return lpfc_fdmi_set_attr_string(attr, RHBA_SYM_NODENAME, buf);
}

/* RHBA_VENDOR_INFO: not populated by this driver */
static int
lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RHBA_VENDOR_INFO, 0);
}

/* RHBA_NUM_PORTS: always one port per driver instance */
static int
lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport, void *attr)
{
	/* Each driver instance corresponds to a single port */
	return lpfc_fdmi_set_attr_u32(attr, RHBA_NUM_PORTS, 1);
}

/* RHBA_FABRIC_WWNN: fabric node name learned at FLOGI */
static int
lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RHBA_FABRIC_WWNN,
			&vport->fabric_nodename);
}

/* RHBA_BIOS_VERSION: boot BIOS version string */
static int
lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RHBA_BIOS_VERSION,
			phba->BIOSVersion);
}

/* RHBA_BIOS_STATE: reported as 0 (unknown) */
static int
lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport, void *attr)
{
	/* Driver doesn't have access to this information */
	return lpfc_fdmi_set_attr_u32(attr, RHBA_BIOS_STATE, 0);
}

/* RHBA_VENDOR_ID: fixed vendor identifier */
static int
lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RHBA_VENDOR_ID, "EMULEX");
}

/*
 * Routines for all individual PORT attributes
 */

/* RPRT_SUPPORTED_FC4_TYPES: CT + FCP, plus NVME when the SLI4 firmware
 * supports it and this is the physical port
 */
static int
lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 fc4types;

	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);

	/* Check to see if Firmware supports NVME and on physical port */
	if ((phba->sli_rev == LPFC_SLI_REV4) && (vport == phba->pport) &&
	    phba->sli4_hba.pc_sli4_params.nvme)
		fc4types |= ATTR_FC4_NVME;

	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_SUPPORTED_FC4_TYPES,
			fc4types);
}

static int
lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 speeds = 0;
	u32 tcfg;
	u8 i, cnt;

	/* RPRT_SUPPORTED_SPEED: bitmap of link speeds the port can run.
	 * On FC, trunked (aggregated) links report the aggregate speed,
	 * so the per-lane LMT capability is shifted up by the lane count.
	 */
	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		/* Count the configured trunk lanes (SLI4 only) */
		cnt = 0;
		if (phba->sli_rev == LPFC_SLI_REV4) {
			tcfg = phba->sli4_hba.conf_trunk;
			for (i = 0; i < 4; i++, tcfg >>= 1)
				if (tcfg & 1)
					cnt++;
		}

		if (cnt > 2) { /* 4 lane trunk group */
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_64GFC;
		} else if (cnt) { /* 2 lane trunk group */
			if (phba->lmt & LMT_128Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_64GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_32GFC;
		} else {
			/* Untrunked: report each LMT capability directly */
			if (phba->lmt & LMT_256Gb)
				speeds |= HBA_PORTSPEED_256GFC;
			if (phba->lmt & LMT_128Gb)
				speeds |= HBA_PORTSPEED_128GFC;
			if (phba->lmt & LMT_64Gb)
				speeds |= HBA_PORTSPEED_64GFC;
			if (phba->lmt & LMT_32Gb)
				speeds |= HBA_PORTSPEED_32GFC;
			if (phba->lmt & LMT_16Gb)
				speeds |= HBA_PORTSPEED_16GFC;
			if (phba->lmt & LMT_10Gb)
				speeds |= HBA_PORTSPEED_10GFC;
			if (phba->lmt & LMT_8Gb)
				speeds |= HBA_PORTSPEED_8GFC;
			if (phba->lmt & LMT_4Gb)
				speeds |= HBA_PORTSPEED_4GFC;
			if (phba->lmt & LMT_2Gb)
				speeds |= HBA_PORTSPEED_2GFC;
			if (phba->lmt & LMT_1Gb)
				speeds |= HBA_PORTSPEED_1GFC;
		}
	} else {
		/* FCoE links support only one speed */
		switch (phba->fc_linkspeed) {
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			speeds = HBA_PORTSPEED_10GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			speeds = HBA_PORTSPEED_25GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			speeds = HBA_PORTSPEED_40GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			speeds = HBA_PORTSPEED_100GE;
			break;
		}
	}

	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_SPEED, speeds);
}

/* RPRT_PORT_SPEED: the current negotiated link speed, mapped from the
 * driver's link-speed enum to the FDMI port-speed encoding
 */
static int
lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 speeds = 0;

	if (!test_bit(HBA_FCOE_MODE, &phba->hba_flag)) {
		switch (phba->fc_linkspeed) {
		case LPFC_LINK_SPEED_1GHZ:
			speeds = HBA_PORTSPEED_1GFC;
			break;
		case LPFC_LINK_SPEED_2GHZ:
			speeds = HBA_PORTSPEED_2GFC;
			break;
		case LPFC_LINK_SPEED_4GHZ:
			speeds = HBA_PORTSPEED_4GFC;
			break;
		case LPFC_LINK_SPEED_8GHZ:
			speeds = HBA_PORTSPEED_8GFC;
			break;
		case LPFC_LINK_SPEED_10GHZ:
			speeds = HBA_PORTSPEED_10GFC;
			break;
		case LPFC_LINK_SPEED_16GHZ:
			speeds = HBA_PORTSPEED_16GFC;
			break;
		case LPFC_LINK_SPEED_32GHZ:
			speeds = HBA_PORTSPEED_32GFC;
			break;
		case LPFC_LINK_SPEED_64GHZ:
			speeds = HBA_PORTSPEED_64GFC;
			break;
		case LPFC_LINK_SPEED_128GHZ:
			speeds = HBA_PORTSPEED_128GFC;
			break;
		case LPFC_LINK_SPEED_256GHZ:
			speeds = HBA_PORTSPEED_256GFC;
			break;
		default:
			speeds = HBA_PORTSPEED_UNKNOWN;
			break;
		}
	} else {
		switch (phba->fc_linkspeed) {
		case LPFC_ASYNC_LINK_SPEED_10GBPS:
			speeds = HBA_PORTSPEED_10GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_25GBPS:
			speeds = HBA_PORTSPEED_25GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_40GBPS:
			speeds = HBA_PORTSPEED_40GE;
			break;
		case LPFC_ASYNC_LINK_SPEED_100GBPS:
			speeds = HBA_PORTSPEED_100GE;
			break;
		default:
			speeds = HBA_PORTSPEED_UNKNOWN;
			break;
		}
	}

	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_SPEED, speeds);
}

/* RPRT_MAX_FRAME_SIZE: BB receive size from the login service params
 * (12-bit value split across two service-parameter bytes)
 */
static int
lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport, void *attr)
{
	struct serv_parm *hsp = (struct serv_parm *)&vport->fc_sparam;

	return lpfc_fdmi_set_attr_u32(attr, RPRT_MAX_FRAME_SIZE,
			(((uint32_t)hsp->cmn.bbRcvSizeMsb & 0x0F) << 8) |
			  (uint32_t)hsp->cmn.bbRcvSizeLsb);
}

/* RPRT_OS_DEVICE_NAME: the sysfs path of this SCSI host */
static int
lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport, void *attr)
{
	struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
	char buf[64] = { 0 };

	snprintf(buf, sizeof(buf), "/sys/class/scsi_host/host%d",
		 shost->host_no);

	return lpfc_fdmi_set_attr_string(attr, RPRT_OS_DEVICE_NAME, buf);
}

/* RPRT_HOST_NAME: cached OS hostname (refreshed by
 * lpfc_fdmi_change_check when utsname changes)
 */
static int
lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport, void *attr)
{
	char buf[64] = { 0 };

	scnprintf(buf, sizeof(buf), "%s", vport->phba->os_host_name);

	return lpfc_fdmi_set_attr_string(attr, RPRT_HOST_NAME,
			buf);
}

/* RPRT_NODENAME: port's node WWN from the service parameters */
static int
lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_NODENAME,
			&vport->fc_sparam.nodeName);
}

/* RPRT_PORTNAME: port WWN from the service parameters */
static int
lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_PORTNAME,
			&vport->fc_sparam.portName);
}

/* RPRT_SYM_PORTNAME: symbolic port name built by the discovery code */
static int
lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport, void *attr)
{
	char buf[256] = { 0 };

	lpfc_vport_symbolic_port_name(vport, buf, sizeof(buf));

	return lpfc_fdmi_set_attr_string(attr, RPRT_SYM_PORTNAME, buf);
}

/* RPRT_PORT_TYPE: NL_Port on loop topology, N_Port otherwise */
static int
lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_TYPE,
			(phba->fc_topology == LPFC_TOPOLOGY_LOOP) ?
				LPFC_FDMI_PORTTYPE_NLPORT :
				LPFC_FDMI_PORTTYPE_NPORT);
}

/* RPRT_SUPPORTED_CLASS: classes of service the port supports */
static int
lpfc_fdmi_port_attr_class(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SUPPORTED_CLASS,
			FC_COS_CLASS2 | FC_COS_CLASS3);
}

/* RPRT_FABRICNAME: fabric port name learned at FLOGI */
static int
lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_wwn(attr, RPRT_FABRICNAME,
			&vport->fabric_portname);
}

/* RPRT_ACTIVE_FC4_TYPES: CT + FCP, plus NVME when enabled on the
 * physical port
 */
static int
lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	u32 fc4types;

	fc4types = (ATTR_FC4_CT | ATTR_FC4_FCP);

	/* Check to see if NVME is configured or not */
	if (vport == phba->pport &&
	    phba->cfg_enable_fc4_type & LPFC_ENABLE_NVME)
		fc4types |= ATTR_FC4_NVME;

	return lpfc_fdmi_set_attr_fc4types(attr, RPRT_ACTIVE_FC4_TYPES,
			fc4types);
}

/* RPRT_PORT_STATE: registration only happens while the port is online */
static int
lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_STATE,
			LPFC_FDMI_PORTSTATE_ONLINE);
}

/* RPRT_DISC_PORT: number of mapped NPorts; also caches the count so
 * lpfc_fdmi_change_check can detect changes
 */
static int
lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport, void *attr)
{
	vport->fdmi_num_disc = lpfc_find_map_node(vport);

	return lpfc_fdmi_set_attr_u32(attr, RPRT_DISC_PORT,
			vport->fdmi_num_disc);
}

/* RPRT_PORT_ID: this port's FC address identifier (DID) */
static int
lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
			    void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_PORT_ID, vport->fc_myDID);
}

/* RPRT_SMART_SERVICE: Smart SAN service name string */
static int
lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_SERVICE,
			"Smart SAN Initiator");
}

/* RPRT_SMART_GUID: node WWN + port WWN pair as the Smart SAN GUID */
static int
lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_fullwwn(attr, RPRT_SMART_GUID,
			&vport->fc_sparam.nodeName,
			&vport->fc_sparam.portName);
}

/* RPRT_SMART_VERSION: Smart SAN protocol version string */
static int
lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_VERSION,
			"Smart SAN Version 2.0");
}

/* RPRT_SMART_MODEL: adapter model name */
static int
lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;

	return lpfc_fdmi_set_attr_string(attr, RPRT_SMART_MODEL,
			phba->ModelName);
}

/* RPRT_SMART_PORT_INFO: 2 for an NPIV vport, 1 for the physical port */
static int
lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport, void *attr)
{
	/* SRIOV (type 3) is not supported */

	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_PORT_INFO,
			(vport->vpi) ?  2 /* NPIV */ : 1 /* Physical */);
}

/* RPRT_SMART_QOS: QoS not supported */
static int
lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_QOS, 0);
}

/* RPRT_SMART_SECURITY: security supported */
static int
lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport, void *attr)
{
	return lpfc_fdmi_set_attr_u32(attr, RPRT_SMART_SECURITY, 1);
}

/* RPRT_VENDOR_MI: vendor management-interface version string.
 * NOTE(review): plain sprintf into buf[32]; safe only while mi_ver
 * formats to <= 31 chars with "%04d" — confirm mi_ver's range, or
 * prefer scnprintf for consistency with the rest of this file.
 */
static int
lpfc_fdmi_vendor_attr_mi(struct lpfc_vport *vport, void *attr)
{
	struct lpfc_hba *phba = vport->phba;
	char buf[32] = { 0 };

	sprintf(buf, "ELXE2EM:%04d", phba->sli4_hba.pc_sli4_params.mi_ver);

	return lpfc_fdmi_set_attr_string(attr, RPRT_VENDOR_MI, buf);
}

/* RHBA attribute jump table */
static int (*lpfc_fdmi_hba_action[])
	(struct lpfc_vport *vport, void *attrbuf) = {
	/* Action routine                 Mask bit     Attribute type */
	lpfc_fdmi_hba_attr_wwnn,	  /* bit0     RHBA_NODENAME           */
	lpfc_fdmi_hba_attr_manufacturer,  /* bit1     RHBA_MANUFACTURER       */
	lpfc_fdmi_hba_attr_sn,		  /* bit2     RHBA_SERIAL_NUMBER      */
	lpfc_fdmi_hba_attr_model,	  /* bit3     RHBA_MODEL              */
	lpfc_fdmi_hba_attr_description,	  /* bit4     RHBA_MODEL_DESCRIPTION  */
	lpfc_fdmi_hba_attr_hdw_ver,	  /* bit5     RHBA_HARDWARE_VERSION   */
	lpfc_fdmi_hba_attr_drvr_ver,	  /* bit6     RHBA_DRIVER_VERSION     */
	lpfc_fdmi_hba_attr_rom_ver,	  /* bit7     RHBA_OPTION_ROM_VERSION */
	lpfc_fdmi_hba_attr_fmw_ver,	  /* bit8     RHBA_FIRMWARE_VERSION   */
	lpfc_fdmi_hba_attr_os_ver,	  /* bit9     RHBA_OS_NAME_VERSION    */
	lpfc_fdmi_hba_attr_ct_len,	  /* bit10    RHBA_MAX_CT_PAYLOAD_LEN */
	lpfc_fdmi_hba_attr_symbolic_name, /* bit11    RHBA_SYM_NODENAME       */
	lpfc_fdmi_hba_attr_vendor_info,	  /* bit12    RHBA_VENDOR_INFO        */
	lpfc_fdmi_hba_attr_num_ports,	  /* bit13    RHBA_NUM_PORTS          */
	lpfc_fdmi_hba_attr_fabric_wwnn,	  /* bit14    RHBA_FABRIC_WWNN        */
	lpfc_fdmi_hba_attr_bios_ver,	  /* bit15    RHBA_BIOS_VERSION       */
	lpfc_fdmi_hba_attr_bios_state,	  /* bit16    RHBA_BIOS_STATE         */
	lpfc_fdmi_hba_attr_vendor_id,	  /* bit17    RHBA_VENDOR_ID          */
};

/* RPA / RPRT attribute jump table */
static int (*lpfc_fdmi_port_action[])
	(struct lpfc_vport *vport, void *attrbuf) = {
	/* Action routine                   Mask bit   Attribute type */
	lpfc_fdmi_port_attr_fc4type,	    /* bit0	RPRT_SUPPORT_FC4_TYPES  */
	lpfc_fdmi_port_attr_support_speed,  /* bit1	RPRT_SUPPORTED_SPEED    */
	lpfc_fdmi_port_attr_speed,	    /* bit2	RPRT_PORT_SPEED         */
	lpfc_fdmi_port_attr_max_frame,	    /* bit3	RPRT_MAX_FRAME_SIZE     */
	lpfc_fdmi_port_attr_os_devname,	    /* bit4	RPRT_OS_DEVICE_NAME     */
	lpfc_fdmi_port_attr_host_name,	    /* bit5	RPRT_HOST_NAME          */
	lpfc_fdmi_port_attr_wwnn,	    /* bit6	RPRT_NODENAME           */
	lpfc_fdmi_port_attr_wwpn,	    /* bit7	RPRT_PORTNAME           */
	lpfc_fdmi_port_attr_symbolic_name,  /* bit8	RPRT_SYM_PORTNAME       */
	lpfc_fdmi_port_attr_port_type,	    /* bit9	RPRT_PORT_TYPE          */
	lpfc_fdmi_port_attr_class,	    /* bit10	RPRT_SUPPORTED_CLASS    */
	lpfc_fdmi_port_attr_fabric_wwpn,    /* bit11	RPRT_FABRICNAME         */
	lpfc_fdmi_port_attr_active_fc4type, /* bit12	RPRT_ACTIVE_FC4_TYPES   */
	lpfc_fdmi_port_attr_port_state,	    /* bit13	RPRT_PORT_STATE         */
	lpfc_fdmi_port_attr_num_disc,	    /* bit14	RPRT_DISC_PORT          */
	lpfc_fdmi_port_attr_nportid,	    /* bit15	RPRT_PORT_ID            */
	lpfc_fdmi_smart_attr_service,	    /* bit16	RPRT_SMART_SERVICE      */
	lpfc_fdmi_smart_attr_guid,	    /* bit17	RPRT_SMART_GUID         */
	lpfc_fdmi_smart_attr_version,	    /* bit18	RPRT_SMART_VERSION      */
	lpfc_fdmi_smart_attr_model,	    /* bit19	RPRT_SMART_MODEL        */
	lpfc_fdmi_smart_attr_port_info,	    /* bit20	RPRT_SMART_PORT_INFO    */
	lpfc_fdmi_smart_attr_qos,	    /* bit21	RPRT_SMART_QOS          */
	lpfc_fdmi_smart_attr_security,	    /* bit22	RPRT_SMART_SECURITY     */
	lpfc_fdmi_vendor_attr_mi,	    /* bit23	RPRT_VENDOR_MI          */
};

/**
 * lpfc_fdmi_cmd - Build and send a FDMI cmd to the specified NPort
 * @vport: pointer to a host virtual N_Port data structure.
 * @ndlp: ndlp to send FDMI cmd to (if NULL use FDMI_DID)
 * @cmdcode: FDMI command to send
 * @new_mask: Mask of HBA or PORT Attributes to send
 *
 * Builds and sends a FDMI command using the CT subsystem.
 *
 * Return: 0 when the CT request was issued (or skipped), 1 on failure
 * to build/issue the request.
 */
int
lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
	      int cmdcode, uint32_t new_mask)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *rq, *rsp;
	struct lpfc_sli_ct_request *CtReq;
	struct ulp_bde64_le *bde;
	uint32_t bit_pos;
	uint32_t size, addsz;
	uint32_t rsp_size;
	uint32_t mask;
	struct lpfc_fdmi_reg_hba *rh;
	struct lpfc_fdmi_port_entry *pe;
	struct lpfc_fdmi_reg_portattr *pab = NULL, *base = NULL;
	struct lpfc_fdmi_attr_block *ab = NULL;
	int  (*func)(struct lpfc_vport *vport, void *attrbuf);
	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb);

	if (!ndlp)
		return 0;

	cmpl = lpfc_cmpl_ct_disc_fdmi; /* called from discovery */

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	rq = kmalloc(sizeof(*rq), GFP_KERNEL);
	if (!rq)
		goto fdmi_cmd_exit;

	rq->virt = lpfc_mbuf_alloc(phba, 0, &rq->phys);
	if (!rq->virt)
		goto fdmi_cmd_free_rq;

	/* Allocate buffer for Buffer ptr list */
	rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
	if (!rsp)
		goto fdmi_cmd_free_rqvirt;

	rsp->virt = lpfc_mbuf_alloc(phba, 0, &rsp->phys);
	if (!rsp->virt)
		goto fdmi_cmd_free_rsp;

	INIT_LIST_HEAD(&rq->list);
	INIT_LIST_HEAD(&rsp->list);

	/* mbuf buffers are 1K in length - aka LPFC_BPL_SIZE */
	memset(rq->virt, 0, LPFC_BPL_SIZE);
	rsp_size = LPFC_BPL_SIZE;

	/* FDMI request */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0218 FDMI Request x%x mask x%x Data: x%x x%lx x%x\n",
			 cmdcode, new_mask, vport->fdmi_port_mask,
			 vport->fc_flag, vport->port_state);

	CtReq = (struct lpfc_sli_ct_request *)rq->virt;

	/* First populate the CT_IU preamble */
	CtReq->RevisionId.bits.Revision = SLI_CT_REVISION;
	CtReq->RevisionId.bits.InId = 0;

	CtReq->FsType = SLI_CT_MANAGEMENT_SERVICE;
	CtReq->FsSubType = SLI_CT_FDMI_Subtypes;

	CtReq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);

	size = 0;

	/* Next fill in the specific FDMI cmd information */
	switch (cmdcode) {
	case SLI_MGMT_RHAT:
	case SLI_MGMT_RHBA:
		rh = (struct lpfc_fdmi_reg_hba *)&CtReq->un;
		/* HBA Identifier */
		memcpy(&rh->hi.PortName, &phba->pport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		size += sizeof(struct lpfc_fdmi_hba_ident);

		if (cmdcode == SLI_MGMT_RHBA) {
			/* Registered Port List */
			/* One entry (port) per adapter */
			rh->rpl.EntryCnt = cpu_to_be32(1);
			memcpy(&rh->rpl.pe.PortName,
			       &phba->pport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
			size += sizeof(struct lpfc_fdmi_reg_port_list);
		}

		ab = (struct lpfc_fdmi_attr_block *)((uint8_t *)rh + size);
		ab->EntryCnt = 0;
		size += FOURBYTES;	/* add length of EntryCnt field */

		bit_pos = 0;
		if (new_mask)
			mask = new_mask;
		else
			mask = vport->fdmi_hba_mask;

		/* Mask will dictate what attributes to build in the request */
		while (mask) {
			if (mask & 0x1) {
				func = lpfc_fdmi_hba_action[bit_pos];
				addsz = func(vport, ((uint8_t *)rh + size));
				if (addsz) {
					ab->EntryCnt++;
					size += addsz;
				}
				/* check if another attribute fits */
				if ((size + FDMI_MAX_ATTRLEN) >
					(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
					goto hba_out;
			}
			mask = mask >> 1;
			bit_pos++;
		}
hba_out:
		ab->EntryCnt = cpu_to_be32(ab->EntryCnt);
		/* Total size */
		size += GID_REQUEST_SZ - 4;
		break;

	case SLI_MGMT_RPRT:
		/* For a vport, RPRT is addressed to the physical port's
		 * management-server node.
		 * NOTE(review): returning 0 here leaks rq/rsp and their
		 * mbufs allocated above — looks like this should unwind
		 * through the free labels instead; confirm against the
		 * upstream lpfc history.
		 */
		if (vport->port_type != LPFC_PHYSICAL_PORT) {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return 0;
		}
		fallthrough;
	case SLI_MGMT_RPA:
		/* Store base ptr right after preamble */
		base = (struct lpfc_fdmi_reg_portattr *)&CtReq->un;

		if (cmdcode == SLI_MGMT_RPRT) {
			rh = (struct lpfc_fdmi_reg_hba *)base;
			/* HBA Identifier */
			memcpy(&rh->hi.PortName,
			       &phba->pport->fc_sparam.portName,
			       sizeof(struct lpfc_name));
			pab = (struct lpfc_fdmi_reg_portattr *)
				((uint8_t *)base + sizeof(struct lpfc_name));
			size += sizeof(struct lpfc_name);
		} else {
			pab = base;
		}

		memcpy((uint8_t *)&pab->PortName,
		       (uint8_t *)&vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		pab->ab.EntryCnt = 0;
		/* add length of name and EntryCnt field */
		size += sizeof(struct lpfc_name) + FOURBYTES;

		bit_pos = 0;
		if (new_mask)
			mask = new_mask;
		else
			mask = vport->fdmi_port_mask;

		/* Mask will dictate what attributes to build in the request */
		while (mask) {
			if (mask & 0x1) {
				func = lpfc_fdmi_port_action[bit_pos];
				addsz = func(vport, ((uint8_t *)base + size));
				if (addsz) {
					pab->ab.EntryCnt++;
					size += addsz;
				}
				/* check if another attribute fits */
				if ((size + FDMI_MAX_ATTRLEN) >
					(LPFC_BPL_SIZE - LPFC_CT_PREAMBLE))
					goto port_out;
			}
			mask = mask >> 1;
			bit_pos++;
		}
port_out:
		pab->ab.EntryCnt = cpu_to_be32(pab->ab.EntryCnt);
		size += GID_REQUEST_SZ - 4;
		break;

	case SLI_MGMT_GHAT:
	case SLI_MGMT_GRPL:
		rsp_size = FC_MAX_NS_RSP;
		fallthrough;
	case SLI_MGMT_DHBA:
	case SLI_MGMT_DHAT:
		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
		memcpy((uint8_t *)&pe->PortName,
		       (uint8_t *)&vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
		break;

	case SLI_MGMT_GPAT:
	case SLI_MGMT_GPAS:
		rsp_size = FC_MAX_NS_RSP;
		fallthrough;
	case SLI_MGMT_DPRT:
		/* NOTE(review): same potential rq/rsp leak as the RPRT
		 * case above when the FDMI node lookup fails.
		 */
		if (vport->port_type != LPFC_PHYSICAL_PORT) {
			ndlp = lpfc_findnode_did(phba->pport, FDMI_DID);
			if (!ndlp)
				return 0;
		}
		fallthrough;
	case SLI_MGMT_DPA:
		pe = (struct lpfc_fdmi_port_entry *)&CtReq->un;
		memcpy((uint8_t *)&pe->PortName,
		       (uint8_t *)&vport->fc_sparam.portName,
		       sizeof(struct lpfc_name));
		size = GID_REQUEST_SZ - 4 + sizeof(struct lpfc_name);
		break;
	case SLI_MGMT_GRHL:
		size = GID_REQUEST_SZ - 4;
		break;
	default:
		lpfc_printf_vlog(vport, KERN_WARNING, LOG_DISCOVERY,
				 "0298 FDMI cmdcode x%x not supported\n",
				 cmdcode);
		goto fdmi_cmd_free_rspvirt;
	}
	CtReq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);

	/* Single 64-bit BDE describing the request payload */
	bde = (struct ulp_bde64_le *)rsp->virt;
	bde->addr_high = cpu_to_le32(putPaddrHigh(rq->phys));
	bde->addr_low = cpu_to_le32(putPaddrLow(rq->phys));
	bde->type_size = cpu_to_le32(ULP_BDE64_TYPE_BDE_64 <<
				     ULP_BDE64_TYPE_SHIFT);
	bde->type_size |= cpu_to_le32(size);

	/*
	 * The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
	 * to hold ndlp reference for the corresponding callback function.
	 */
	if (!lpfc_ct_cmd(vport, rq, rsp, ndlp, cmpl, rsp_size, 0))
		return 0;

	/* Failure unwinding: free in reverse order of allocation */
fdmi_cmd_free_rspvirt:
	lpfc_mbuf_free(phba, rsp->virt, rsp->phys);
fdmi_cmd_free_rsp:
	kfree(rsp);
fdmi_cmd_free_rqvirt:
	lpfc_mbuf_free(phba, rq->virt, rq->phys);
fdmi_cmd_free_rq:
	kfree(rq);
fdmi_cmd_exit:
	/* Issue FDMI request failed */
	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "0244 Issue FDMI request failed Data: x%x\n",
			 cmdcode);
	return 1;
}

/**
 * lpfc_delayed_disc_tmo - Timeout handler for delayed discovery timer.
 * @t: Context object of the timer.
 *
 * This function set the WORKER_DELAYED_DISC_TMO flag and wake up
 * the worker thread.
 **/
void
lpfc_delayed_disc_tmo(struct timer_list *t)
{
	struct lpfc_vport *vport = from_timer(vport, t, delayed_disc_tmo);
	struct lpfc_hba   *phba = vport->phba;
	uint32_t  tmo_posted;
	unsigned long iflag;

	spin_lock_irqsave(&vport->work_port_lock, iflag);
	/* Only wake the worker if the event was not already posted */
	tmo_posted = vport->work_port_events & WORKER_DELAYED_DISC_TMO;
	if (!tmo_posted)
		vport->work_port_events |= WORKER_DELAYED_DISC_TMO;
	spin_unlock_irqrestore(&vport->work_port_lock, iflag);

	if (!tmo_posted)
		lpfc_worker_wake_up(phba);
	return;
}

/**
 * lpfc_delayed_disc_timeout_handler - Function called by worker thread to
 *      handle delayed discovery.
 * @vport: pointer to a host virtual N_Port data structure.
 *
 * This function start nport discovery of the vport.
 **/
void
lpfc_delayed_disc_timeout_handler(struct lpfc_vport *vport)
{
	/* Only proceed if discovery was actually deferred on this vport */
	if (!test_and_clear_bit(FC_DISC_DELAYED, &vport->fc_flag))
		return;

	lpfc_do_scr_ns_plogi(vport->phba, vport);
}

/* Decode the adapter firmware revision from VPD into a printable string.
 * @fwrevision: output buffer (caller-supplied; at least FW_REV_STR_SIZE
 *              bytes for the SLI4 path)
 * @flag: when non-zero, append the firmware name to the revision
 */
void
lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
{
	struct lpfc_sli *psli = &phba->sli;
	lpfc_vpd_t *vp = &phba->vpd;
	uint32_t b1, b2, b3, b4, i, rev;
	char c;
	uint32_t *ptr, str[4];
	uint8_t *fwname;

	if (phba->sli_rev == LPFC_SLI_REV4)
		snprintf(fwrevision, FW_REV_STR_SIZE, "%s", vp->rev.opFwName);
	else if (vp->rev.rBit) {
		/* rBit set: revision packed into bit fields */
		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			rev = vp->rev.sli2FwRev;
		else
			rev = vp->rev.sli1FwRev;

		b1 = (rev & 0x0000f000) >> 12;
		b2 = (rev & 0x00000f00) >> 8;
		b3 = (rev & 0x000000c0) >> 6;
		b4 = (rev & 0x00000030) >> 4;

		switch (b4) {
		case 0:
			c = 'N';
			break;
		case 1:
			c = 'A';
			break;
		case 2:
			c = 'B';
			break;
		case 3:
			c = 'X';
			break;
		default:
			c = 0;
			break;
		}
		b4 = (rev & 0x0000000f);

		if (psli->sli_flag & LPFC_SLI_ACTIVE)
			fwname = vp->rev.sli2FwName;
		else
			fwname = vp->rev.sli1FwName;

		/* Space-padded firmware name: truncate at first blank */
		for (i = 0; i < 16; i++)
			if (fwname[i] == 0x20)
				fwname[i] = 0;

		/* Byte-swap the first 12 name bytes into str.
		 * NOTE(review): str[3] is never written — the "%s" below
		 * relies on a NUL appearing within the first 12 bytes of
		 * fwname; confirm VPD guarantees this.
		 */
		ptr = (uint32_t*)fwname;

		for (i = 0; i < 3; i++)
			str[i] = be32_to_cpu(*ptr++);

		if (c == 0) {
			if (flag)
				sprintf(fwrevision, "%d.%d%d (%s)",
					b1, b2, b3, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d", b1,
					b2, b3);
		} else {
			if (flag)
				sprintf(fwrevision, "%d.%d%d%c%d (%s)",
					b1, b2, b3, c,
					b4, (char *)str);
			else
				sprintf(fwrevision, "%d.%d%d%c%d",
					b1, b2, b3, c, b4);
		}
	} else {
		/* rBit clear: simple byte-packed revision */
		rev = vp->rev.smFwRev;

		b1 = (rev & 0xff000000) >> 24;
		b2 = (rev & 0x00f00000) >> 20;
		b3 = (rev & 0x000f0000) >> 16;
		c  = (rev & 0x0000ff00) >> 8;
		b4 = (rev & 0x000000ff);

		sprintf(fwrevision, "%d.%d%d%c%d", b1, b2, b3, c, b4);
	}
	return;
}

/* Completion handler for VMID application-server CT commands
 * (RAPP_IDENT / DAPP_IDENT / DALLAPP_ID).  Updates the vport's VMID
 * table and hash table from the response.
 *
 * NOTE(review): cmdiocb is freed up-front for SLI_CTAS_DALLAPP_ID, yet
 * some DALLAPP_ID paths (FS_RJT with unexpected reason, and the normal
 * switch fallthrough) still reach free_res, which frees it again —
 * looks like a double lpfc_ct_free_iocb; confirm against upstream.
 */
static void
lpfc_cmpl_ct_cmd_vmid(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		      struct lpfc_iocbq *rspiocb)
{
	struct lpfc_vport *vport = cmdiocb->vport;
	struct lpfc_dmabuf *inp = cmdiocb->cmd_dmabuf;
	struct lpfc_dmabuf *outp = cmdiocb->rsp_dmabuf;
	struct lpfc_sli_ct_request *ctcmd = inp->virt;
	struct lpfc_sli_ct_request *ctrsp = outp->virt;
	__be16 rsp = ctrsp->CommandResponse.bits.CmdRsp;
	struct app_id_object *app;
	struct lpfc_nodelist *ndlp = cmdiocb->ndlp;
	u32 cmd, hash, bucket;
	struct lpfc_vmid *vmp, *cur;
	u8 *data = outp->virt;
	int i;

	cmd = be16_to_cpu(ctcmd->CommandResponse.bits.CmdRsp);
	if (cmd == SLI_CTAS_DALLAPP_ID)
		lpfc_ct_free_iocb(phba, cmdiocb);

	if (lpfc_els_chk_latt(vport) || get_job_ulpstatus(phba, rspiocb)) {
		if (cmd != SLI_CTAS_DALLAPP_ID)
			goto free_res;
	}
	/* Check for a CT LS_RJT response */
	if (be16_to_cpu(rsp) == SLI_CT_RESPONSE_FS_RJT) {
		if (cmd != SLI_CTAS_DALLAPP_ID)
			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
					 "3306 VMID FS_RJT Data: x%x x%x x%x\n",
					 cmd, ctrsp->ReasonCode,
					 ctrsp->Explanation);

		if ((cmd != SLI_CTAS_DALLAPP_ID) ||
		    (ctrsp->ReasonCode != SLI_CT_UNABLE_TO_PERFORM_REQ) ||
		    (ctrsp->Explanation != SLI_CT_APP_ID_NOT_AVAILABLE)) {
			/* If DALLAPP_ID failed retry later */
			if (cmd == SLI_CTAS_DALLAPP_ID)
				set_bit(FC_DEREGISTER_ALL_APP_ID,
					&vport->load_flag);
			goto free_res;
		}
	}

	switch (cmd) {
	case SLI_CTAS_RAPP_IDENT:
		app = (struct app_id_object *)(RAPP_IDENT_OFFSET + data);
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "6712 RAPP_IDENT app id %d port id x%x id "
				 "len %d\n", be32_to_cpu(app->app_id),
				 be32_to_cpu(app->port_id),
				 app->obj.entity_id_len);

		if (app->obj.entity_id_len == 0 || app->port_id == 0)
			goto free_res;

		/* Record the fabric-assigned app id in our VMID entry */
		hash = lpfc_vmid_hash_fn(app->obj.entity_id,
					 app->obj.entity_id_len);
		vmp = lpfc_get_vmid_from_hashtable(vport, hash,
						  app->obj.entity_id);
		if (vmp) {
			write_lock(&vport->vmid_lock);
			vmp->un.app_id = be32_to_cpu(app->app_id);
			vmp->flag |= LPFC_VMID_REGISTERED;
			vmp->flag &= ~LPFC_VMID_REQ_REGISTER;
			write_unlock(&vport->vmid_lock);
			/* Set IN USE flag */
			vport->vmid_flag |= LPFC_VMID_IN_USE;
		} else {
			lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
					 "6901 No entry found %s hash %d\n",
					 app->obj.entity_id, hash);
		}
		break;
	case SLI_CTAS_DAPP_IDENT:
		app = (struct app_id_object *)(DAPP_IDENT_OFFSET + data);
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "6713 DAPP_IDENT app id %d port id x%x\n",
				 be32_to_cpu(app->app_id),
				 be32_to_cpu(app->port_id));
		break;
	case SLI_CTAS_DALLAPP_ID:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "8856 Deregistered all app ids\n");
		/* Wipe every in-use VMID slot */
		read_lock(&vport->vmid_lock);
		for (i = 0; i < phba->cfg_max_vmid; i++) {
			vmp = &vport->vmid[i];
			if (vmp->flag != LPFC_VMID_SLOT_FREE)
				memset(vmp, 0, sizeof(struct lpfc_vmid));
		}
		read_unlock(&vport->vmid_lock);
		/* for all elements in the hash table */
		if (!hash_empty(vport->hash_table))
			hash_for_each(vport->hash_table, bucket, cur, hnode)
				hash_del(&cur->hnode);
		set_bit(FC_ALLOW_VMID, &vport->load_flag);
		break;
	default:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "8857 Invalid command code\n");
	}
 free_res:
	lpfc_ct_free_iocb(phba, cmdiocb);
	lpfc_nlp_put(ndlp);
}

/**
 * lpfc_vmid_cmd - Build and send a FDMI cmd to the specified NPort
 * @vport: pointer to a host virtual N_Port data structure.
 * @cmdcode: application server command code to send
 * @vmid: pointer to vmid info structure
 *
 * Builds and sends a FDMI command using the CT subsystem.
 */
int
lpfc_vmid_cmd(struct lpfc_vport *vport, int cmdcode, struct lpfc_vmid *vmid)
{
	struct lpfc_hba *phba = vport->phba;
	struct lpfc_dmabuf *mp, *bmp;
	struct lpfc_sli_ct_request *ctreq;
	struct ulp_bde64 *bpl;
	u32 size;
	u32 rsp_size;
	u8 *data;
	struct lpfc_vmid_rapp_ident_list *rap;
	struct lpfc_vmid_dapp_ident_list *dap;
	u8 retry = 0;
	struct lpfc_nodelist *ndlp;

	void (*cmpl)(struct lpfc_hba *phba, struct lpfc_iocbq *cmdiocb,
		     struct lpfc_iocbq *rspiocb);

	/* The management server node must exist and be in UNMAPPED state
	 * before a CT request can be issued to it.
	 */
	ndlp = lpfc_findnode_did(vport, FDMI_DID);
	if (!ndlp || ndlp->nlp_state != NLP_STE_UNMAPPED_NODE)
		return 0;

	cmpl = lpfc_cmpl_ct_cmd_vmid;

	/* fill in BDEs for command */
	/* Allocate buffer for command payload */
	mp = kmalloc(sizeof(*mp), GFP_KERNEL);
	if (!mp)
		goto vmid_free_mp_exit;

	mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
	if (!mp->virt)
		goto vmid_free_mp_virt_exit;

	/* Allocate buffer for Buffer ptr list */
	bmp = kmalloc(sizeof(*bmp), GFP_KERNEL);
	if (!bmp)
		goto vmid_free_bmp_exit;

	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
	if (!bmp->virt)
		goto vmid_free_bmp_virt_exit;

	INIT_LIST_HEAD(&mp->list);
	INIT_LIST_HEAD(&bmp->list);

	lpfc_printf_vlog(vport, KERN_INFO, LOG_DISCOVERY,
			 "3275 VMID Request Data: x%lx x%x x%x\n",
			 vport->fc_flag, vport->port_state, cmdcode);
	ctreq = (struct lpfc_sli_ct_request *)mp->virt;
	data = mp->virt;

	/* First populate the CT_IU preamble */
	memset(data, 0, LPFC_BPL_SIZE);
	ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
	ctreq->RevisionId.bits.InId = 0;

	ctreq->FsType = SLI_CT_MANAGEMENT_SERVICE;
	ctreq->FsSubType = SLI_CT_APP_SEV_Subtypes;

	ctreq->CommandResponse.bits.CmdRsp = cpu_to_be16(cmdcode);
	rsp_size = LPFC_BPL_SIZE;
	size = 0;

	/* Build the command-specific payload after the CT_IU preamble */
	switch (cmdcode) {
	case SLI_CTAS_RAPP_IDENT:
		/* Register one application (VM) identifier */
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "1329 RAPP_IDENT for %s\n", vmid->host_vmid);
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		/* NOTE(review): the payload pointer uses DAPP_IDENT_OFFSET
		 * while the size below uses RAPP_IDENT_OFFSET -- presumably
		 * the two offsets are equal; confirm against lpfc headers.
		 */
		rap = (struct lpfc_vmid_rapp_ident_list *)
			(DAPP_IDENT_OFFSET + data);
		rap->no_of_objects = cpu_to_be32(1);
		rap->obj[0].entity_id_len = vmid->vmid_len;
		memcpy(rap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
		size = RAPP_IDENT_OFFSET +
		       struct_size(rap, obj, be32_to_cpu(rap->no_of_objects));
		retry = 1;
		break;

	case SLI_CTAS_GALLAPPIA_ID:
		/* Get all application identifiers for this port */
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		size = GALLAPPIA_ID_SIZE;
		break;

	case SLI_CTAS_DAPP_IDENT:
		/* Deregister one application (VM) identifier */
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "1469 DAPP_IDENT for %s\n", vmid->host_vmid);
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		dap = (struct lpfc_vmid_dapp_ident_list *)
			(DAPP_IDENT_OFFSET + data);
		dap->no_of_objects = cpu_to_be32(1);
		dap->obj[0].entity_id_len = vmid->vmid_len;
		memcpy(dap->obj[0].entity_id, vmid->host_vmid, vmid->vmid_len);
		size = DAPP_IDENT_OFFSET +
		       struct_size(dap, obj, be32_to_cpu(dap->no_of_objects));
		/* Clear the registered flag now; the completion handler
		 * confirms the deregistration.
		 */
		write_lock(&vport->vmid_lock);
		vmid->flag &= ~LPFC_VMID_REGISTERED;
		write_unlock(&vport->vmid_lock);
		retry = 1;
		break;

	case SLI_CTAS_DALLAPP_ID:
		/* Deregister all application identifiers */
		ctreq->un.PortID = cpu_to_be32(vport->fc_myDID);
		size = DALLAPP_ID_SIZE;
		break;

	default:
		lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
				 "7062 VMID cmdcode x%x not supported\n",
				 cmdcode);
		goto vmid_free_all_mem;
	}

	ctreq->CommandResponse.bits.Size = cpu_to_be16(rsp_size);

	/* Single BDE describing the command payload */
	bpl = (struct ulp_bde64 *)bmp->virt;
	bpl->addrHigh = putPaddrHigh(mp->phys);
	bpl->addrLow = putPaddrLow(mp->phys);
	bpl->tus.f.bdeFlags = 0;
	bpl->tus.f.bdeSize = size;

	/* The lpfc_ct_cmd/lpfc_get_req shall increment ndlp reference count
	 * to hold ndlp reference for the corresponding callback function.
	 */
	if (!lpfc_ct_cmd(vport, mp, bmp, ndlp, cmpl, rsp_size, retry))
		return 0;

	/* Error unwind: each label frees everything allocated before it */
vmid_free_all_mem:
	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
vmid_free_bmp_virt_exit:
	kfree(bmp);
vmid_free_bmp_exit:
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
vmid_free_mp_virt_exit:
	kfree(mp);
vmid_free_mp_exit:

	/* Issue CT request failed */
	lpfc_printf_vlog(vport, KERN_DEBUG, LOG_DISCOVERY,
			 "3276 VMID CT request failed Data: x%x\n", cmdcode);
	return -EIO;
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * max9867.h -- MAX9867 ALSA SoC Audio driver
 *
 * Copyright 2013-2015 Maxim Integrated Products
 */

#ifndef _MAX9867_H
#define _MAX9867_H

/* MAX9867 register space */

/* Status / interrupt registers */
#define MAX9867_STATUS 0x00
#define MAX9867_JACKSTATUS 0x01
#define MAX9867_AUXHIGH 0x02
#define MAX9867_AUXLOW 0x03
#define MAX9867_INTEN 0x04

/* System clock register and its bit fields */
#define MAX9867_SYSCLK 0x05
#define MAX9867_FREQ_MASK 0xF
#define MAX9867_PSCLK_SHIFT 0x4
#define MAX9867_PSCLK_WIDTH 0x2
#define MAX9867_PSCLK_MASK (0x03<<MAX9867_PSCLK_SHIFT)
/* Prescaler selection by MCLK range (MHz) */
#define MAX9867_PSCLK_10_20 0x1
#define MAX9867_PSCLK_20_40 0x2
#define MAX9867_PSCLK_40_60 0x3

/* Audio clock (PLL/NI divider) registers */
#define MAX9867_AUDIOCLKHIGH 0x06
#define MAX9867_NI_HIGH_MASK 0x7F
#define MAX9867_NI_LOW_MASK 0xFE
#define MAX9867_PLL (1<<7)
#define MAX9867_AUDIOCLKLOW 0x07
#define MAX9867_RAPID_LOCK 0x01

/* Digital audio interface register 1A and its bit fields */
#define MAX9867_IFC1A 0x08
#define MAX9867_MASTER (1<<7)
#define MAX9867_I2S_DLY (1<<4)
#define MAX9867_SDOUT_HIZ (1<<3)
#define MAX9867_TDM_MODE (1<<2)
#define MAX9867_WCI_MODE (1<<6)
#define MAX9867_BCI_MODE (1<<5)

/* Digital audio interface register 1B: BCLK divider selection */
#define MAX9867_IFC1B 0x09
#define MAX9867_IFC1B_BCLK_MASK 7
#define MAX9867_IFC1B_64X 0x01
#define MAX9867_IFC1B_48X 0x02
#define MAX9867_IFC1B_PCLK_2 0x04
#define MAX9867_IFC1B_PCLK_4 0x05
#define MAX9867_IFC1B_PCLK_8 0x06
#define MAX9867_IFC1B_PCLK_16 0x07

/* Codec filter configuration */
#define MAX9867_CODECFLTR 0x0a
#define MAX9867_CODECFLTR_MODE (1<<7)

/* Level / volume / gain registers */
#define MAX9867_SIDETONE 0x0b
#define MAX9867_DACLEVEL 0x0c
#define MAX9867_ADCLEVEL 0x0d
#define MAX9867_LEFTLINELVL 0x0e
#define MAX9867_RIGHTLINELVL 0x0f
#define MAX9867_LEFTVOL 0x10
#define MAX9867_RIGHTVOL 0x11
#define MAX9867_LEFTMICGAIN 0x12
#define MAX9867_RIGHTMICGAIN 0x13

/* Input / mic / mode configuration */
#define MAX9867_INPUTCONFIG 0x14
#define MAX9867_MICCONFIG 0x15
#define MAX9867_MODECONFIG 0x16

/* Power management */
#define MAX9867_PWRMAN 0x17
#define MAX9867_PWRMAN_SHDN (1<<7)
#define MAX9867_REVISION 0xff

#define MAX9867_CACHEREGNUM 10

#endif
// SPDX-License-Identifier: GPL-2.0 // #ifndef __SELFTEST_OVERLAYFS_WRAPPERS_H__ #define __SELFTEST_OVERLAYFS_WRAPPERS_H__ #define _GNU_SOURCE #include <linux/types.h> #include <linux/mount.h> #include <sys/syscall.h> static inline int sys_fsopen(const char *fsname, unsigned int flags) { return syscall(__NR_fsopen, fsname, flags); } static inline int sys_fsconfig(int fd, unsigned int cmd, const char *key, const char *value, int aux) { return syscall(__NR_fsconfig, fd, cmd, key, value, aux); } static inline int sys_fsmount(int fd, unsigned int flags, unsigned int attr_flags) { return syscall(__NR_fsmount, fd, flags, attr_flags); } static inline int sys_mount(const char *src, const char *tgt, const char *fst, unsigned long flags, const void *data) { return syscall(__NR_mount, src, tgt, fst, flags, data); } #ifndef MOVE_MOUNT_F_EMPTY_PATH #define MOVE_MOUNT_F_EMPTY_PATH 0x00000004 /* Empty from path permitted */ #endif static inline int sys_move_mount(int from_dfd, const char *from_pathname, int to_dfd, const char *to_pathname, unsigned int flags) { return syscall(__NR_move_mount, from_dfd, from_pathname, to_dfd, to_pathname, flags); } #endif
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * cxd2880_tnrdmd.h
 * Sony CXD2880 DVB-T2/T tuner + demodulator driver
 * common control interface
 *
 * Copyright (C) 2016, 2017, 2018 Sony Semiconductor Solutions Corporation
 */

#ifndef CXD2880_TNRDMD_H
#define CXD2880_TNRDMD_H

#include <linux/atomic.h>

#include "cxd2880_common.h"
#include "cxd2880_io.h"
#include "cxd2880_dtv.h"
#include "cxd2880_dvbt.h"
#include "cxd2880_dvbt2.h"

/* Maximum number of register writes remembered for replay (see cfg_mem) */
#define CXD2880_TNRDMD_MAX_CFG_MEM_COUNT 100

/* Re-enable register updates after slvt_freeze_reg(); result is ignored */
#define slvt_unfreeze_reg(tnr_dmd) ((void)((tnr_dmd)->io->write_reg\
((tnr_dmd)->io, CXD2880_IO_TGT_DMD, 0x01, 0x00)))

/* Interrupt type bit masks (see cxd2880_tnrdmd_interrupt_read/clear) */
#define CXD2880_TNRDMD_INTERRUPT_TYPE_BUF_UNDERFLOW 0x0001
#define CXD2880_TNRDMD_INTERRUPT_TYPE_BUF_OVERFLOW 0x0002
#define CXD2880_TNRDMD_INTERRUPT_TYPE_BUF_ALMOST_EMPTY 0x0004
#define CXD2880_TNRDMD_INTERRUPT_TYPE_BUF_ALMOST_FULL 0x0008
#define CXD2880_TNRDMD_INTERRUPT_TYPE_BUF_RRDY 0x0010
#define CXD2880_TNRDMD_INTERRUPT_TYPE_ILLEGAL_COMMAND 0x0020
#define CXD2880_TNRDMD_INTERRUPT_TYPE_ILLEGAL_ACCESS 0x0040
#define CXD2880_TNRDMD_INTERRUPT_TYPE_CPU_ERROR 0x0100
#define CXD2880_TNRDMD_INTERRUPT_TYPE_LOCK 0x0200
#define CXD2880_TNRDMD_INTERRUPT_TYPE_INV_LOCK 0x0400
#define CXD2880_TNRDMD_INTERRUPT_TYPE_NOOFDM 0x0800
#define CXD2880_TNRDMD_INTERRUPT_TYPE_EWS 0x1000
#define CXD2880_TNRDMD_INTERRUPT_TYPE_EEW 0x2000
#define CXD2880_TNRDMD_INTERRUPT_TYPE_FEC_FAIL 0x4000

/* Lock-interrupt source selection bits */
#define CXD2880_TNRDMD_INTERRUPT_LOCK_SEL_L1POST_OK 0x01
#define CXD2880_TNRDMD_INTERRUPT_LOCK_SEL_DMD_LOCK 0x02
#define CXD2880_TNRDMD_INTERRUPT_LOCK_SEL_TS_LOCK 0x04

/* Chip revision identifiers as read from the device */
enum cxd2880_tnrdmd_chip_id {
	CXD2880_TNRDMD_CHIP_ID_UNKNOWN = 0x00,
	CXD2880_TNRDMD_CHIP_ID_CXD2880_ES1_0X = 0x62,
	CXD2880_TNRDMD_CHIP_ID_CXD2880_ES1_11 = 0x6a
};

/* True for any supported chip revision */
#define CXD2880_TNRDMD_CHIP_ID_VALID(chip_id) \
	(((chip_id) == CXD2880_TNRDMD_CHIP_ID_CXD2880_ES1_0X) || \
	 ((chip_id) == CXD2880_TNRDMD_CHIP_ID_CXD2880_ES1_11))

/* Driver state machine */
enum cxd2880_tnrdmd_state {
	CXD2880_TNRDMD_STATE_UNKNOWN,
	CXD2880_TNRDMD_STATE_SLEEP,
	CXD2880_TNRDMD_STATE_ACTIVE,
	CXD2880_TNRDMD_STATE_INVALID
};

/* Diversity role of this instance: standalone, main, or sub demod */
enum cxd2880_tnrdmd_divermode {
	CXD2880_TNRDMD_DIVERMODE_SINGLE,
	CXD2880_TNRDMD_DIVERMODE_MAIN,
	CXD2880_TNRDMD_DIVERMODE_SUB
};

enum cxd2880_tnrdmd_clockmode {
	CXD2880_TNRDMD_CLOCKMODE_UNKNOWN,
	CXD2880_TNRDMD_CLOCKMODE_A,
	CXD2880_TNRDMD_CLOCKMODE_B,
	CXD2880_TNRDMD_CLOCKMODE_C
};

/* Transport stream output interface selection */
enum cxd2880_tnrdmd_tsout_if {
	CXD2880_TNRDMD_TSOUT_IF_TS,
	CXD2880_TNRDMD_TSOUT_IF_SPI,
	CXD2880_TNRDMD_TSOUT_IF_SDIO
};

/* Crystal sharing arrangement between main and sub demod */
enum cxd2880_tnrdmd_xtal_share {
	CXD2880_TNRDMD_XTAL_SHARE_NONE,
	CXD2880_TNRDMD_XTAL_SHARE_EXTREF,
	CXD2880_TNRDMD_XTAL_SHARE_MASTER,
	CXD2880_TNRDMD_XTAL_SHARE_SLAVE
};

enum cxd2880_tnrdmd_spectrum_sense {
	CXD2880_TNRDMD_SPECTRUM_NORMAL,
	CXD2880_TNRDMD_SPECTRUM_INV
};

/* Configuration item identifiers for cxd2880_tnrdmd_set_cfg() */
enum cxd2880_tnrdmd_cfg_id {
	CXD2880_TNRDMD_CFG_OUTPUT_SEL_MSB,
	CXD2880_TNRDMD_CFG_TSVALID_ACTIVE_HI,
	CXD2880_TNRDMD_CFG_TSSYNC_ACTIVE_HI,
	CXD2880_TNRDMD_CFG_TSERR_ACTIVE_HI,
	CXD2880_TNRDMD_CFG_LATCH_ON_POSEDGE,
	CXD2880_TNRDMD_CFG_TSCLK_CONT,
	CXD2880_TNRDMD_CFG_TSCLK_MASK,
	CXD2880_TNRDMD_CFG_TSVALID_MASK,
	CXD2880_TNRDMD_CFG_TSERR_MASK,
	CXD2880_TNRDMD_CFG_TSERR_VALID_DIS,
	CXD2880_TNRDMD_CFG_TSPIN_CURRENT,
	CXD2880_TNRDMD_CFG_TSPIN_PULLUP_MANUAL,
	CXD2880_TNRDMD_CFG_TSPIN_PULLUP,
	CXD2880_TNRDMD_CFG_TSCLK_FREQ,
	CXD2880_TNRDMD_CFG_TSBYTECLK_MANUAL,
	CXD2880_TNRDMD_CFG_TS_PACKET_GAP,
	CXD2880_TNRDMD_CFG_TS_BACKWARDS_COMPATIBLE,
	CXD2880_TNRDMD_CFG_PWM_VALUE,
	CXD2880_TNRDMD_CFG_INTERRUPT,
	CXD2880_TNRDMD_CFG_INTERRUPT_LOCK_SEL,
	CXD2880_TNRDMD_CFG_INTERRUPT_INV_LOCK_SEL,
	CXD2880_TNRDMD_CFG_TS_BUF_ALMOST_EMPTY_THRS,
	CXD2880_TNRDMD_CFG_TS_BUF_ALMOST_FULL_THRS,
	CXD2880_TNRDMD_CFG_TS_BUF_RRDY_THRS,
	CXD2880_TNRDMD_CFG_FIXED_CLOCKMODE,
	CXD2880_TNRDMD_CFG_CABLE_INPUT,
	CXD2880_TNRDMD_CFG_DVBT2_FEF_INTERMITTENT_BASE,
	CXD2880_TNRDMD_CFG_DVBT2_FEF_INTERMITTENT_LITE,
	CXD2880_TNRDMD_CFG_BLINDTUNE_DVBT2_FIRST,
	CXD2880_TNRDMD_CFG_DVBT_BERN_PERIOD,
	CXD2880_TNRDMD_CFG_DVBT_VBER_PERIOD,
	CXD2880_TNRDMD_CFG_DVBT_PER_MES,
	CXD2880_TNRDMD_CFG_DVBT2_BBER_MES,
	CXD2880_TNRDMD_CFG_DVBT2_LBER_MES,
	CXD2880_TNRDMD_CFG_DVBT2_PER_MES,
};

enum cxd2880_tnrdmd_lock_result {
	CXD2880_TNRDMD_LOCK_RESULT_NOTDETECT,
	CXD2880_TNRDMD_LOCK_RESULT_LOCKED,
	CXD2880_TNRDMD_LOCK_RESULT_UNLOCKED
};

/* GPIO pin function selection (see cxd2880_tnrdmd_gpio_set_cfg) */
enum cxd2880_tnrdmd_gpio_mode {
	CXD2880_TNRDMD_GPIO_MODE_OUTPUT = 0x00,
	CXD2880_TNRDMD_GPIO_MODE_INPUT = 0x01,
	CXD2880_TNRDMD_GPIO_MODE_INT = 0x02,
	CXD2880_TNRDMD_GPIO_MODE_FEC_FAIL = 0x03,
	CXD2880_TNRDMD_GPIO_MODE_PWM = 0x04,
	CXD2880_TNRDMD_GPIO_MODE_EWS = 0x05,
	CXD2880_TNRDMD_GPIO_MODE_EEW = 0x06
};

enum cxd2880_tnrdmd_serial_ts_clk {
	CXD2880_TNRDMD_SERIAL_TS_CLK_FULL,
	CXD2880_TNRDMD_SERIAL_TS_CLK_HALF
};

/* One remembered register write, replayed on wake/retune */
struct cxd2880_tnrdmd_cfg_mem {
	enum cxd2880_io_tgt tgt;
	u8 bank;
	u8 address;
	u8 value;
	u8 bit_mask;
};

/* Single PID filter entry */
struct cxd2880_tnrdmd_pid_cfg {
	u8 is_en;
	u16 pid;
};

/* Full PID filter table (pass-list or, if is_negative, block-list) */
struct cxd2880_tnrdmd_pid_ftr_cfg {
	u8 is_negative;
	struct cxd2880_tnrdmd_pid_cfg pid_cfg[32];
};

/* LNA on/off hysteresis thresholds */
struct cxd2880_tnrdmd_lna_thrs {
	u8 off_on;
	u8 on_off;
};

struct cxd2880_tnrdmd_lna_thrs_tbl_air {
	struct cxd2880_tnrdmd_lna_thrs thrs[24];
};

struct cxd2880_tnrdmd_lna_thrs_tbl_cable {
	struct cxd2880_tnrdmd_lna_thrs thrs[32];
};

/* Instantiation parameters for a single (non-diversity) demod */
struct cxd2880_tnrdmd_create_param {
	enum cxd2880_tnrdmd_tsout_if ts_output_if;
	u8 en_internal_ldo;
	enum cxd2880_tnrdmd_xtal_share xtal_share_type;
	u8 xosc_cap;
	u8 xosc_i;
	u8 is_cxd2881gg;
	u8 stationary_use;
};

/* Instantiation parameters for a main+sub diversity pair */
struct cxd2880_tnrdmd_diver_create_param {
	enum cxd2880_tnrdmd_tsout_if ts_output_if;
	u8 en_internal_ldo;
	u8 xosc_cap_main;
	u8 xosc_i_main;
	u8 xosc_i_sub;
	u8 is_cxd2881gg;
	u8 stationary_use;
};

/* Per-demod driver instance state */
struct cxd2880_tnrdmd {
	struct cxd2880_tnrdmd *diver_sub;	/* sub demod (diversity main only) */
	struct cxd2880_io *io;
	struct cxd2880_tnrdmd_create_param create_param;
	enum cxd2880_tnrdmd_divermode diver_mode;
	enum cxd2880_tnrdmd_clockmode fixed_clk_mode;
	u8 is_cable_input;
	u8 en_fef_intmtnt_base;
	u8 en_fef_intmtnt_lite;
	u8 blind_tune_dvbt2_first;
	int (*rf_lvl_cmpstn)(struct cxd2880_tnrdmd *tnr_dmd, int *rf_lvl_db);
	struct cxd2880_tnrdmd_lna_thrs_tbl_air *lna_thrs_tbl_air;
	struct cxd2880_tnrdmd_lna_thrs_tbl_cable *lna_thrs_tbl_cable;
	u8 srl_ts_clk_mod_cnts;
	enum cxd2880_tnrdmd_serial_ts_clk srl_ts_clk_frq;
	u8 ts_byte_clk_manual_setting;
	u8 is_ts_backwards_compatible_mode;
	struct cxd2880_tnrdmd_cfg_mem cfg_mem[CXD2880_TNRDMD_MAX_CFG_MEM_COUNT];
	u8 cfg_mem_last_entry;
	struct cxd2880_tnrdmd_pid_ftr_cfg pid_ftr_cfg;
	u8 pid_ftr_cfg_en;
	void *user;	/* opaque pointer for the integrating layer */
	enum cxd2880_tnrdmd_chip_id chip_id;
	enum cxd2880_tnrdmd_state state;
	enum cxd2880_tnrdmd_clockmode clk_mode;
	u32 frequency_khz;
	enum cxd2880_dtv_sys sys;
	enum cxd2880_dtv_bandwidth bandwidth;
	u8 scan_mode;
	atomic_t cancel;	/* set to abort a tune/scan in progress */
};

int cxd2880_tnrdmd_create(struct cxd2880_tnrdmd *tnr_dmd,
			  struct cxd2880_io *io,
			  struct cxd2880_tnrdmd_create_param *create_param);

int cxd2880_tnrdmd_diver_create(struct cxd2880_tnrdmd *tnr_dmd_main,
				struct cxd2880_io *io_main,
				struct cxd2880_tnrdmd *tnr_dmd_sub,
				struct cxd2880_io *io_sub,
				struct
				cxd2880_tnrdmd_diver_create_param
				*create_param);

int cxd2880_tnrdmd_init1(struct cxd2880_tnrdmd *tnr_dmd);

int cxd2880_tnrdmd_init2(struct cxd2880_tnrdmd *tnr_dmd);

int cxd2880_tnrdmd_check_internal_cpu_status(struct cxd2880_tnrdmd *tnr_dmd,
					     u8 *task_completed);

int cxd2880_tnrdmd_common_tune_setting1(struct cxd2880_tnrdmd *tnr_dmd,
					enum cxd2880_dtv_sys sys,
					u32 frequency_khz,
					enum cxd2880_dtv_bandwidth
					bandwidth, u8 one_seg_opt,
					u8 one_seg_opt_shft_dir);

int cxd2880_tnrdmd_common_tune_setting2(struct cxd2880_tnrdmd *tnr_dmd,
					enum cxd2880_dtv_sys sys,
					u8 en_fef_intmtnt_ctrl);

int cxd2880_tnrdmd_sleep(struct cxd2880_tnrdmd *tnr_dmd);

int cxd2880_tnrdmd_set_cfg(struct cxd2880_tnrdmd *tnr_dmd,
			   enum cxd2880_tnrdmd_cfg_id id, int value);

int cxd2880_tnrdmd_gpio_set_cfg(struct cxd2880_tnrdmd *tnr_dmd, u8 id,
				u8 en, enum cxd2880_tnrdmd_gpio_mode mode,
				u8 open_drain, u8 invert);

int cxd2880_tnrdmd_gpio_set_cfg_sub(struct cxd2880_tnrdmd *tnr_dmd, u8 id,
				    u8 en,
				    enum cxd2880_tnrdmd_gpio_mode mode,
				    u8 open_drain, u8 invert);

int cxd2880_tnrdmd_gpio_read(struct cxd2880_tnrdmd *tnr_dmd, u8 id,
			     u8 *value);

int cxd2880_tnrdmd_gpio_read_sub(struct cxd2880_tnrdmd *tnr_dmd, u8 id,
				 u8 *value);

int cxd2880_tnrdmd_gpio_write(struct cxd2880_tnrdmd *tnr_dmd, u8 id,
			      u8 value);

int cxd2880_tnrdmd_gpio_write_sub(struct cxd2880_tnrdmd *tnr_dmd, u8 id,
				  u8 value);

int cxd2880_tnrdmd_interrupt_read(struct cxd2880_tnrdmd *tnr_dmd,
				  u16 *value);

int cxd2880_tnrdmd_interrupt_clear(struct cxd2880_tnrdmd *tnr_dmd,
				   u16 value);

int cxd2880_tnrdmd_ts_buf_clear(struct cxd2880_tnrdmd *tnr_dmd,
				u8 clear_overflow_flag,
				u8 clear_underflow_flag, u8 clear_buf);

int cxd2880_tnrdmd_chip_id(struct cxd2880_tnrdmd *tnr_dmd,
			   enum cxd2880_tnrdmd_chip_id *chip_id);

int cxd2880_tnrdmd_set_and_save_reg_bits(struct cxd2880_tnrdmd *tnr_dmd,
					 enum cxd2880_io_tgt tgt, u8 bank,
					 u8 address, u8 value, u8 bit_mask);

int cxd2880_tnrdmd_set_scan_mode(struct cxd2880_tnrdmd *tnr_dmd,
				 enum cxd2880_dtv_sys sys,
				 u8 scan_mode_end);

int cxd2880_tnrdmd_set_pid_ftr(struct cxd2880_tnrdmd *tnr_dmd,
			       struct cxd2880_tnrdmd_pid_ftr_cfg
			       *pid_ftr_cfg);

int cxd2880_tnrdmd_set_rf_lvl_cmpstn(struct cxd2880_tnrdmd
				     *tnr_dmd,
				     int (*rf_lvl_cmpstn)
				     (struct cxd2880_tnrdmd *,
				     int *));

int cxd2880_tnrdmd_set_rf_lvl_cmpstn_sub(struct cxd2880_tnrdmd *tnr_dmd,
					 int (*rf_lvl_cmpstn)
					 (struct cxd2880_tnrdmd *,
					 int *));

int cxd2880_tnrdmd_set_lna_thrs(struct cxd2880_tnrdmd *tnr_dmd,
				struct cxd2880_tnrdmd_lna_thrs_tbl_air
				*tbl_air,
				struct cxd2880_tnrdmd_lna_thrs_tbl_cable
				*tbl_cable);

int cxd2880_tnrdmd_set_lna_thrs_sub(struct cxd2880_tnrdmd *tnr_dmd,
				    struct
				    cxd2880_tnrdmd_lna_thrs_tbl_air
				    *tbl_air,
				    struct
				    cxd2880_tnrdmd_lna_thrs_tbl_cable
				    *tbl_cable);

int cxd2880_tnrdmd_set_ts_pin_high_low(struct cxd2880_tnrdmd
				       *tnr_dmd, u8 en, u8 value);

int cxd2880_tnrdmd_set_ts_output(struct cxd2880_tnrdmd *tnr_dmd,
				 u8 en);

int slvt_freeze_reg(struct cxd2880_tnrdmd *tnr_dmd);

#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * CDX host controller driver for AMD versal-net platform.
 *
 * Copyright (C) 2022-2023, Advanced Micro Devices, Inc.
 */

#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/cdx/cdx_bus.h>
#include <linux/irqdomain.h>

#include "cdx_controller.h"
#include "../cdx.h"
#include "mcdi_functions.h"
#include "mcdi.h"

/* All MCDI commands use the same fixed timeout on this platform. */
static unsigned int cdx_mcdi_rpc_timeout(struct cdx_mcdi *cdx, unsigned int cmd)
{
	return MCDI_RPC_TIMEOUT;
}

/* Transmit an MCDI request (header + payload) to the firmware over RPMsg. */
static void cdx_mcdi_request(struct cdx_mcdi *cdx,
			     const struct cdx_dword *hdr, size_t hdr_len,
			     const struct cdx_dword *sdu, size_t sdu_len)
{
	if (cdx_rpmsg_send(cdx, hdr, hdr_len, sdu, sdu_len))
		dev_err(&cdx->rpdev->dev, "Failed to send rpmsg data\n");
}

static const struct cdx_mcdi_ops mcdi_ops = {
	.mcdi_rpc_timeout = cdx_mcdi_rpc_timeout,
	.mcdi_request = cdx_mcdi_request,
};

/* Enable a CDX bus via an MCDI firmware call. */
static int cdx_bus_enable(struct cdx_controller *cdx, u8 bus_num)
{
	return cdx_mcdi_bus_enable(cdx->priv, bus_num);
}

/* Disable a CDX bus via an MCDI firmware call. */
static int cdx_bus_disable(struct cdx_controller *cdx, u8 bus_num)
{
	return cdx_mcdi_bus_disable(cdx->priv, bus_num);
}

/* Called once the RPMsg channel is up: make the controller visible. */
void cdx_rpmsg_post_probe(struct cdx_controller *cdx)
{
	/* Register CDX controller with CDX bus driver */
	if (cdx_register_controller(cdx))
		dev_err(cdx->dev, "Failed to register CDX controller\n");
}

/* Called before the RPMsg channel goes away: quiesce in-flight MCDI. */
void cdx_rpmsg_pre_remove(struct cdx_controller *cdx)
{
	cdx_unregister_controller(cdx);
	cdx_mcdi_wait_for_quiescence(cdx->priv, MCDI_RPC_TIMEOUT);
}

/*
 * Dispatch a device configuration request (MSI setup, reset, bus-master,
 * MSI enable) to the corresponding MCDI firmware call.
 */
static int cdx_configure_device(struct cdx_controller *cdx,
				u8 bus_num, u8 dev_num,
				struct cdx_device_config *dev_config)
{
	u16 msi_index;
	int ret = 0;
	u32 data;
	u64 addr;

	switch (dev_config->type) {
	case CDX_DEV_MSI_CONF:
		msi_index = dev_config->msi.msi_index;
		data = dev_config->msi.data;
		addr = dev_config->msi.addr;

		ret = cdx_mcdi_write_msi(cdx->priv, bus_num, dev_num,
					 msi_index, addr, data);
		break;
	case CDX_DEV_RESET_CONF:
		ret = cdx_mcdi_reset_device(cdx->priv, bus_num, dev_num);
		break;
	case CDX_DEV_BUS_MASTER_CONF:
		ret = cdx_mcdi_bus_master_enable(cdx->priv, bus_num, dev_num,
						 dev_config->bus_master_enable);
		break;
	case CDX_DEV_MSI_ENABLE:
		ret = cdx_mcdi_msi_enable(cdx->priv, bus_num, dev_num,
					  dev_config->msi_enable);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * Enumerate all buses and devices reported by firmware and register them
 * with the CDX bus subsystem. Per-bus/per-device failures are logged and
 * skipped so one bad device does not abort the whole scan.
 */
static int cdx_scan_devices(struct cdx_controller *cdx)
{
	struct cdx_mcdi *cdx_mcdi = cdx->priv;
	u8 bus_num, dev_num, num_cdx_bus;
	int ret;

	/* MCDI FW Read: Fetch the number of CDX buses on this controller */
	ret = cdx_mcdi_get_num_buses(cdx_mcdi);
	if (ret < 0) {
		dev_err(cdx->dev,
			"Get number of CDX buses failed: %d\n", ret);
		return ret;
	}
	num_cdx_bus = (u8)ret;

	for (bus_num = 0; bus_num < num_cdx_bus; bus_num++) {
		struct device *bus_dev;
		u8 num_cdx_dev;

		/* Add the bus on cdx subsystem */
		bus_dev = cdx_bus_add(cdx, bus_num);
		if (!bus_dev)
			continue;

		/* MCDI FW Read: Fetch the number of devices present */
		ret = cdx_mcdi_get_num_devs(cdx_mcdi, bus_num);
		if (ret < 0) {
			dev_err(cdx->dev,
				"Get devices on CDX bus %d failed: %d\n",
				bus_num, ret);
			continue;
		}
		num_cdx_dev = (u8)ret;

		for (dev_num = 0; dev_num < num_cdx_dev; dev_num++) {
			struct cdx_dev_params dev_params;

			/* MCDI FW: Get the device config */
			ret = cdx_mcdi_get_dev_config(cdx_mcdi, bus_num,
						      dev_num, &dev_params);
			if (ret) {
				dev_err(cdx->dev,
					"CDX device config get failed for %d(bus):%d(dev), %d\n",
					bus_num, dev_num, ret);
				continue;
			}
			dev_params.cdx = cdx;
			dev_params.parent = bus_dev;

			/* Add the device to the cdx bus */
			ret = cdx_device_add(&dev_params);
			if (ret) {
				dev_err(cdx->dev,
					"registering cdx dev: %d failed: %d\n",
					dev_num, ret);
				continue;
			}

			dev_dbg(cdx->dev,
				"CDX dev: %d on cdx bus: %d created\n",
				dev_num, bus_num);
		}
	}

	return 0;
}

static struct cdx_ops cdx_ops = {
	.bus_enable = cdx_bus_enable,
	.bus_disable = cdx_bus_disable,
	.scan = cdx_scan_devices,
	.dev_configure = cdx_configure_device,
};

/*
 * Probe: allocate the MCDI context and controller, bring up the MSI
 * domain, then attach the RPMsg transport. Unwinds in reverse order on
 * failure.
 */
static int xlnx_cdx_probe(struct platform_device *pdev)
{
	struct cdx_controller *cdx;
	struct cdx_mcdi *cdx_mcdi;
	int ret;

	cdx_mcdi = kzalloc(sizeof(*cdx_mcdi), GFP_KERNEL);
	if (!cdx_mcdi)
		return -ENOMEM;

	/* Store the MCDI ops */
	cdx_mcdi->mcdi_ops = &mcdi_ops;

	/* MCDI FW: Initialize the FW path */
	ret = cdx_mcdi_init(cdx_mcdi);
	if (ret) {
		dev_err_probe(&pdev->dev, ret, "MCDI Initialization failed\n");
		goto mcdi_init_fail;
	}

	cdx = kzalloc(sizeof(*cdx), GFP_KERNEL);
	if (!cdx) {
		ret = -ENOMEM;
		goto cdx_alloc_fail;
	}
	platform_set_drvdata(pdev, cdx);

	cdx->dev = &pdev->dev;
	cdx->priv = cdx_mcdi;
	cdx->ops = &cdx_ops;

	/* Create MSI domain */
	cdx->msi_domain = cdx_msi_domain_init(&pdev->dev);
	if (!cdx->msi_domain) {
		dev_err(&pdev->dev, "cdx_msi_domain_init() failed");
		ret = -ENODEV;
		goto cdx_msi_fail;
	}

	ret = cdx_setup_rpmsg(pdev);
	if (ret) {
		if (ret != -EPROBE_DEFER)
			dev_err(&pdev->dev, "Failed to register CDX RPMsg transport\n");
		goto cdx_rpmsg_fail;
	}

	dev_info(&pdev->dev, "Successfully registered CDX controller with RPMsg as transport\n");
	return 0;

cdx_rpmsg_fail:
	irq_domain_remove(cdx->msi_domain);
cdx_msi_fail:
	kfree(cdx);
cdx_alloc_fail:
	cdx_mcdi_finish(cdx_mcdi);
mcdi_init_fail:
	kfree(cdx_mcdi);

	return ret;
}

/* Remove: tear down in reverse probe order. */
static void xlnx_cdx_remove(struct platform_device *pdev)
{
	struct cdx_controller *cdx = platform_get_drvdata(pdev);
	struct cdx_mcdi *cdx_mcdi = cdx->priv;

	cdx_destroy_rpmsg(pdev);

	irq_domain_remove(cdx->msi_domain);

	kfree(cdx);

	cdx_mcdi_finish(cdx_mcdi);
	kfree(cdx_mcdi);
}

static const struct of_device_id cdx_match_table[] = {
	{.compatible = "xlnx,versal-net-cdx",},
	{ },
};

MODULE_DEVICE_TABLE(of, cdx_match_table);

static struct platform_driver cdx_pdriver = {
	.driver = {
		   .name = "cdx-controller",
		   .pm = NULL,
		   .of_match_table = cdx_match_table,
		   },
	.probe = xlnx_cdx_probe,
	.remove = xlnx_cdx_remove,
};

static int __init cdx_controller_init(void)
{
	int ret;

	ret = platform_driver_register(&cdx_pdriver);
	if (ret)
		pr_err("platform_driver_register() failed: %d\n", ret);

	return ret;
}

static void __exit cdx_controller_exit(void)
{
	platform_driver_unregister(&cdx_pdriver);
}

module_init(cdx_controller_init);
module_exit(cdx_controller_exit);

MODULE_AUTHOR("AMD Inc.");
MODULE_DESCRIPTION("CDX controller for AMD devices");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("CDX_BUS_CONTROLLER");
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2021 Felix Fietkau <[email protected]> */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/bitfield.h> #include <linux/dma-mapping.h> #include <linux/skbuff.h> #include <linux/of_platform.h> #include <linux/of_address.h> #include <linux/of_reserved_mem.h> #include <linux/mfd/syscon.h> #include <linux/debugfs.h> #include <linux/soc/mediatek/mtk_wed.h> #include <net/flow_offload.h> #include <net/pkt_cls.h> #include "mtk_eth_soc.h" #include "mtk_wed.h" #include "mtk_ppe.h" #include "mtk_wed_wo.h" #define MTK_PCIE_BASE(n) (0x1a143000 + (n) * 0x2000) #define MTK_WED_PKT_SIZE 1920 #define MTK_WED_BUF_SIZE 2048 #define MTK_WED_PAGE_BUF_SIZE 128 #define MTK_WED_BUF_PER_PAGE (PAGE_SIZE / 2048) #define MTK_WED_RX_BUF_PER_PAGE (PAGE_SIZE / MTK_WED_PAGE_BUF_SIZE) #define MTK_WED_RX_RING_SIZE 1536 #define MTK_WED_RX_PG_BM_CNT 8192 #define MTK_WED_AMSDU_BUF_SIZE (PAGE_SIZE << 4) #define MTK_WED_AMSDU_NPAGES 32 #define MTK_WED_TX_RING_SIZE 2048 #define MTK_WED_WDMA_RING_SIZE 1024 #define MTK_WED_MAX_GROUP_SIZE 0x100 #define MTK_WED_VLD_GROUP_SIZE 0x40 #define MTK_WED_PER_GROUP_PKT 128 #define MTK_WED_FBUF_SIZE 128 #define MTK_WED_MIOD_CNT 16 #define MTK_WED_FB_CMD_CNT 1024 #define MTK_WED_RRO_QUE_CNT 8192 #define MTK_WED_MIOD_ENTRY_CNT 128 #define MTK_WED_TX_BM_DMA_SIZE 65536 #define MTK_WED_TX_BM_PKT_CNT 32768 static struct mtk_wed_hw *hw_list[3]; static DEFINE_MUTEX(hw_lock); struct mtk_wed_flow_block_priv { struct mtk_wed_hw *hw; struct net_device *dev; }; static const struct mtk_wed_soc_data mt7622_data = { .regmap = { .tx_bm_tkid = 0x088, .wpdma_rx_ring0 = 0x770, .reset_idx_tx_mask = GENMASK(3, 0), .reset_idx_rx_mask = GENMASK(17, 16), }, .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), .wdma_desc_size = sizeof(struct mtk_wdma_desc), }; static const struct mtk_wed_soc_data mt7986_data = { .regmap = { .tx_bm_tkid = 0x0c8, .wpdma_rx_ring0 = 
0x770, .reset_idx_tx_mask = GENMASK(1, 0), .reset_idx_rx_mask = GENMASK(7, 6), }, .tx_ring_desc_size = sizeof(struct mtk_wdma_desc), .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), }; static const struct mtk_wed_soc_data mt7988_data = { .regmap = { .tx_bm_tkid = 0x0c8, .wpdma_rx_ring0 = 0x7d0, .reset_idx_tx_mask = GENMASK(1, 0), .reset_idx_rx_mask = GENMASK(7, 6), }, .tx_ring_desc_size = sizeof(struct mtk_wed_bm_desc), .wdma_desc_size = 2 * sizeof(struct mtk_wdma_desc), }; static void wed_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) { regmap_update_bits(dev->hw->regs, reg, mask | val, val); } static void wed_set(struct mtk_wed_device *dev, u32 reg, u32 mask) { return wed_m32(dev, reg, 0, mask); } static void wed_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) { return wed_m32(dev, reg, mask, 0); } static void wdma_m32(struct mtk_wed_device *dev, u32 reg, u32 mask, u32 val) { wdma_w32(dev, reg, (wdma_r32(dev, reg) & ~mask) | val); } static void wdma_set(struct mtk_wed_device *dev, u32 reg, u32 mask) { wdma_m32(dev, reg, 0, mask); } static void wdma_clr(struct mtk_wed_device *dev, u32 reg, u32 mask) { wdma_m32(dev, reg, mask, 0); } static u32 wifi_r32(struct mtk_wed_device *dev, u32 reg) { return readl(dev->wlan.base + reg); } static void wifi_w32(struct mtk_wed_device *dev, u32 reg, u32 val) { writel(val, dev->wlan.base + reg); } static u32 mtk_wed_read_reset(struct mtk_wed_device *dev) { return wed_r32(dev, MTK_WED_RESET); } static u32 mtk_wdma_read_reset(struct mtk_wed_device *dev) { return wdma_r32(dev, MTK_WDMA_GLO_CFG); } static void mtk_wdma_v3_rx_reset(struct mtk_wed_device *dev) { u32 status; if (!mtk_wed_is_v3_or_greater(dev->hw)) return; wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) dev_err(dev->hw->dev, "rx reset 
failed\n"); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) dev_err(dev->hw->dev, "rx reset failed\n"); wdma_clr(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) dev_err(dev->hw->dev, "rx reset failed\n"); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) dev_err(dev->hw->dev, "rx reset failed\n"); /* prefetch FIFO */ wdma_w32(dev, MTK_WDMA_PREF_RX_FIFO_CFG, MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); wdma_clr(dev, MTK_WDMA_PREF_RX_FIFO_CFG, MTK_WDMA_PREF_RX_FIFO_CFG_RING0_CLEAR | MTK_WDMA_PREF_RX_FIFO_CFG_RING1_CLEAR); /* core FIFO */ wdma_w32(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); wdma_clr(dev, MTK_WDMA_XDMA_RX_FIFO_CFG, MTK_WDMA_XDMA_RX_FIFO_CFG_RX_PAR_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_CMD_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_DMAD_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_ARR_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_LEN_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_WID_FIFO_CLEAR | MTK_WDMA_XDMA_RX_FIFO_CFG_RX_BID_FIFO_CLEAR); /* writeback FIFO */ wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); wdma_w32(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(0), MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); wdma_clr(dev, MTK_WDMA_WRBK_RX_FIFO_CFG(1), 
MTK_WDMA_WRBK_RX_FIFO_CFG_RING_CLEAR); /* prefetch ring status */ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_RX_RING_CLEAR); /* writeback ring status */ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_RX_RING_CLEAR); } static int mtk_wdma_rx_reset(struct mtk_wed_device *dev) { u32 status, mask = MTK_WDMA_GLO_CFG_RX_DMA_BUSY; int i, ret; wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_DMA_EN); ret = readx_poll_timeout(mtk_wdma_read_reset, dev, status, !(status & mask), 0, 10000); if (ret) dev_err(dev->hw->dev, "rx reset failed\n"); mtk_wdma_v3_rx_reset(dev); wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_RX); wdma_w32(dev, MTK_WDMA_RESET_IDX, 0); for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) { if (dev->rx_wdma[i].desc) continue; wdma_w32(dev, MTK_WDMA_RING_RX(i) + MTK_WED_RING_OFS_CPU_IDX, 0); } return ret; } static u32 mtk_wed_check_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) { return !!(wed_r32(dev, reg) & mask); } static int mtk_wed_poll_busy(struct mtk_wed_device *dev, u32 reg, u32 mask) { int sleep = 15000; int timeout = 100 * sleep; u32 val; return read_poll_timeout(mtk_wed_check_busy, val, !val, sleep, timeout, false, dev, reg, mask); } static void mtk_wdma_v3_tx_reset(struct mtk_wed_device *dev) { u32 status; if (!mtk_wed_is_v3_or_greater(dev->hw)) return; wdma_clr(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); wdma_clr(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_PREF_TX_CFG_PREF_BUSY), 0, 10000, false, dev, MTK_WDMA_PREF_TX_CFG)) dev_err(dev->hw->dev, "tx reset failed\n"); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_PREF_RX_CFG_PREF_BUSY), 0, 10000, false, dev, MTK_WDMA_PREF_RX_CFG)) dev_err(dev->hw->dev, "tx reset failed\n"); wdma_clr(dev, 
MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); wdma_clr(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_WRBK_TX_CFG_WRBK_BUSY), 0, 10000, false, dev, MTK_WDMA_WRBK_TX_CFG)) dev_err(dev->hw->dev, "tx reset failed\n"); if (read_poll_timeout(wdma_r32, status, !(status & MTK_WDMA_WRBK_RX_CFG_WRBK_BUSY), 0, 10000, false, dev, MTK_WDMA_WRBK_RX_CFG)) dev_err(dev->hw->dev, "tx reset failed\n"); /* prefetch FIFO */ wdma_w32(dev, MTK_WDMA_PREF_TX_FIFO_CFG, MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); wdma_clr(dev, MTK_WDMA_PREF_TX_FIFO_CFG, MTK_WDMA_PREF_TX_FIFO_CFG_RING0_CLEAR | MTK_WDMA_PREF_TX_FIFO_CFG_RING1_CLEAR); /* core FIFO */ wdma_w32(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); wdma_clr(dev, MTK_WDMA_XDMA_TX_FIFO_CFG, MTK_WDMA_XDMA_TX_FIFO_CFG_TX_PAR_FIFO_CLEAR | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_CMD_FIFO_CLEAR | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_DMAD_FIFO_CLEAR | MTK_WDMA_XDMA_TX_FIFO_CFG_TX_ARR_FIFO_CLEAR); /* writeback FIFO */ wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); wdma_w32(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(0), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); wdma_clr(dev, MTK_WDMA_WRBK_TX_FIFO_CFG(1), MTK_WDMA_WRBK_TX_FIFO_CFG_RING_CLEAR); /* prefetch ring status */ wdma_w32(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); wdma_clr(dev, MTK_WDMA_PREF_SIDX_CFG, MTK_WDMA_PREF_SIDX_CFG_TX_RING_CLEAR); /* writeback ring status */ wdma_w32(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); wdma_clr(dev, MTK_WDMA_WRBK_SIDX_CFG, MTK_WDMA_WRBK_SIDX_CFG_TX_RING_CLEAR); } static void mtk_wdma_tx_reset(struct mtk_wed_device *dev) { u32 status, mask = 
		MTK_WDMA_GLO_CFG_TX_DMA_BUSY;
	int i;

	wdma_clr(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN);
	if (readx_poll_timeout(mtk_wdma_read_reset, dev, status,
			       !(status & mask), 0, 10000))
		dev_err(dev->hw->dev, "tx reset failed\n");

	mtk_wdma_v3_tx_reset(dev);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, MTK_WDMA_RESET_IDX_TX);
	wdma_w32(dev, MTK_WDMA_RESET_IDX, 0);

	for (i = 0; i < ARRAY_SIZE(dev->tx_wdma); i++)
		wdma_w32(dev,
			 MTK_WDMA_RING_TX(i) + MTK_WED_RING_OFS_CPU_IDX, 0);
}

/* Assert the @mask reset bits in MTK_WED_RESET and wait for the hardware
 * to self-clear them; warn once if it never does.
 */
static void
mtk_wed_reset(struct mtk_wed_device *dev, u32 mask)
{
	u32 status;

	wed_w32(dev, MTK_WED_RESET, mask);
	if (readx_poll_timeout(mtk_wed_read_reset, dev, status,
			       !(status & mask), 0, 1000))
		WARN_ON_ONCE(1);
}

/* Read the WO (WED offload MCU) status word from the scratch registers. */
static u32
mtk_wed_wo_read_status(struct mtk_wed_device *dev)
{
	return wed_r32(dev, MTK_WED_SCR0 + 4 * MTK_WED_DUMMY_CR_WO_STATUS);
}

/* Stop the WED offload MCU: reset tx WDMA and the WED core, send a
 * DISABLE state-change message to the MCU, wait for it to acknowledge,
 * then pulse the per-core MCUSYS reset bit via a temporary ioremap.
 */
static void
mtk_wed_wo_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 state = MTK_WED_WO_STATE_DISABLE;
	void __iomem *reg;
	u32 val;

	mtk_wdma_tx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);

	if (mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				 MTK_WED_WO_CMD_CHANGE_STATE, &state,
				 sizeof(state), false))
		return;

	if (readx_poll_timeout(mtk_wed_wo_read_status, dev, val,
			       val == MTK_WED_WOIF_DISABLE_DONE,
			       100, MTK_WOCPU_TIMEOUT))
		dev_err(dev->hw->dev, "failed to disable wed-wo\n");

	reg = ioremap(MTK_WED_WO_CPU_MCUSYS_RESET_ADDR, 4);

	val = readl(reg);
	switch (dev->hw->index) {
	case 0:
		val |= MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO0_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	case 1:
		val |= MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		val &= ~MTK_WED_WO_CPU_WO1_MCUSYS_RESET_MASK;
		writel(val, reg);
		break;
	default:
		break;
	}
	iounmap(reg);
}

/* Invoke the WLAN driver reset callback of every attached WED device.
 * Called when the frame engine is being reset; holds hw_lock throughout.
 */
void mtk_wed_fe_reset(void)
{
	int i;

	mutex_lock(&hw_lock);

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw = hw_list[i];
		struct mtk_wed_device *dev;
		int err;

		if (!hw)
			break;

		dev = hw->wed_dev;
		if (!dev || !dev->wlan.reset)
			continue;

		/*
		 * reset callback blocks until WLAN reset is completed
		 */
		err = dev->wlan.reset(dev);
		if (err)
			dev_err(dev->dev, "wlan reset failed: %d\n", err);
	}

	mutex_unlock(&hw_lock);
}

/* Notify every attached WLAN driver that the frame engine reset finished. */
void mtk_wed_fe_reset_complete(void)
{
	int i;

	mutex_lock(&hw_lock);

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw = hw_list[i];
		struct mtk_wed_device *dev;

		if (!hw)
			break;

		dev = hw->wed_dev;
		if (!dev || !dev->wlan.reset_complete)
			continue;

		dev->wlan.reset_complete(dev);
	}

	mutex_unlock(&hw_lock);
}

/* Pick a free WED hw instance for @dev. PCIe devices first try the hw
 * matching their PCI domain; v1 hardware is tied to that slot, newer
 * hardware may fall through to any free instance. Returns NULL if none.
 */
static struct mtk_wed_hw *
mtk_wed_assign(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw;
	int i;

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		hw = hw_list[pci_domain_nr(dev->wlan.pci_dev->bus)];
		if (!hw)
			return NULL;

		if (!hw->wed_dev)
			goto out;

		if (mtk_wed_is_v1(hw))
			return NULL;

		/* MT7986 WED devices do not have any pcie slot restrictions */
	}
	/* MT7986 PCIE or AXI */
	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		hw = hw_list[i];
		if (hw && !hw->wed_dev)
			goto out;
	}

	return NULL;

out:
	hw->wed_dev = dev;
	return hw;
}

/* v3+: allocate and DMA-map the per-page AMSDU txd buffers.
 * NOTE(review): on a mid-loop failure the error path only unmaps the
 * previously mapped pages; the pages themselves are not freed here —
 * confirm whether mtk_wed_amsdu_free_buffer() covers that case.
 */
static int
mtk_wed_amsdu_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;
	struct mtk_wed_amsdu *wed_amsdu;
	int i;

	if (!mtk_wed_is_v3_or_greater(hw))
		return 0;

	wed_amsdu = devm_kcalloc(hw->dev, MTK_WED_AMSDU_NPAGES,
				 sizeof(*wed_amsdu), GFP_KERNEL);
	if (!wed_amsdu)
		return -ENOMEM;

	for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
		void *ptr;

		/* each segment is 64K */
		ptr = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN |
					       __GFP_ZERO | __GFP_COMP |
					       GFP_DMA32,
					       get_order(MTK_WED_AMSDU_BUF_SIZE));
		if (!ptr)
			goto error;

		wed_amsdu[i].txd = ptr;
		wed_amsdu[i].txd_phy = dma_map_single(hw->dev, ptr,
						      MTK_WED_AMSDU_BUF_SIZE,
						      DMA_TO_DEVICE);
		if (dma_mapping_error(hw->dev, wed_amsdu[i].txd_phy))
			goto error;
	}
	dev->hw->wed_amsdu = wed_amsdu;

	return 0;

error:
	for (i--; i >= 0; i--)
		dma_unmap_single(hw->dev, wed_amsdu[i].txd_phy,
				 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
	return -ENOMEM;
}

/* Unmap and free the AMSDU txd buffers allocated above. */
static void
mtk_wed_amsdu_free_buffer(struct mtk_wed_device *dev)
{
	struct
	       mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
	int i;

	if (!wed_amsdu)
		return;

	for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++) {
		dma_unmap_single(dev->hw->dev, wed_amsdu[i].txd_phy,
				 MTK_WED_AMSDU_BUF_SIZE, DMA_TO_DEVICE);
		free_pages((unsigned long)wed_amsdu[i].txd,
			   get_order(MTK_WED_AMSDU_BUF_SIZE));
	}
}

/* Program the AMSDU offload engine: txd base addresses, per-STA limits,
 * txd source port, qmem reset — then enable TX AMSDU in WED_CTRL.
 * Returns 0 on success or a poll-timeout error code.
 */
static int
mtk_wed_amsdu_init(struct mtk_wed_device *dev)
{
	struct mtk_wed_amsdu *wed_amsdu = dev->hw->wed_amsdu;
	int i, ret;

	if (!wed_amsdu)
		return 0;

	for (i = 0; i < MTK_WED_AMSDU_NPAGES; i++)
		wed_w32(dev, MTK_WED_AMSDU_HIFTXD_BASE_L(i),
			wed_amsdu[i].txd_phy);

	/* init all sta parameter */
	wed_w32(dev, MTK_WED_AMSDU_STA_INFO_INIT, MTK_WED_AMSDU_STA_RMVL |
		MTK_WED_AMSDU_STA_WTBL_HDRT_MODE |
		FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_LEN,
			   dev->wlan.amsdu_max_len >> 8) |
		FIELD_PREP(MTK_WED_AMSDU_STA_MAX_AMSDU_NUM,
			   dev->wlan.amsdu_max_subframes));

	wed_w32(dev, MTK_WED_AMSDU_STA_INFO, MTK_WED_AMSDU_STA_INFO_DO_INIT);

	ret = mtk_wed_poll_busy(dev, MTK_WED_AMSDU_STA_INFO,
				MTK_WED_AMSDU_STA_INFO_DO_INIT);
	if (ret) {
		dev_err(dev->hw->dev, "amsdu initialization failed\n");
		return ret;
	}

	/* init partial amsdu offload txd src */
	wed_set(dev, MTK_WED_AMSDU_HIFTXD_CFG,
		FIELD_PREP(MTK_WED_AMSDU_HIFTXD_SRC, dev->hw->index));

	/* init qmem */
	wed_set(dev, MTK_WED_AMSDU_PSE, MTK_WED_AMSDU_PSE_RESET);
	ret = mtk_wed_poll_busy(dev, MTK_WED_MON_AMSDU_QMEM_STS1, BIT(29));
	if (ret) {
		pr_info("%s: amsdu qmem initialization failed\n", __func__);
		return ret;
	}

	/* eagle E1 PCIE1 tx ring 22 flow control issue */
	if (dev->wlan.id == 0x7991)
		wed_clr(dev, MTK_WED_AMSDU_FIFO, MTK_WED_AMSDU_IS_PRIOR0_RING);

	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);

	return 0;
}

/* Allocate the tx buffer-manager ring: a coherent descriptor array plus
 * DMA-mapped data pages carved into MTK_WED_BUF_SIZE slices, each slice
 * described by one descriptor (layout differs per hardware version).
 */
static int
mtk_wed_tx_buffer_alloc(struct mtk_wed_device *dev)
{
	u32 desc_size = dev->hw->soc->tx_ring_desc_size;
	int i, page_idx = 0, n_pages, ring_size;
	int token = dev->wlan.token_start;
	struct mtk_wed_buf *page_list;
	dma_addr_t desc_phys;
	void *desc_ptr;

	if (!mtk_wed_is_v3_or_greater(dev->hw)) {
		ring_size = dev->wlan.nbuf &
			    ~(MTK_WED_BUF_PER_PAGE - 1);
		dev->tx_buf_ring.size = ring_size;
	} else {
		dev->tx_buf_ring.size = MTK_WED_TX_BM_DMA_SIZE;
		ring_size = MTK_WED_TX_BM_PKT_CNT;
	}
	n_pages = dev->tx_buf_ring.size / MTK_WED_BUF_PER_PAGE;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->tx_buf_ring.pages = page_list;

	desc_ptr = dma_alloc_coherent(dev->hw->dev,
				      dev->tx_buf_ring.size * desc_size,
				      &desc_phys, GFP_KERNEL);
	if (!desc_ptr)
		return -ENOMEM;

	dev->tx_buf_ring.desc = desc_ptr;
	dev->tx_buf_ring.desc_phys = desc_phys;

	for (i = 0; i < ring_size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		void *buf;
		int s;

		page = __dev_alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx].p = page;
		page_list[page_idx++].phy_addr = page_phys;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf = page_to_virt(page);
		buf_phys = page_phys;

		/* carve the page into MTK_WED_BUF_SIZE slices, one
		 * descriptor each; pre-v3 also lets the WLAN driver fill
		 * in a txd header and encodes segment lengths in ctrl
		 */
		for (s = 0; s < MTK_WED_BUF_PER_PAGE; s++) {
			struct mtk_wdma_desc *desc = desc_ptr;
			u32 ctrl;

			desc->buf0 = cpu_to_le32(buf_phys);
			if (!mtk_wed_is_v3_or_greater(dev->hw)) {
				u32 txd_size;

				txd_size = dev->wlan.init_buf(buf, buf_phys,
							      token++);
				desc->buf1 = cpu_to_le32(buf_phys + txd_size);
				ctrl = FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN0,
						  txd_size);
				if (mtk_wed_is_v1(dev->hw))
					ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG1 |
						FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1,
							   MTK_WED_BUF_SIZE - txd_size);
				else
					ctrl |= MTK_WDMA_DESC_CTRL_LAST_SEG0 |
						FIELD_PREP(MTK_WDMA_DESC_CTRL_LEN1_V2,
							   MTK_WED_BUF_SIZE - txd_size);
				desc->info = 0;
			} else {
				ctrl = token << 16 | TX_DMA_PREP_ADDR64(buf_phys);
			}
			desc->ctrl = cpu_to_le32(ctrl);

			desc_ptr += desc_size;
			buf += MTK_WED_BUF_SIZE;
			buf_phys += MTK_WED_BUF_SIZE;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

static void
mtk_wed_free_tx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wed_buf *page_list = dev->tx_buf_ring.pages;
	struct mtk_wed_hw *hw = dev->hw;
	int i, page_idx = 0;

	if (!page_list)
		return;

	if (!dev->tx_buf_ring.desc)
		goto free_pagelist;

	/* unmap and release every data page backing the tx buffer ring */
	for (i = 0; i < dev->tx_buf_ring.size; i += MTK_WED_BUF_PER_PAGE) {
		dma_addr_t page_phy = page_list[page_idx].phy_addr;
		void *page = page_list[page_idx++].p;

		if (!page)
			break;

		dma_unmap_page(dev->hw->dev, page_phy, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev,
			  dev->tx_buf_ring.size * hw->soc->tx_ring_desc_size,
			  dev->tx_buf_ring.desc, dev->tx_buf_ring.desc_phys);

free_pagelist:
	kfree(page_list);
}

/* Allocate the hardware-RRO page buffer manager: a coherent descriptor
 * array plus DMA-mapped pages carved into MTK_WED_PAGE_BUF_SIZE slices.
 * No-op unless the WLAN driver advertises hw_rro support.
 */
static int
mtk_wed_hwrro_buffer_alloc(struct mtk_wed_device *dev)
{
	int n_pages = MTK_WED_RX_PG_BM_CNT / MTK_WED_RX_BUF_PER_PAGE;
	struct mtk_wed_buf *page_list;
	struct mtk_wed_bm_desc *desc;
	dma_addr_t desc_phys;
	int i, page_idx = 0;

	if (!dev->wlan.hw_rro)
		return 0;

	page_list = kcalloc(n_pages, sizeof(*page_list), GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	dev->hw_rro.size = dev->wlan.rx_nbuf & ~(MTK_WED_BUF_PER_PAGE - 1);
	dev->hw_rro.pages = page_list;
	desc = dma_alloc_coherent(dev->hw->dev,
				  dev->wlan.rx_nbuf * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->hw_rro.desc = desc;
	dev->hw_rro.desc_phys = desc_phys;

	for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
		dma_addr_t page_phys, buf_phys;
		struct page *page;
		int s;

		page = __dev_alloc_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		page_phys = dma_map_page(dev->hw->dev, page, 0, PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
		if (dma_mapping_error(dev->hw->dev, page_phys)) {
			__free_page(page);
			return -ENOMEM;
		}

		page_list[page_idx].p = page;
		page_list[page_idx++].phy_addr = page_phys;
		dma_sync_single_for_cpu(dev->hw->dev, page_phys, PAGE_SIZE,
					DMA_BIDIRECTIONAL);

		buf_phys = page_phys;
		for (s = 0; s < MTK_WED_RX_BUF_PER_PAGE; s++) {
			desc->buf0 = cpu_to_le32(buf_phys);
			desc->token = cpu_to_le32(RX_DMA_PREP_ADDR64(buf_phys));
			buf_phys += MTK_WED_PAGE_BUF_SIZE;
			desc++;
		}

		dma_sync_single_for_device(dev->hw->dev, page_phys, PAGE_SIZE,
					   DMA_BIDIRECTIONAL);
	}

	return 0;
}

/* Allocate the rx buffer-manager descriptor array, let the WLAN driver
 * populate the rx buffers, then set up the hw-RRO buffers if enabled.
 */
static int
mtk_wed_rx_buffer_alloc(struct mtk_wed_device *dev)
{
	struct mtk_wed_bm_desc *desc;
	dma_addr_t desc_phys;

	dev->rx_buf_ring.size = dev->wlan.rx_nbuf;
	desc = dma_alloc_coherent(dev->hw->dev,
				  dev->wlan.rx_nbuf * sizeof(*desc),
				  &desc_phys, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	dev->rx_buf_ring.desc = desc;
	dev->rx_buf_ring.desc_phys = desc_phys;
	dev->wlan.init_rx_buf(dev, dev->wlan.rx_npkt);

	return mtk_wed_hwrro_buffer_alloc(dev);
}

/* Release the hw-RRO pages and descriptor array (reverse of the alloc). */
static void
mtk_wed_hwrro_free_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wed_buf *page_list = dev->hw_rro.pages;
	struct mtk_wed_bm_desc *desc = dev->hw_rro.desc;
	int i, page_idx = 0;

	if (!dev->wlan.hw_rro)
		return;

	if (!page_list)
		return;

	if (!desc)
		goto free_pagelist;

	for (i = 0; i < MTK_WED_RX_PG_BM_CNT; i += MTK_WED_RX_BUF_PER_PAGE) {
		dma_addr_t buf_addr = page_list[page_idx].phy_addr;
		void *page = page_list[page_idx++].p;

		if (!page)
			break;

		dma_unmap_page(dev->hw->dev, buf_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
		__free_page(page);
	}

	dma_free_coherent(dev->hw->dev, dev->hw_rro.size * sizeof(*desc),
			  desc, dev->hw_rro.desc_phys);

free_pagelist:
	kfree(page_list);
}

/* Hand rx buffers back to the WLAN driver and free the descriptor array,
 * then release the hw-RRO buffers.
 */
static void
mtk_wed_free_rx_buffer(struct mtk_wed_device *dev)
{
	struct mtk_wed_bm_desc *desc = dev->rx_buf_ring.desc;

	if (!desc)
		return;

	dev->wlan.release_rx_buf(dev);
	dma_free_coherent(dev->hw->dev, dev->rx_buf_ring.size * sizeof(*desc),
			  desc, dev->rx_buf_ring.desc_phys);

	mtk_wed_hwrro_free_buffer(dev);
}

/* Program the hw-RRO page buffer manager registers and enable it. */
static void
mtk_wed_hwrro_init(struct mtk_wed_device *dev)
{
	if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro)
		return;

	wed_set(dev, MTK_WED_RRO_PG_BM_RX_DMAM,
		FIELD_PREP(MTK_WED_RRO_PG_BM_RX_SDL0, 128));

	wed_w32(dev, MTK_WED_RRO_PG_BM_BASE, dev->hw_rro.desc_phys);

	wed_w32(dev, MTK_WED_RRO_PG_BM_INIT_PTR,
		MTK_WED_RRO_PG_BM_INIT_SW_TAIL_IDX |
		FIELD_PREP(MTK_WED_RRO_PG_BM_SW_TAIL_IDX,
			   MTK_WED_RX_PG_BM_CNT));

	/* enable
	 * rx_page_bm to fetch dmad
	 */
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
}

/* Program the rx buffer manager registers and enable it, then run the
 * hw-RRO init.
 */
static void
mtk_wed_rx_buffer_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RX_BM_RX_DMAD,
		FIELD_PREP(MTK_WED_RX_BM_RX_DMAD_SDL0, dev->wlan.rx_size));
	wed_w32(dev, MTK_WED_RX_BM_BASE, dev->rx_buf_ring.desc_phys);
	wed_w32(dev, MTK_WED_RX_BM_INIT_PTR, MTK_WED_RX_BM_INIT_SW_TAIL |
		FIELD_PREP(MTK_WED_RX_BM_SW_TAIL, dev->wlan.rx_npkt));
	wed_w32(dev, MTK_WED_RX_BM_DYN_ALLOC_TH,
		FIELD_PREP(MTK_WED_RX_BM_DYN_ALLOC_TH_H, 0xffff));
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);

	mtk_wed_hwrro_init(dev);
}

/* Free one ring's coherent descriptor memory, if allocated. */
static void
mtk_wed_free_ring(struct mtk_wed_device *dev, struct mtk_wed_ring *ring)
{
	if (!ring->desc)
		return;

	dma_free_coherent(dev->hw->dev, ring->size * ring->desc_size,
			  ring->desc, ring->desc_phys);
}

static void
mtk_wed_free_rx_rings(struct mtk_wed_device *dev)
{
	mtk_wed_free_rx_buffer(dev);
	mtk_wed_free_ring(dev, &dev->rro.ring);
}

static void
mtk_wed_free_tx_rings(struct mtk_wed_device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++)
		mtk_wed_free_ring(dev, &dev->tx_ring[i]);
	for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++)
		mtk_wed_free_ring(dev, &dev->rx_wdma[i]);
}

/* Build the per-version external interrupt mask and write it to
 * MTK_WED_EXT_INT_MASK (0 when @en is false); the trailing read flushes
 * the write.
 */
static void
mtk_wed_set_ext_int(struct mtk_wed_device *dev, bool en)
{
	u32 mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK;

	switch (dev->hw->version) {
	case 1:
		mask |= MTK_WED_EXT_INT_STATUS_TX_DRV_R_RESP_ERR;
		break;
	case 2:
		mask |= MTK_WED_EXT_INT_STATUS_RX_FBUF_LO_TH |
			MTK_WED_EXT_INT_STATUS_RX_FBUF_HI_TH |
			MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
			MTK_WED_EXT_INT_STATUS_TX_DMA_W_RESP_ERR;
		break;
	case 3:
		mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT |
		       MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;
		break;
	default:
		break;
	}

	if (!dev->hw->num_flows)
		mask &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD;

	wed_w32(dev, MTK_WED_EXT_INT_MASK, en ?
		mask : 0);
	wed_r32(dev, MTK_WED_EXT_INT_MASK);
}

/* v2-only: toggle the 512-byte txd overwrite support via TXDP_CTRL and
 * the TXP DW1 write value.
 */
static void
mtk_wed_set_512_support(struct mtk_wed_device *dev, bool enable)
{
	if (!mtk_wed_is_v2(dev->hw))
		return;

	if (enable) {
		wed_w32(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0103));
	} else {
		wed_w32(dev, MTK_WED_TXP_DW1,
			FIELD_PREP(MTK_WED_WPDMA_WRITE_TXP, 0x0100));
		wed_clr(dev, MTK_WED_TXDP_CTRL, MTK_WED_TXDP_DW9_OVERWR);
	}
}

/* Wait (up to 3 tries, ~100-200ms apart) for the WFDMA rx ring's CPU
 * index to reach MTK_WED_RX_RING_SIZE - 1, i.e. the ring to be fully
 * refilled. Returns -ETIMEDOUT if it never does.
 */
static int
mtk_wed_check_wfdma_rx_fill(struct mtk_wed_device *dev,
			    struct mtk_wed_ring *ring)
{
	int i;

	for (i = 0; i < 3; i++) {
		u32 cur_idx = readl(ring->wpdma + MTK_WED_RING_OFS_CPU_IDX);

		if (cur_idx == MTK_WED_RX_RING_SIZE - 1)
			break;

		usleep_range(100000, 200000);
	}

	if (i == 3) {
		dev_err(dev->hw->dev, "rx dma enable failed\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* Disable every WED / WDMA / WPDMA DMA engine, with per-version extras,
 * and drop 512-byte txd support.
 */
static void
mtk_wed_dma_disable(struct mtk_wed_device *dev)
{
	wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
		MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN |
		MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN);

	wed_clr(dev, MTK_WED_GLO_CFG,
		MTK_WED_GLO_CFG_TX_DMA_EN |
		MTK_WED_GLO_CFG_RX_DMA_EN);

	wdma_clr(dev, MTK_WDMA_GLO_CFG,
		 MTK_WDMA_GLO_CFG_TX_DMA_EN |
		 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
		 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES);

	if (mtk_wed_is_v1(dev->hw)) {
		regmap_write(dev->hw->mirror, dev->hw->index * 4, 0);
		wdma_clr(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);
	} else {
		wed_clr(dev, MTK_WED_WPDMA_GLO_CFG,
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC |
			MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC);

		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RX_DRV_EN);
		wed_clr(dev, MTK_WED_WDMA_GLO_CFG,
			MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK);

		if (mtk_wed_is_v3_or_greater(dev->hw) &&
		    mtk_wed_get_rx_capa(dev)) {
			wdma_clr(dev, MTK_WDMA_PREF_TX_CFG,
				 MTK_WDMA_PREF_TX_CFG_PREF_EN);
			wdma_clr(dev, MTK_WDMA_PREF_RX_CFG,
				 MTK_WDMA_PREF_RX_CFG_PREF_EN);
		}
	}

	mtk_wed_set_512_support(dev, false);
}

static void
mtk_wed_stop(struct mtk_wed_device
	     *dev)
{
	mtk_wed_dma_disable(dev);
	mtk_wed_set_ext_int(dev, false);

	/* silence all interrupt sources */
	wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, 0);
	wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, 0);
	wdma_w32(dev, MTK_WDMA_INT_MASK, 0);
	wdma_w32(dev, MTK_WDMA_INT_GRP2, 0);

	if (!mtk_wed_get_rx_capa(dev))
		return;

	wed_w32(dev, MTK_WED_EXT_INT_MASK1, 0);
	wed_w32(dev, MTK_WED_EXT_INT_MASK2, 0);
}

/* Stop the device and turn off all WED_CTRL agents; per-version extras
 * for rx offload (v2+) and the AMSDU engine / PCIe int control (v3+).
 */
static void
mtk_wed_deinit(struct mtk_wed_device *dev)
{
	mtk_wed_stop(dev);

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_WDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WPDMA_INT_AGENT_EN |
		MTK_WED_CTRL_WED_TX_BM_EN |
		MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);

	if (mtk_wed_is_v1(dev->hw))
		return;

	wed_clr(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_RX_ROUTE_QM_EN |
		MTK_WED_CTRL_WED_RX_BM_EN |
		MTK_WED_CTRL_RX_RRO_QM_EN);

	if (mtk_wed_is_v3_or_greater(dev->hw)) {
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN);
		wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_TX_AMSDU);
		wed_clr(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER);
	}
}

/* Tear down a WED device: deinit, reset, free all buffers and rings,
 * undo the PCIe coherency mapping, restore the eth DMA device when no
 * other hw instance is in use, then clear @dev and release the module
 * reference. Caller must hold hw_lock.
 */
static void
__mtk_wed_detach(struct mtk_wed_device *dev)
{
	struct mtk_wed_hw *hw = dev->hw;

	mtk_wed_deinit(dev);

	mtk_wdma_rx_reset(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_amsdu_free_buffer(dev);
	mtk_wed_free_tx_buffer(dev);
	mtk_wed_free_tx_rings(dev);

	if (mtk_wed_get_rx_capa(dev)) {
		if (hw->wed_wo)
			mtk_wed_wo_reset(dev);
		mtk_wed_free_rx_rings(dev);
		if (hw->wed_wo)
			mtk_wed_wo_deinit(hw);
	}

	if (dev->wlan.bus_type == MTK_WED_BUS_PCIE) {
		struct device_node *wlan_node;

		wlan_node = dev->wlan.pci_dev->dev.of_node;
		if (of_dma_is_coherent(wlan_node) && hw->hifsys)
			regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP,
					   BIT(hw->index), BIT(hw->index));
	}

	if ((!hw_list[!hw->index] || !hw_list[!hw->index]->wed_dev) &&
	    hw->eth->dma_dev != hw->eth->dev)
		mtk_eth_set_dma_device(hw->eth, hw->eth->dev);

	memset(dev, 0, sizeof(*dev));
	module_put(THIS_MODULE);

	hw->wed_dev = NULL;
}

static void
mtk_wed_detach(struct mtk_wed_device *dev)
{
	mutex_lock(&hw_lock);
	__mtk_wed_detach(dev);
	mutex_unlock(&hw_lock);
}

/* Bus-specific interrupt wiring: PCIe programs the interrupt mirror
 * windows (MSI or legacy offsets) plus polarity/source selection; AXI
 * only selects the interrupt signal source.
 */
static void
mtk_wed_bus_init(struct mtk_wed_device *dev)
{
	switch (dev->wlan.bus_type) {
	case MTK_WED_BUS_PCIE: {
		struct device_node *np = dev->hw->eth->dev->of_node;

		if (mtk_wed_is_v2(dev->hw)) {
			struct regmap *regs;

			regs = syscon_regmap_lookup_by_phandle(np,
							       "mediatek,wed-pcie");
			if (IS_ERR(regs))
				break;

			regmap_update_bits(regs, 0, BIT(0), BIT(0));
		}

		if (dev->wlan.msi) {
			wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
				dev->hw->pcie_base | 0xc08);
			wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
				dev->hw->pcie_base | 0xc04);
			wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(8));
		} else {
			wed_w32(dev, MTK_WED_PCIE_CFG_INTM,
				dev->hw->pcie_base | 0x180);
			wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
				dev->hw->pcie_base | 0x184);
			wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, BIT(24));
		}

		wed_w32(dev, MTK_WED_PCIE_INT_CTRL,
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_POLL_EN, 2));

		/* pcie interrupt control: pola/source selection */
		wed_set(dev, MTK_WED_PCIE_INT_CTRL,
			MTK_WED_PCIE_INT_CTRL_MSK_EN_POLA |
			MTK_WED_PCIE_INT_CTRL_MSK_IRQ_FILTER |
			FIELD_PREP(MTK_WED_PCIE_INT_CTRL_SRC_SEL,
				   dev->hw->index));
		break;
	}
	case MTK_WED_BUS_AXI:
		wed_set(dev, MTK_WED_WPDMA_INT_CTRL,
			MTK_WED_WPDMA_INT_CTRL_SIG_SRC |
			FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_SRC_SEL, 0));
		break;
	default:
		break;
	}
}

/* Point WED at the WLAN driver's WPDMA register windows. On v1 only the
 * physical base is needed; newer hardware also takes the interrupt/tx/rx
 * windows and (with rx offload / hw-RRO) the RRO ring configuration.
 */
static void
mtk_wed_set_wpdma(struct mtk_wed_device *dev)
{
	int i;

	if (mtk_wed_is_v1(dev->hw)) {
		wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_phys);
		return;
	}

	mtk_wed_bus_init(dev);

	wed_w32(dev, MTK_WED_WPDMA_CFG_BASE, dev->wlan.wpdma_int);
	wed_w32(dev, MTK_WED_WPDMA_CFG_INT_MASK, dev->wlan.wpdma_mask);
	wed_w32(dev, MTK_WED_WPDMA_CFG_TX, dev->wlan.wpdma_tx);
	wed_w32(dev, MTK_WED_WPDMA_CFG_TX_FREE, dev->wlan.wpdma_txfree);

	if (!mtk_wed_get_rx_capa(dev))
		return;

	wed_w32(dev, MTK_WED_WPDMA_RX_GLO_CFG, dev->wlan.wpdma_rx_glo);
	wed_w32(dev, dev->hw->soc->regmap.wpdma_rx_ring0, dev->wlan.wpdma_rx);

	if (!dev->wlan.hw_rro)
		return;

	wed_w32(dev, MTK_WED_RRO_RX_D_CFG(0), dev->wlan.wpdma_rx_rro[0]);
	wed_w32(dev, MTK_WED_RRO_RX_D_CFG(1),
		dev->wlan.wpdma_rx_rro[1]);
	for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++)
		wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING_CFG(i),
			dev->wlan.wpdma_rx_pg + i * 0x10);
}

/* Early hardware bring-up: deinit + reset the core, hook up the WPDMA
 * windows, tune the WDMA global config, then program the WDMA register
 * offset map (hardcoded layout on v1, symbolic offsets otherwise).
 */
static void
mtk_wed_hw_init_early(struct mtk_wed_device *dev)
{
	u32 set = FIELD_PREP(MTK_WED_WDMA_GLO_CFG_BT_SIZE, 2);
	u32 mask = MTK_WED_WDMA_GLO_CFG_BT_SIZE;

	mtk_wed_deinit(dev);
	mtk_wed_reset(dev, MTK_WED_RESET_WED);
	mtk_wed_set_wpdma(dev);

	if (!mtk_wed_is_v3_or_greater(dev->hw)) {
		mask |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_DMAD_RECYCLE |
			MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE;
		set |= MTK_WED_WDMA_GLO_CFG_DYNAMIC_SKIP_DMAD_PREP |
		       MTK_WED_WDMA_GLO_CFG_IDLE_DMAD_SUPPLY;
	}
	wed_m32(dev, MTK_WED_WDMA_GLO_CFG, mask, set);

	if (mtk_wed_is_v1(dev->hw)) {
		u32 offset = dev->hw->index ? 0x04000400 : 0;

		wdma_set(dev, MTK_WDMA_GLO_CFG,
			 MTK_WDMA_GLO_CFG_RX_INFO1_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO2_PRERES |
			 MTK_WDMA_GLO_CFG_RX_INFO3_PRERES);

		wed_w32(dev, MTK_WED_WDMA_OFFSET0, 0x2a042a20 + offset);
		wed_w32(dev, MTK_WED_WDMA_OFFSET1, 0x29002800 + offset);
		wed_w32(dev, MTK_WED_PCIE_CFG_BASE,
			MTK_PCIE_BASE(dev->hw->index));
	} else {
		wed_w32(dev, MTK_WED_WDMA_CFG_BASE, dev->hw->wdma_phy);
		wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_ETH_DMAD_FMT);
		wed_w32(dev, MTK_WED_WDMA_OFFSET0,
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_INTS,
				   MTK_WDMA_INT_STATUS) |
			FIELD_PREP(MTK_WED_WDMA_OFST0_GLO_CFG,
				   MTK_WDMA_GLO_CFG));

		wed_w32(dev, MTK_WED_WDMA_OFFSET1,
			FIELD_PREP(MTK_WED_WDMA_OFST1_TX_CTRL,
				   MTK_WDMA_RING_TX(0)) |
			FIELD_PREP(MTK_WED_WDMA_OFST1_RX_CTRL,
				   MTK_WDMA_RING_RX(0)));
	}
}

/* Allocate a coherent descriptor array for an RRO ring of @size entries. */
static int
mtk_wed_rro_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring,
		       int size)
{
	ring->desc = dma_alloc_coherent(dev->hw->dev,
					size * sizeof(*ring->desc),
					&ring->desc_phys, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_size = sizeof(*ring->desc);
	ring->size = size;

	return 0;
}

#define MTK_WED_MIOD_COUNT	(MTK_WED_MIOD_ENTRY_CNT * MTK_WED_MIOD_CNT)

/* Locate the "wo-dlm" reserved-memory region, derive the MIOD/feedback
 * physical bases from it, and allocate the RRO queue ring.
 */
static int
mtk_wed_rro_alloc(struct mtk_wed_device *dev)
{
	struct reserved_mem *rmem;
	struct device_node
			   *np;
	int index;

	index = of_property_match_string(dev->hw->node, "memory-region-names",
					 "wo-dlm");
	if (index < 0)
		return index;

	np = of_parse_phandle(dev->hw->node, "memory-region", index);
	if (!np)
		return -ENODEV;

	rmem = of_reserved_mem_lookup(np);
	of_node_put(np);

	if (!rmem)
		return -ENODEV;

	dev->rro.miod_phys = rmem->base;
	dev->rro.fdbk_phys = MTK_WED_MIOD_COUNT + dev->rro.miod_phys;

	return mtk_wed_rro_ring_alloc(dev, &dev->rro.ring,
				      MTK_WED_RRO_QUE_CNT);
}

/* Send the RRO ring layout (MIOD + feedback ring, in WO CPU view) to the
 * offload MCU via a WED_CFG message; waits for the reply.
 */
static int
mtk_wed_rro_cfg(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	struct {
		struct {
			__le32 base;
			__le32 cnt;
			__le32 unit;
		} ring[2];
		__le32 wed;
		u8 version;
	} req = {
		.ring[0] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE),
			.cnt = cpu_to_le32(MTK_WED_MIOD_CNT),
			.unit = cpu_to_le32(MTK_WED_MIOD_ENTRY_CNT),
		},
		.ring[1] = {
			.base = cpu_to_le32(MTK_WED_WOCPU_VIEW_MIOD_BASE +
					    MTK_WED_MIOD_COUNT),
			.cnt = cpu_to_le32(MTK_WED_FB_CMD_CNT),
			.unit = cpu_to_le32(4),
		},
	};

	return mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				    MTK_WED_WO_CMD_WED_CFG,
				    &req, sizeof(req), true);
}

/* Program the RRO queue-manager registers (MIOD layout, MIOD/feedback
 * bases and counts, queue base), pulse the reset indices, then enable
 * the RRO QM in WED_CTRL.
 */
static void
mtk_wed_rro_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RROQM_MIOD_CFG,
		FIELD_PREP(MTK_WED_RROQM_MIOD_MID_DW, 0x70 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_MOD_DW, 0x10 >> 2) |
		FIELD_PREP(MTK_WED_RROQM_MIOD_ENTRY_DW,
			   MTK_WED_MIOD_ENTRY_CNT >> 2));

	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL0, dev->rro.miod_phys);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_MIOD_CNT, MTK_WED_MIOD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL0, dev->rro.fdbk_phys);
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL1,
		FIELD_PREP(MTK_WED_RROQM_FDBK_CNT, MTK_WED_FB_CMD_CNT));
	wed_w32(dev, MTK_WED_RROQM_FDBK_CTRL2, 0);
	wed_w32(dev, MTK_WED_RROQ_BASE_L, dev->rro.ring.desc_phys);

	wed_set(dev, MTK_WED_RROQM_RST_IDX,
		MTK_WED_RROQM_RST_IDX_MIOD |
		MTK_WED_RROQM_RST_IDX_FDBK);

	wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	wed_w32(dev, MTK_WED_RROQM_MIOD_CTRL2, MTK_WED_MIOD_CNT - 1);
	wed_set(dev, MTK_WED_CTRL,
		MTK_WED_CTRL_RX_RRO_QM_EN);
}

/* Reset the RX route queue manager, wait for the reset bit to clear,
 * configure its forwarding port per hardware version, then enable it.
 */
static void
mtk_wed_route_qm_hw_init(struct mtk_wed_device *dev)
{
	wed_w32(dev, MTK_WED_RESET, MTK_WED_RESET_RX_ROUTE_QM);

	for (;;) {
		usleep_range(100, 200);
		if (!(wed_r32(dev, MTK_WED_RESET) & MTK_WED_RESET_RX_ROUTE_QM))
			break;
	}

	/* configure RX_ROUTE_QM */
	if (mtk_wed_is_v2(dev->hw)) {
		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_TXDMAD_FPORT);
		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
			FIELD_PREP(MTK_WED_RTQM_TXDMAD_FPORT,
				   0x3 + dev->hw->index));
		wed_clr(dev, MTK_WED_RTQM_GLO_CFG, MTK_WED_RTQM_Q_RST);
	} else {
		wed_set(dev, MTK_WED_RTQM_ENQ_CFG0,
			FIELD_PREP(MTK_WED_RTQM_ENQ_CFG_TXDMAD_FPORT,
				   0x3 + dev->hw->index));
	}
	/* enable RX_ROUTE_QM */
	wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
}

/* One-time hardware init: program the tx buffer manager (layout differs
 * per version), the token-ID range, and — when rx offload is available —
 * the whole rx side (buffer manager, RRO QM, route QM).
 */
static void
mtk_wed_hw_init(struct mtk_wed_device *dev)
{
	if (dev->init_done)
		return;

	dev->init_done = true;
	mtk_wed_set_ext_int(dev, false);

	wed_w32(dev, MTK_WED_TX_BM_BASE, dev->tx_buf_ring.desc_phys);
	wed_w32(dev, MTK_WED_TX_BM_BUF_LEN, MTK_WED_PKT_SIZE);

	if (mtk_wed_is_v1(dev->hw)) {
		wed_w32(dev, MTK_WED_TX_BM_CTRL,
			MTK_WED_TX_BM_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
				   MTK_WED_TX_RING_SIZE / 256));
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO, 1) |
			MTK_WED_TX_BM_DYN_THR_HI);
	} else if (mtk_wed_is_v2(dev->hw)) {
		wed_w32(dev, MTK_WED_TX_BM_CTRL,
			MTK_WED_TX_BM_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_BM_CTRL_RSV_GRP_NUM,
				   MTK_WED_TX_RING_SIZE / 256));
		wed_w32(dev, MTK_WED_TX_TKID_DYN_THR,
			FIELD_PREP(MTK_WED_TX_TKID_DYN_THR_LO, 0) |
			MTK_WED_TX_TKID_DYN_THR_HI);
		wed_w32(dev, MTK_WED_TX_BM_DYN_THR,
			FIELD_PREP(MTK_WED_TX_BM_DYN_THR_LO_V2, 0) |
			MTK_WED_TX_BM_DYN_THR_HI_V2);
		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM,
				   dev->tx_buf_ring.size / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM,
				   dev->tx_buf_ring.size / 128));
	}

	wed_w32(dev, dev->hw->soc->regmap.tx_bm_tkid,
		FIELD_PREP(MTK_WED_TX_BM_TKID_START, dev->wlan.token_start) |
		FIELD_PREP(MTK_WED_TX_BM_TKID_END,
			   dev->wlan.token_start + dev->wlan.nbuf - 1));

	mtk_wed_reset(dev, MTK_WED_RESET_TX_BM);

	if (mtk_wed_is_v3_or_greater(dev->hw)) {
		/* switch to new bm architecture */
		wed_clr(dev, MTK_WED_TX_BM_CTRL,
			MTK_WED_TX_BM_CTRL_LEGACY_EN);

		wed_w32(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_VLD_GRP_NUM_V3,
				   dev->wlan.nbuf / 128) |
			FIELD_PREP(MTK_WED_TX_TKID_CTRL_RSV_GRP_NUM_V3,
				   dev->wlan.nbuf / 128));
		/* return SKBID + SDP back to bm */
		wed_set(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_FREE_FORMAT);

		wed_w32(dev, MTK_WED_TX_BM_INIT_PTR,
			MTK_WED_TX_BM_PKT_CNT |
			MTK_WED_TX_BM_INIT_SW_TAIL_IDX);
	}

	if (mtk_wed_is_v1(dev->hw)) {
		wed_set(dev, MTK_WED_CTRL,
			MTK_WED_CTRL_WED_TX_BM_EN |
			MTK_WED_CTRL_WED_TX_FREE_AGENT_EN);
	} else if (mtk_wed_get_rx_capa(dev)) {
		/* rx hw init */
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);

		/* reset prefetch index of ring */
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX0_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_RX1_SIDX,
			MTK_WED_WPDMA_RX_D_PREF_SIDX_IDX_CLR);

		/* reset prefetch FIFO of ring */
		wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG,
			MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R0_CLR |
			MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG_R1_CLR);
		wed_w32(dev, MTK_WED_WPDMA_RX_D_PREF_FIFO_CFG, 0);

		mtk_wed_rx_buffer_hw_init(dev);
		mtk_wed_rro_hw_init(dev);
		mtk_wed_route_qm_hw_init(dev);
	}

	wed_clr(dev, MTK_WED_TX_BM_CTRL, MTK_WED_TX_BM_CTRL_PAUSE);
	if (!mtk_wed_is_v1(dev->hw))
		wed_clr(dev, MTK_WED_TX_TKID_CTRL,
			MTK_WED_TX_TKID_CTRL_PAUSE);
}

/* Reinitialize @size descriptors of @ring: tx descriptors are marked
 * DMA_DONE, rx descriptors are marked TO_HOST; buffer pointers and info
 * are zeroed.
 */
static void
mtk_wed_ring_reset(struct mtk_wed_ring *ring, int size, bool tx)
{
	void *head = (void *)ring->desc;
	int i;

	for (i = 0; i < size; i++) {
		struct mtk_wdma_desc *desc;

		desc = (struct mtk_wdma_desc *)(head + i * ring->desc_size);
		desc->buf0 = 0;
		if (tx)
			desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE);
		else
			desc->ctrl = cpu_to_le32(MTK_WFDMA_DESC_CTRL_TO_HOST);
		desc->buf1 = 0;
		desc->info = 0;
	}
}

/* Full rx-path reset: put the offload MCU into SER-reset state, then
 * drain/reset the WPDMA rx driver, RRO QM, hw-RRO drivers, route QM,
 * tx WDMA, WED rx DMA and the rx buffer managers, re-enable the MCU,
 * and finally reinitialize the rx rings and free the rx buffers.
 */
static int
mtk_wed_rx_reset(struct mtk_wed_device *dev)
{
	struct mtk_wed_wo *wo = dev->hw->wed_wo;
	u8 val = MTK_WED_WO_STATE_SER_RESET;
	int i, ret;

	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	if (dev->wlan.hw_rro) {
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN);
		mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_HW_STS,
				  MTK_WED_RX_IND_CMD_BUSY);
		mtk_wed_reset(dev, MTK_WED_RESET_RRO_RX_TO_PG);
	}

	wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
				MTK_WED_WPDMA_RX_D_RX_DRV_BUSY);
	if (!ret && mtk_wed_is_v3_or_greater(dev->hw))
		ret = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
					MTK_WED_WPDMA_RX_D_PREF_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT);
		mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_D_DRV);
	} else {
		if (mtk_wed_is_v3_or_greater(dev->hw)) {
			/* 1.a.
			 * disable prefetch HW
			 */
			wed_clr(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
				MTK_WED_WPDMA_RX_D_PREF_EN);
			mtk_wed_poll_busy(dev, MTK_WED_WPDMA_RX_D_PREF_CFG,
					  MTK_WED_WPDMA_RX_D_PREF_BUSY);
			wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
				MTK_WED_WPDMA_RX_D_RST_DRV_IDX_ALL);
		}

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX,
			MTK_WED_WPDMA_RX_D_RST_CRX_IDX |
			MTK_WED_WPDMA_RX_D_RST_DRV_IDX);
		wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);
		wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG,
			MTK_WED_WPDMA_RX_D_RST_INIT_COMPLETE |
			MTK_WED_WPDMA_RX_D_FSM_RETURN_IDLE);

		wed_w32(dev, MTK_WED_WPDMA_RX_D_RST_IDX, 0);
	}

	/* reset rro qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_RRO_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_RRO_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_RRO_QM);
	} else {
		wed_set(dev, MTK_WED_RROQM_RST_IDX,
			MTK_WED_RROQM_RST_IDX_MIOD |
			MTK_WED_RROQM_RST_IDX_FDBK);
		wed_w32(dev, MTK_WED_RROQM_RST_IDX, 0);
	}

	if (dev->wlan.hw_rro) {
		/* disable rro msdu page drv */
		wed_clr(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
			MTK_WED_RRO_MSDU_PG_DRV_EN);

		/* disable rro data drv */
		wed_clr(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN);

		/* rro msdu page drv reset */
		wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
			MTK_WED_RRO_MSDU_PG_DRV_CLR);
		mtk_wed_poll_busy(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG,
				  MTK_WED_RRO_MSDU_PG_DRV_CLR);

		/* rro data drv reset */
		wed_w32(dev, MTK_WED_RRO_RX_D_CFG(2),
			MTK_WED_RRO_RX_D_DRV_CLR);
		mtk_wed_poll_busy(dev, MTK_WED_RRO_RX_D_CFG(2),
				  MTK_WED_RRO_RX_D_DRV_CLR);
	}

	/* reset route qm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_RX_ROUTE_QM_EN);
	ret = mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				MTK_WED_CTRL_RX_ROUTE_QM_BUSY);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	} else if (mtk_wed_is_v3_or_greater(dev->hw)) {
		wed_set(dev, MTK_WED_RTQM_RST, BIT(0));
		wed_clr(dev, MTK_WED_RTQM_RST, BIT(0));
		mtk_wed_reset(dev, MTK_WED_RESET_RX_ROUTE_QM);
	} else {
		wed_set(dev, MTK_WED_RTQM_GLO_CFG,
			MTK_WED_RTQM_Q_RST);
	}

	/* reset tx wdma */
	mtk_wdma_tx_reset(dev);

	/* reset tx wdma drv */
	wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN);
	if (mtk_wed_is_v3_or_greater(dev->hw))
		mtk_wed_poll_busy(dev, MTK_WED_WPDMA_STATUS,
				  MTK_WED_WPDMA_STATUS_TX_DRV);
	else
		mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				  MTK_WED_CTRL_WDMA_INT_AGENT_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_WDMA_TX_DRV);

	/* reset wed rx dma */
	ret = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG,
				MTK_WED_GLO_CFG_RX_DMA_BUSY);
	wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_RX_DMA_EN);
	if (ret) {
		mtk_wed_reset(dev, MTK_WED_RESET_WED_RX_DMA);
	} else {
		wed_set(dev, MTK_WED_RESET_IDX,
			dev->hw->soc->regmap.reset_idx_rx_mask);
		wed_w32(dev, MTK_WED_RESET_IDX, 0);
	}

	/* reset rx bm */
	wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_EN);
	mtk_wed_poll_busy(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_BM_BUSY);
	mtk_wed_reset(dev, MTK_WED_RESET_RX_BM);

	if (dev->wlan.hw_rro) {
		wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_PG_BM_EN);
		mtk_wed_poll_busy(dev, MTK_WED_CTRL,
				  MTK_WED_CTRL_WED_RX_PG_BM_BUSY);
		wed_set(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
		wed_clr(dev, MTK_WED_RESET, MTK_WED_RESET_RX_PG_BM);
	}

	/* wo change to enable state */
	val = MTK_WED_WO_STATE_ENABLE;
	ret = mtk_wed_mcu_send_msg(wo, MTK_WED_MODULE_ID_WO,
				   MTK_WED_WO_CMD_CHANGE_STATE, &val,
				   sizeof(val), true);
	if (ret)
		return ret;

	/* wed_rx_ring_reset */
	for (i = 0; i < ARRAY_SIZE(dev->rx_ring); i++) {
		if (!dev->rx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->rx_ring[i], MTK_WED_RX_RING_SIZE,
				   false);
	}
	mtk_wed_free_rx_buffer(dev);
	mtk_wed_hwrro_free_buffer(dev);

	return 0;
}

/* Numbered reset sequence bringing the whole WED datapath back to a
 * clean state: tx rings, WED tx DMA, WDMA rx, WPDMA tx/rx, the token
 * free agent and buffer manager, and (when supported) the AMSDU engine
 * and the rx path via mtk_wed_rx_reset().
 */
static void
mtk_wed_reset_dma(struct mtk_wed_device *dev)
{
	bool busy = false;
	u32 val;
	int i;

	for (i = 0; i < ARRAY_SIZE(dev->tx_ring); i++) {
		if (!dev->tx_ring[i].desc)
			continue;

		mtk_wed_ring_reset(&dev->tx_ring[i], MTK_WED_TX_RING_SIZE,
				   true);
	}

	/* 1.
reset WED tx DMA */ wed_clr(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN); busy = mtk_wed_poll_busy(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WED_TX_DMA); } else { wed_w32(dev, MTK_WED_RESET_IDX, dev->hw->soc->regmap.reset_idx_tx_mask); wed_w32(dev, MTK_WED_RESET_IDX, 0); } /* 2. reset WDMA rx DMA */ busy = !!mtk_wdma_rx_reset(dev); if (mtk_wed_is_v3_or_greater(dev->hw)) { val = MTK_WED_WDMA_GLO_CFG_RX_DIS_FSM_AUTO_IDLE | wed_r32(dev, MTK_WED_WDMA_GLO_CFG); val &= ~MTK_WED_WDMA_GLO_CFG_RX_DRV_EN; wed_w32(dev, MTK_WED_WDMA_GLO_CFG, val); } else { wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); } if (!busy) busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_BUSY); if (!busy && mtk_wed_is_v3_or_greater(dev->hw)) busy = mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WDMA_RX_DRV); } else { if (mtk_wed_is_v3_or_greater(dev->hw)) { /* 1.a. disable prefetch HW */ wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); mtk_wed_poll_busy(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_BUSY); wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_DDONE2_EN); /* 2. Reset dma index */ wed_w32(dev, MTK_WED_WDMA_RESET_IDX, MTK_WED_WDMA_RESET_IDX_RX_ALL); } wed_w32(dev, MTK_WED_WDMA_RESET_IDX, MTK_WED_WDMA_RESET_IDX_RX | MTK_WED_WDMA_RESET_IDX_DRV); wed_w32(dev, MTK_WED_WDMA_RESET_IDX, 0); wed_set(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); wed_clr(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RST_INIT_COMPLETE); } /* 3. 
reset WED WPDMA tx */ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); for (i = 0; i < 100; i++) { if (mtk_wed_is_v1(dev->hw)) val = FIELD_GET(MTK_WED_TX_BM_INTF_TKFIFO_FDEP, wed_r32(dev, MTK_WED_TX_BM_INTF)); else val = FIELD_GET(MTK_WED_TX_TKID_INTF_TKFIFO_FDEP, wed_r32(dev, MTK_WED_TX_TKID_INTF)); if (val == 0x40) break; } mtk_wed_reset(dev, MTK_WED_RESET_TX_FREE_AGENT); wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_TX_BM_EN); mtk_wed_reset(dev, MTK_WED_RESET_TX_BM); /* 4. reset WED WPDMA tx */ busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_DRV_BUSY); wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); if (!busy) busy = mtk_wed_poll_busy(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_RX_DRV_BUSY); if (busy) { mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_INT_AGENT); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_TX_DRV); mtk_wed_reset(dev, MTK_WED_RESET_WPDMA_RX_DRV); if (mtk_wed_is_v3_or_greater(dev->hw)) wed_w32(dev, MTK_WED_RX1_CTRL2, 0); } else { wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, MTK_WED_WPDMA_RESET_IDX_TX | MTK_WED_WPDMA_RESET_IDX_RX); wed_w32(dev, MTK_WED_WPDMA_RESET_IDX, 0); } dev->init_done = false; if (mtk_wed_is_v1(dev->hw)) return; if (!busy) { wed_w32(dev, MTK_WED_RESET_IDX, MTK_WED_RESET_WPDMA_IDX_RX); wed_w32(dev, MTK_WED_RESET_IDX, 0); } if (mtk_wed_is_v3_or_greater(dev->hw)) { /* reset amsdu engine */ wed_clr(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_AMSDU_EN); mtk_wed_reset(dev, MTK_WED_RESET_TX_AMSDU); } if (mtk_wed_get_rx_capa(dev)) mtk_wed_rx_reset(dev); } static int mtk_wed_ring_alloc(struct mtk_wed_device *dev, struct mtk_wed_ring *ring, int size, u32 desc_size, bool tx) { ring->desc = dma_alloc_coherent(dev->hw->dev, size * desc_size, &ring->desc_phys, GFP_KERNEL); if (!ring->desc) return -ENOMEM; ring->desc_size = desc_size; ring->size = size; mtk_wed_ring_reset(ring, size, tx); return 0; } static int mtk_wed_wdma_rx_ring_setup(struct mtk_wed_device *dev, int 
idx, int size, bool reset) { struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->rx_wdma)) return -EINVAL; wdma = &dev->rx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, size); wdma_w32(dev, MTK_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wed_w32(dev, MTK_WED_WDMA_RING_RX(idx) + MTK_WED_RING_OFS_COUNT, size); return 0; } static int mtk_wed_wdma_tx_ring_setup(struct mtk_wed_device *dev, int idx, int size, bool reset) { struct mtk_wed_ring *wdma; if (idx >= ARRAY_SIZE(dev->tx_wdma)) return -EINVAL; wdma = &dev->tx_wdma[idx]; if (!reset && mtk_wed_ring_alloc(dev, wdma, MTK_WED_WDMA_RING_SIZE, dev->hw->soc->wdma_desc_size, true)) return -ENOMEM; if (mtk_wed_is_v3_or_greater(dev->hw)) { struct mtk_wdma_desc *desc = wdma->desc; int i; for (i = 0; i < MTK_WED_WDMA_RING_SIZE; i++) { desc->buf0 = 0; desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); desc->buf1 = 0; desc->info = cpu_to_le32(MTK_WDMA_TXD0_DESC_INFO_DMA_DONE); desc++; desc->buf0 = 0; desc->ctrl = cpu_to_le32(MTK_WDMA_DESC_CTRL_DMA_DONE); desc->buf1 = 0; desc->info = cpu_to_le32(MTK_WDMA_TXD1_DESC_INFO_DMA_DONE); desc++; } } wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, size); wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); wdma_w32(dev, MTK_WDMA_RING_TX(idx) + MTK_WED_RING_OFS_DMA_IDX, 0); if (reset) mtk_wed_ring_reset(wdma, MTK_WED_WDMA_RING_SIZE, true); if (!idx) { wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_BASE, wdma->desc_phys); wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_COUNT, size); wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_CPU_IDX, 0); 
wed_w32(dev, MTK_WED_WDMA_RING_TX + MTK_WED_RING_OFS_DMA_IDX, 0); } return 0; } static void mtk_wed_ppe_check(struct mtk_wed_device *dev, struct sk_buff *skb, u32 reason, u32 hash) { struct mtk_eth *eth = dev->hw->eth; struct ethhdr *eh; if (!skb) return; if (reason != MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED) return; skb_set_mac_header(skb, 0); eh = eth_hdr(skb); skb->protocol = eh->h_proto; mtk_ppe_check_skb(eth->ppe[dev->hw->index], skb, hash); } static void mtk_wed_configure_irq(struct mtk_wed_device *dev, u32 irq_mask) { u32 wdma_mask = FIELD_PREP(MTK_WDMA_INT_MASK_RX_DONE, GENMASK(1, 0)); /* wed control cr set */ wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WDMA_INT_AGENT_EN | MTK_WED_CTRL_WPDMA_INT_AGENT_EN | MTK_WED_CTRL_WED_TX_BM_EN | MTK_WED_CTRL_WED_TX_FREE_AGENT_EN); if (mtk_wed_is_v1(dev->hw)) { wed_w32(dev, MTK_WED_PCIE_INT_TRIGGER, MTK_WED_PCIE_INT_TRIGGER_STATUS); wed_w32(dev, MTK_WED_WPDMA_INT_TRIGGER, MTK_WED_WPDMA_INT_TRIGGER_RX_DONE | MTK_WED_WPDMA_INT_TRIGGER_TX_DONE); wed_clr(dev, MTK_WED_WDMA_INT_CTRL, wdma_mask); } else { if (mtk_wed_is_v3_or_greater(dev->hw)) wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_TX_TKID_ALI_EN); /* initail tx interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX, MTK_WED_WPDMA_INT_CTRL_TX0_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX0_DONE_CLR | MTK_WED_WPDMA_INT_CTRL_TX1_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX1_DONE_CLR | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX0_DONE_TRIG, dev->wlan.tx_tbit[0]) | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX1_DONE_TRIG, dev->wlan.tx_tbit[1])); /* initail txfree interrupt trigger */ wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_TX_FREE, MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_EN | MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_CLR | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_TX_FREE_DONE_TRIG, dev->wlan.txfree_tbit)); if (mtk_wed_get_rx_capa(dev)) { wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RX, MTK_WED_WPDMA_INT_CTRL_RX0_EN | MTK_WED_WPDMA_INT_CTRL_RX0_CLR | MTK_WED_WPDMA_INT_CTRL_RX1_EN | MTK_WED_WPDMA_INT_CTRL_RX1_CLR | 
FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX0_DONE_TRIG, dev->wlan.rx_tbit[0]) | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RX1_DONE_TRIG, dev->wlan.rx_tbit[1])); wdma_mask |= FIELD_PREP(MTK_WDMA_INT_MASK_TX_DONE, GENMASK(1, 0)); } wed_w32(dev, MTK_WED_WDMA_INT_CLR, wdma_mask); wed_set(dev, MTK_WED_WDMA_INT_CTRL, FIELD_PREP(MTK_WED_WDMA_INT_CTRL_POLL_SRC_SEL, dev->wdma_idx)); } wed_w32(dev, MTK_WED_WDMA_INT_TRIGGER, wdma_mask); wdma_w32(dev, MTK_WDMA_INT_MASK, wdma_mask); wdma_w32(dev, MTK_WDMA_INT_GRP2, wdma_mask); wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); wed_w32(dev, MTK_WED_INT_MASK, irq_mask); } #define MTK_WFMDA_RX_DMA_EN BIT(2) static void mtk_wed_dma_enable(struct mtk_wed_device *dev) { int i; if (!mtk_wed_is_v3_or_greater(dev->hw)) { wed_set(dev, MTK_WED_WPDMA_INT_CTRL, MTK_WED_WPDMA_INT_CTRL_SUBRT_ADV); wed_set(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN); wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN | MTK_WDMA_GLO_CFG_RX_INFO1_PRERES | MTK_WDMA_GLO_CFG_RX_INFO2_PRERES); wed_set(dev, MTK_WED_WPDMA_CTRL, MTK_WED_WPDMA_CTRL_SDL1_FIXED); } else { wed_set(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_DRV_EN | MTK_WED_WPDMA_GLO_CFG_RX_DRV_EN | MTK_WED_WPDMA_GLO_CFG_RX_DDONE2_WR); wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_TX_DMA_EN); } wed_set(dev, MTK_WED_GLO_CFG, MTK_WED_GLO_CFG_TX_DMA_EN | MTK_WED_GLO_CFG_RX_DMA_EN); wed_set(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_RX_DRV_EN); if (mtk_wed_is_v1(dev->hw)) { wdma_set(dev, MTK_WDMA_GLO_CFG, MTK_WDMA_GLO_CFG_RX_INFO3_PRERES); return; } wed_set(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_PKT_PROC | MTK_WED_WPDMA_GLO_CFG_RX_DRV_R0_CRX_SYNC); if (mtk_wed_is_v3_or_greater(dev->hw)) { wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, FIELD_PREP(MTK_WED_WDMA_RX_PREF_BURST_SIZE, 0x10) | FIELD_PREP(MTK_WED_WDMA_RX_PREF_LOW_THRES, 0x8)); wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_DDONE2_EN); wed_set(dev, 
MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_EN); wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK_LAST); wed_set(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_DDONE_CHK | MTK_WED_WPDMA_GLO_CFG_RX_DRV_EVENT_PKT_FMT_CHK | MTK_WED_WPDMA_GLO_CFG_RX_DRV_UNS_VER_FORCE_4); wdma_set(dev, MTK_WDMA_PREF_RX_CFG, MTK_WDMA_PREF_RX_CFG_PREF_EN); wdma_set(dev, MTK_WDMA_WRBK_RX_CFG, MTK_WDMA_WRBK_RX_CFG_WRBK_EN); } wed_clr(dev, MTK_WED_WPDMA_GLO_CFG, MTK_WED_WPDMA_GLO_CFG_TX_TKID_KEEP | MTK_WED_WPDMA_GLO_CFG_TX_DMAD_DW3_PREV); if (!mtk_wed_get_rx_capa(dev)) return; wed_set(dev, MTK_WED_WDMA_GLO_CFG, MTK_WED_WDMA_GLO_CFG_TX_DRV_EN | MTK_WED_WDMA_GLO_CFG_TX_DDONE_CHK); wed_clr(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RXD_READ_LEN); wed_set(dev, MTK_WED_WPDMA_RX_D_GLO_CFG, MTK_WED_WPDMA_RX_D_RX_DRV_EN | FIELD_PREP(MTK_WED_WPDMA_RX_D_RXD_READ_LEN, 0x18) | FIELD_PREP(MTK_WED_WPDMA_RX_D_INIT_PHASE_RXEN_SEL, 0x2)); if (mtk_wed_is_v3_or_greater(dev->hw)) { wed_set(dev, MTK_WED_WPDMA_RX_D_PREF_CFG, MTK_WED_WPDMA_RX_D_PREF_EN | FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_BURST_SIZE, 0x10) | FIELD_PREP(MTK_WED_WPDMA_RX_D_PREF_LOW_THRES, 0x8)); wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_RX_D_DRV_EN); wdma_set(dev, MTK_WDMA_PREF_TX_CFG, MTK_WDMA_PREF_TX_CFG_PREF_EN); wdma_set(dev, MTK_WDMA_WRBK_TX_CFG, MTK_WDMA_WRBK_TX_CFG_WRBK_EN); } for (i = 0; i < MTK_WED_RX_QUEUES; i++) { struct mtk_wed_ring *ring = &dev->rx_ring[i]; u32 val; if (!(ring->flags & MTK_WED_RING_CONFIGURED)) continue; /* queue is not configured by mt76 */ if (mtk_wed_check_wfdma_rx_fill(dev, ring)) { dev_err(dev->hw->dev, "rx_ring(%d) dma enable failed\n", i); continue; } val = wifi_r32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base) | MTK_WFMDA_RX_DMA_EN; wifi_w32(dev, dev->wlan.wpdma_rx_glo - dev->wlan.phy_base, val); } } static void mtk_wed_start_hw_rro(struct mtk_wed_device *dev, u32 irq_mask, bool reset) { int i; wed_w32(dev, MTK_WED_WPDMA_INT_MASK, irq_mask); wed_w32(dev, 
MTK_WED_INT_MASK, irq_mask); if (!mtk_wed_get_rx_capa(dev) || !dev->wlan.hw_rro) return; if (reset) { wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN); return; } wed_set(dev, MTK_WED_RRO_RX_D_CFG(2), MTK_WED_RRO_MSDU_PG_DRV_CLR); wed_w32(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_CLR); wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_RX, MTK_WED_WPDMA_INT_CTRL_RRO_RX0_EN | MTK_WED_WPDMA_INT_CTRL_RRO_RX0_CLR | MTK_WED_WPDMA_INT_CTRL_RRO_RX1_EN | MTK_WED_WPDMA_INT_CTRL_RRO_RX1_CLR | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX0_DONE_TRIG, dev->wlan.rro_rx_tbit[0]) | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_RX1_DONE_TRIG, dev->wlan.rro_rx_tbit[1])); wed_w32(dev, MTK_WED_WPDMA_INT_CTRL_RRO_MSDU_PG, MTK_WED_WPDMA_INT_CTRL_RRO_PG0_EN | MTK_WED_WPDMA_INT_CTRL_RRO_PG0_CLR | MTK_WED_WPDMA_INT_CTRL_RRO_PG1_EN | MTK_WED_WPDMA_INT_CTRL_RRO_PG1_CLR | MTK_WED_WPDMA_INT_CTRL_RRO_PG2_EN | MTK_WED_WPDMA_INT_CTRL_RRO_PG2_CLR | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG0_DONE_TRIG, dev->wlan.rx_pg_tbit[0]) | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG1_DONE_TRIG, dev->wlan.rx_pg_tbit[1]) | FIELD_PREP(MTK_WED_WPDMA_INT_CTRL_RRO_PG2_DONE_TRIG, dev->wlan.rx_pg_tbit[2])); /* RRO_MSDU_PG_RING2_CFG1_FLD_DRV_EN should be enabled after * WM FWDL completed, otherwise RRO_MSDU_PG ring may broken */ wed_set(dev, MTK_WED_RRO_MSDU_PG_RING2_CFG, MTK_WED_RRO_MSDU_PG_DRV_EN); for (i = 0; i < MTK_WED_RX_QUEUES; i++) { struct mtk_wed_ring *ring = &dev->rx_rro_ring[i]; if (!(ring->flags & MTK_WED_RING_CONFIGURED)) continue; if (mtk_wed_check_wfdma_rx_fill(dev, ring)) dev_err(dev->hw->dev, "rx_rro_ring(%d) initialization failed\n", i); } for (i = 0; i < MTK_WED_RX_PAGE_QUEUES; i++) { struct mtk_wed_ring *ring = &dev->rx_page_ring[i]; if (!(ring->flags & MTK_WED_RING_CONFIGURED)) continue; if (mtk_wed_check_wfdma_rx_fill(dev, ring)) dev_err(dev->hw->dev, "rx_page_ring(%d) initialization failed\n", i); } } static void mtk_wed_rro_rx_ring_setup(struct mtk_wed_device *dev, int idx, void 
__iomem *regs) { struct mtk_wed_ring *ring = &dev->rx_rro_ring[idx]; ring->wpdma = regs; wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_BASE, readl(regs)); wed_w32(dev, MTK_WED_RRO_RX_D_RX(idx) + MTK_WED_RING_OFS_COUNT, readl(regs + MTK_WED_RING_OFS_COUNT)); ring->flags |= MTK_WED_RING_CONFIGURED; } static void mtk_wed_msdu_pg_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs) { struct mtk_wed_ring *ring = &dev->rx_page_ring[idx]; ring->wpdma = regs; wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_BASE, readl(regs)); wed_w32(dev, MTK_WED_RRO_MSDU_PG_CTRL0(idx) + MTK_WED_RING_OFS_COUNT, readl(regs + MTK_WED_RING_OFS_COUNT)); ring->flags |= MTK_WED_RING_CONFIGURED; } static int mtk_wed_ind_rx_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) { struct mtk_wed_ring *ring = &dev->ind_cmd_ring; u32 val = readl(regs + MTK_WED_RING_OFS_COUNT); int i, count = 0; ring->wpdma = regs; wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_BASE, readl(regs) & 0xfffffff0); wed_w32(dev, MTK_WED_IND_CMD_RX_CTRL1 + MTK_WED_RING_OFS_COUNT, readl(regs + MTK_WED_RING_OFS_COUNT)); /* ack sn cr */ wed_w32(dev, MTK_WED_RRO_CFG0, dev->wlan.phy_base + dev->wlan.ind_cmd.ack_sn_addr); wed_w32(dev, MTK_WED_RRO_CFG1, FIELD_PREP(MTK_WED_RRO_CFG1_MAX_WIN_SZ, dev->wlan.ind_cmd.win_size) | FIELD_PREP(MTK_WED_RRO_CFG1_PARTICL_SE_ID, dev->wlan.ind_cmd.particular_sid)); /* particular session addr element */ wed_w32(dev, MTK_WED_ADDR_ELEM_CFG0, dev->wlan.ind_cmd.particular_se_phys); for (i = 0; i < dev->wlan.ind_cmd.se_group_nums; i++) { wed_w32(dev, MTK_WED_RADDR_ELEM_TBL_WDATA, dev->wlan.ind_cmd.addr_elem_phys[i] >> 4); wed_w32(dev, MTK_WED_ADDR_ELEM_TBL_CFG, MTK_WED_ADDR_ELEM_TBL_WR | (i & 0x7f)); val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); while (!(val & MTK_WED_ADDR_ELEM_TBL_WR_RDY) && count++ < 100) val = wed_r32(dev, MTK_WED_ADDR_ELEM_TBL_CFG); if (count >= 100) dev_err(dev->hw->dev, "write ba session base failed\n"); } /* pn check init 
*/ for (i = 0; i < dev->wlan.ind_cmd.particular_sid; i++) { wed_w32(dev, MTK_WED_PN_CHECK_WDATA_M, MTK_WED_PN_CHECK_IS_FIRST); wed_w32(dev, MTK_WED_PN_CHECK_CFG, MTK_WED_PN_CHECK_WR | FIELD_PREP(MTK_WED_PN_CHECK_SE_ID, i)); count = 0; val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); while (!(val & MTK_WED_PN_CHECK_WR_RDY) && count++ < 100) val = wed_r32(dev, MTK_WED_PN_CHECK_CFG); if (count >= 100) dev_err(dev->hw->dev, "session(%d) initialization failed\n", i); } wed_w32(dev, MTK_WED_RX_IND_CMD_CNT0, MTK_WED_RX_IND_CMD_DBG_CNT_EN); wed_set(dev, MTK_WED_CTRL, MTK_WED_CTRL_WED_RX_IND_CMD_EN); return 0; } static void mtk_wed_start(struct mtk_wed_device *dev, u32 irq_mask) { int i; if (mtk_wed_get_rx_capa(dev) && mtk_wed_rx_buffer_alloc(dev)) return; for (i = 0; i < ARRAY_SIZE(dev->rx_wdma); i++) if (!dev->rx_wdma[i].desc) mtk_wed_wdma_rx_ring_setup(dev, i, 16, false); mtk_wed_hw_init(dev); mtk_wed_configure_irq(dev, irq_mask); mtk_wed_set_ext_int(dev, true); if (mtk_wed_is_v1(dev->hw)) { u32 val = dev->wlan.wpdma_phys | MTK_PCIE_MIRROR_MAP_EN | FIELD_PREP(MTK_PCIE_MIRROR_MAP_WED_ID, dev->hw->index); val |= BIT(0) | (BIT(1) * !!dev->hw->index); regmap_write(dev->hw->mirror, dev->hw->index * 4, val); } else if (mtk_wed_get_rx_capa(dev)) { /* driver set mid ready and only once */ wed_w32(dev, MTK_WED_EXT_INT_MASK1, MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); wed_w32(dev, MTK_WED_EXT_INT_MASK2, MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); wed_r32(dev, MTK_WED_EXT_INT_MASK1); wed_r32(dev, MTK_WED_EXT_INT_MASK2); if (mtk_wed_is_v3_or_greater(dev->hw)) { wed_w32(dev, MTK_WED_EXT_INT_MASK3, MTK_WED_EXT_INT_STATUS_WPDMA_MID_RDY); wed_r32(dev, MTK_WED_EXT_INT_MASK3); } if (mtk_wed_rro_cfg(dev)) return; } mtk_wed_set_512_support(dev, dev->wlan.wcid_512); mtk_wed_amsdu_init(dev); mtk_wed_dma_enable(dev); dev->running = true; } static int mtk_wed_attach(struct mtk_wed_device *dev) __releases(RCU) { struct mtk_wed_hw *hw; struct device *device; int ret = 0; RCU_LOCKDEP_WARN(!rcu_read_lock_held(), 
"mtk_wed_attach without holding the RCU read lock"); if ((dev->wlan.bus_type == MTK_WED_BUS_PCIE && pci_domain_nr(dev->wlan.pci_dev->bus) > 1) || !try_module_get(THIS_MODULE)) ret = -ENODEV; rcu_read_unlock(); if (ret) return ret; mutex_lock(&hw_lock); hw = mtk_wed_assign(dev); if (!hw) { module_put(THIS_MODULE); ret = -ENODEV; goto unlock; } device = dev->wlan.bus_type == MTK_WED_BUS_PCIE ? &dev->wlan.pci_dev->dev : &dev->wlan.platform_dev->dev; dev_info(device, "attaching wed device %d version %d\n", hw->index, hw->version); dev->hw = hw; dev->dev = hw->dev; dev->irq = hw->irq; dev->wdma_idx = hw->index; dev->version = hw->version; dev->hw->pcie_base = mtk_wed_get_pcie_base(dev); if (hw->eth->dma_dev == hw->eth->dev && of_dma_is_coherent(hw->eth->dev->of_node)) mtk_eth_set_dma_device(hw->eth, hw->dev); ret = mtk_wed_tx_buffer_alloc(dev); if (ret) goto out; ret = mtk_wed_amsdu_buffer_alloc(dev); if (ret) goto out; if (mtk_wed_get_rx_capa(dev)) { ret = mtk_wed_rro_alloc(dev); if (ret) goto out; } mtk_wed_hw_init_early(dev); if (mtk_wed_is_v1(hw)) regmap_update_bits(hw->hifsys, HIFSYS_DMA_AG_MAP, BIT(hw->index), 0); else dev->rev_id = wed_r32(dev, MTK_WED_REV_ID); if (mtk_wed_get_rx_capa(dev)) ret = mtk_wed_wo_init(hw); out: if (ret) { dev_err(dev->hw->dev, "failed to attach wed device\n"); __mtk_wed_detach(dev); } unlock: mutex_unlock(&hw_lock); return ret; } static int mtk_wed_tx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, bool reset) { struct mtk_wed_ring *ring = &dev->tx_ring[idx]; /* * Tx ring redirection: * Instead of configuring the WLAN PDMA TX ring directly, the WLAN * driver allocated DMA ring gets configured into WED MTK_WED_RING_TX(n) * registers. * * WED driver posts its own DMA ring as WLAN PDMA TX and configures it * into MTK_WED_WPDMA_RING_TX(n) registers. * It gets filled with packets picked up from WED TX ring and from * WDMA RX. 
*/ if (WARN_ON(idx >= ARRAY_SIZE(dev->tx_ring))) return -EINVAL; if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_TX_RING_SIZE, sizeof(*ring->desc), true)) return -ENOMEM; if (mtk_wed_wdma_rx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset)) return -ENOMEM; ring->reg_base = MTK_WED_RING_TX(idx); ring->wpdma = regs; if (mtk_wed_is_v3_or_greater(dev->hw) && idx == 1) { /* reset prefetch index */ wed_set(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); wed_clr(dev, MTK_WED_WDMA_RX_PREF_CFG, MTK_WED_WDMA_RX_PREF_RX0_SIDX_CLR | MTK_WED_WDMA_RX_PREF_RX1_SIDX_CLR); /* reset prefetch FIFO */ wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, MTK_WED_WDMA_RX_PREF_FIFO_RX0_CLR | MTK_WED_WDMA_RX_PREF_FIFO_RX1_CLR); wed_w32(dev, MTK_WED_WDMA_RX_PREF_FIFO_CFG, 0); } /* WED -> WPDMA */ wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); wpdma_tx_w32(dev, idx, MTK_WED_RING_OFS_CPU_IDX, 0); wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_BASE, ring->desc_phys); wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_COUNT, MTK_WED_TX_RING_SIZE); wed_w32(dev, MTK_WED_WPDMA_RING_TX(idx) + MTK_WED_RING_OFS_CPU_IDX, 0); return 0; } static int mtk_wed_txfree_ring_setup(struct mtk_wed_device *dev, void __iomem *regs) { struct mtk_wed_ring *ring = &dev->txfree_ring; int i, index = mtk_wed_is_v1(dev->hw); /* * For txfree event handling, the same DMA ring is shared between WED * and WLAN. 
The WLAN driver accesses the ring index registers through * WED */ ring->reg_base = MTK_WED_RING_RX(index); ring->wpdma = regs; for (i = 0; i < 12; i += 4) { u32 val = readl(regs + i); wed_w32(dev, MTK_WED_RING_RX(index) + i, val); wed_w32(dev, MTK_WED_WPDMA_RING_RX(index) + i, val); } return 0; } static int mtk_wed_rx_ring_setup(struct mtk_wed_device *dev, int idx, void __iomem *regs, bool reset) { struct mtk_wed_ring *ring = &dev->rx_ring[idx]; if (WARN_ON(idx >= ARRAY_SIZE(dev->rx_ring))) return -EINVAL; if (!reset && mtk_wed_ring_alloc(dev, ring, MTK_WED_RX_RING_SIZE, sizeof(*ring->desc), false)) return -ENOMEM; if (mtk_wed_wdma_tx_ring_setup(dev, idx, MTK_WED_WDMA_RING_SIZE, reset)) return -ENOMEM; ring->reg_base = MTK_WED_RING_RX_DATA(idx); ring->wpdma = regs; ring->flags |= MTK_WED_RING_CONFIGURED; /* WPDMA -> WED */ wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_BASE, ring->desc_phys); wpdma_rx_w32(dev, idx, MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_BASE, ring->desc_phys); wed_w32(dev, MTK_WED_WPDMA_RING_RX_DATA(idx) + MTK_WED_RING_OFS_COUNT, MTK_WED_RX_RING_SIZE); return 0; } static u32 mtk_wed_irq_get(struct mtk_wed_device *dev, u32 mask) { u32 val, ext_mask; if (mtk_wed_is_v3_or_greater(dev->hw)) ext_mask = MTK_WED_EXT_INT_STATUS_RX_DRV_COHERENT | MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; else ext_mask = MTK_WED_EXT_INT_STATUS_ERROR_MASK; val = wed_r32(dev, MTK_WED_EXT_INT_STATUS); wed_w32(dev, MTK_WED_EXT_INT_STATUS, val); val &= ext_mask; if (!dev->hw->num_flows) val &= ~MTK_WED_EXT_INT_STATUS_TKID_WO_PYLD; if (val && net_ratelimit()) pr_err("mtk_wed%d: error status=%08x\n", dev->hw->index, val); val = wed_r32(dev, MTK_WED_INT_STATUS); val &= mask; wed_w32(dev, MTK_WED_INT_STATUS, val); /* ACK */ return val; } static void mtk_wed_irq_set_mask(struct mtk_wed_device *dev, u32 mask) { mtk_wed_set_ext_int(dev, !!mask); wed_w32(dev, MTK_WED_INT_MASK, mask); } int mtk_wed_flow_add(int index) { struct 
mtk_wed_hw *hw = hw_list[index];
	int ret = 0;

	mutex_lock(&hw_lock);

	/* no WED hw at this index, or no wlan device attached yet */
	if (!hw || !hw->wed_dev) {
		ret = -ENODEV;
		goto out;
	}

	if (!hw->wed_dev->wlan.offload_enable)
		goto out;

	/* already enabled: just take another reference */
	if (hw->num_flows) {
		hw->num_flows++;
		goto out;
	}

	/* first flow: ask the wlan driver to turn offload on */
	ret = hw->wed_dev->wlan.offload_enable(hw->wed_dev);
	if (!ret)
		hw->num_flows++;
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);

	return ret;
}

/*
 * Drop one flow reference on the WED hw at @index; when the count hits
 * zero, ask the wlan driver to disable hw offload again.
 */
void mtk_wed_flow_remove(int index)
{
	struct mtk_wed_hw *hw = hw_list[index];

	mutex_lock(&hw_lock);

	if (!hw || !hw->wed_dev)
		goto out;

	if (!hw->wed_dev->wlan.offload_disable)
		goto out;

	/* still other flows referencing this hw */
	if (--hw->num_flows)
		goto out;

	hw->wed_dev->wlan.offload_disable(hw->wed_dev);
	mtk_wed_set_ext_int(hw->wed_dev, true);

out:
	mutex_unlock(&hw_lock);
}

/*
 * flow_block callback: forward TC flower classifier commands to the
 * mtk_eth flow offload backend. Only TC_SETUP_CLSFLOWER is handled.
 */
static int mtk_wed_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
				     void *cb_priv)
{
	struct mtk_wed_flow_block_priv *priv = cb_priv;
	struct flow_cls_offload *cls = type_data;
	struct mtk_wed_hw *hw = NULL;

	if (!priv || !tc_can_offload(priv->dev))
		return -EOPNOTSUPP;

	if (type != TC_SETUP_CLSFLOWER)
		return -EOPNOTSUPP;

	hw = priv->hw;
	return mtk_flow_offload_cmd(hw->eth, cls, hw->index);
}

/*
 * Bind/unbind a TC flow block on @dev to this WED hw. Uses the standard
 * flow_block_cb refcounting so repeated binds share one callback.
 */
static int mtk_wed_setup_tc_block(struct mtk_wed_hw *hw,
				  struct net_device *dev,
				  struct flow_block_offload *f)
{
	struct mtk_wed_flow_block_priv *priv;
	static LIST_HEAD(block_cb_list);
	struct flow_block_cb *block_cb;
	struct mtk_eth *eth = hw->eth;
	flow_setup_cb_t *cb;

	if (!eth->soc->offload_version)
		return -EOPNOTSUPP;

	/* only ingress clsact blocks are supported */
	if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
		return -EOPNOTSUPP;

	cb = mtk_wed_setup_tc_block_cb;
	f->driver_block_list = &block_cb_list;

	switch (f->command) {
	case FLOW_BLOCK_BIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (block_cb) {
			/* already bound: just take another reference */
			flow_block_cb_incref(block_cb);
			return 0;
		}

		priv = kzalloc(sizeof(*priv), GFP_KERNEL);
		if (!priv)
			return -ENOMEM;

		priv->hw = hw;
		priv->dev = dev;
		block_cb = flow_block_cb_alloc(cb, dev, priv, NULL);
		if (IS_ERR(block_cb)) {
			kfree(priv);
			return PTR_ERR(block_cb);
		}

		flow_block_cb_incref(block_cb);
		flow_block_cb_add(block_cb, f);
		list_add_tail(&block_cb->driver_list, &block_cb_list);
		return 0;
	case FLOW_BLOCK_UNBIND:
		block_cb = flow_block_cb_lookup(f->block, cb, dev);
		if (!block_cb)
			return -ENOENT;

		/* free the callback and its priv on the last unbind */
		if (!flow_block_cb_decref(block_cb)) {
			flow_block_cb_remove(block_cb, f);
			list_del(&block_cb->driver_list);
			kfree(block_cb->cb_priv);
			block_cb->cb_priv = NULL;
		}
		return 0;
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * ndo_setup_tc entry point exposed through mtk_wed_ops; v1 hw has no TC
 * offload support.
 */
static int mtk_wed_setup_tc(struct mtk_wed_device *wed, struct net_device *dev,
			    enum tc_setup_type type, void *type_data)
{
	struct mtk_wed_hw *hw = wed->hw;

	if (mtk_wed_is_v1(hw))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_BLOCK:
	case TC_SETUP_FT:
		return mtk_wed_setup_tc_block(hw, dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

/*
 * Probe-time registration of one WED hw instance described by @np.
 * Publishes the ops table via RCU, resolves irq/regmap resources and
 * fills hw_list[index]. On any failure the device/node references taken
 * here are dropped again through the error labels.
 */
void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth,
		    void __iomem *wdma, phys_addr_t wdma_phy,
		    int index)
{
	static const struct mtk_wed_ops wed_ops = {
		.attach = mtk_wed_attach,
		.tx_ring_setup = mtk_wed_tx_ring_setup,
		.rx_ring_setup = mtk_wed_rx_ring_setup,
		.txfree_ring_setup = mtk_wed_txfree_ring_setup,
		.msg_update = mtk_wed_mcu_msg_update,
		.start = mtk_wed_start,
		.stop = mtk_wed_stop,
		.reset_dma = mtk_wed_reset_dma,
		.reg_read = wed_r32,
		.reg_write = wed_w32,
		.irq_get = mtk_wed_irq_get,
		.irq_set_mask = mtk_wed_irq_set_mask,
		.detach = mtk_wed_detach,
		.ppe_check = mtk_wed_ppe_check,
		.setup_tc = mtk_wed_setup_tc,
		.start_hw_rro = mtk_wed_start_hw_rro,
		.rro_rx_ring_setup = mtk_wed_rro_rx_ring_setup,
		.msdu_pg_rx_ring_setup = mtk_wed_msdu_pg_rx_ring_setup,
		.ind_rx_ring_setup = mtk_wed_ind_rx_ring_setup,
	};
	struct device_node *eth_np = eth->dev->of_node;
	struct platform_device *pdev;
	struct mtk_wed_hw *hw;
	struct regmap *regs;
	int irq;

	if (!np)
		return;

	pdev = of_find_device_by_node(np);
	if (!pdev)
		goto err_of_node_put;

	get_device(&pdev->dev);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		goto err_put_device;

	regs = syscon_regmap_lookup_by_phandle(np, NULL);
	if (IS_ERR(regs))
		goto err_put_device;

	/* make the ops table visible to wlan drivers */
	rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops);

	mutex_lock(&hw_lock);

	if (WARN_ON(hw_list[index]))
		goto unlock;

	hw = kzalloc(sizeof(*hw), GFP_KERNEL);
	if (!hw)
		goto unlock;

	hw->node = np;
	hw->regs = regs;
	hw->eth = eth;
	hw->dev = &pdev->dev;
	hw->wdma_phy = wdma_phy;
	hw->wdma = wdma;
	hw->index = index;
	hw->irq = irq;
	hw->version = eth->soc->version;

	switch (hw->version) {
	case 2:
		hw->soc = &mt7986_data;
		break;
	case 3:
		hw->soc = &mt7988_data;
		break;
	default:
	case 1:
		/* v1 additionally needs the pcie-mirror and hifsys syscons */
		hw->mirror = syscon_regmap_lookup_by_phandle(eth_np,
							     "mediatek,pcie-mirror");
		hw->hifsys = syscon_regmap_lookup_by_phandle(eth_np,
							     "mediatek,hifsys");
		if (IS_ERR(hw->mirror) || IS_ERR(hw->hifsys)) {
			kfree(hw);
			goto unlock;
		}

		if (!index) {
			regmap_write(hw->mirror, 0, 0);
			regmap_write(hw->mirror, 4, 0);
		}
		hw->soc = &mt7622_data;
		break;
	}

	mtk_wed_hw_add_debugfs(hw);

	hw_list[index] = hw;

	mutex_unlock(&hw_lock);

	return;

unlock:
	mutex_unlock(&hw_lock);
err_put_device:
	put_device(&pdev->dev);
err_of_node_put:
	of_node_put(np);
}

/*
 * Module unload: unpublish the ops pointer, wait for readers, then tear
 * down every registered hw instance and drop its references.
 */
void mtk_wed_exit(void)
{
	int i;

	rcu_assign_pointer(mtk_soc_wed_ops, NULL);

	/* make sure all users of the ops table are gone */
	synchronize_rcu();

	for (i = 0; i < ARRAY_SIZE(hw_list); i++) {
		struct mtk_wed_hw *hw;

		hw = hw_list[i];
		if (!hw)
			continue;

		hw_list[i] = NULL;
		debugfs_remove(hw->debugfs_dir);
		put_device(hw->dev);
		of_node_put(hw->node);
		kfree(hw);
	}
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2003 Sistina Software Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * Device-Mapper dirty region hash interface.
 *
 * This file is released under the GPL.
 */

#ifndef DM_REGION_HASH_H
#define DM_REGION_HASH_H

#include <linux/dm-dirty-log.h>

/*
 *----------------------------------------------------------------
 * Region hash
 *----------------------------------------------------------------
 */
struct dm_region_hash;
struct dm_region;

/*
 * States a region can have.
 */
enum dm_rh_region_states {
	DM_RH_CLEAN = 0x01,	/* No writes in flight. */
	DM_RH_DIRTY = 0x02,	/* Writes in flight. */
	DM_RH_NOSYNC = 0x04,	/* Out of sync. */
	DM_RH_RECOVERING = 0x08,	/* Under resynchronization. */
};

/*
 * Region hash create/destroy.
 */
struct bio_list;
struct dm_region_hash *dm_region_hash_create(
		void *context, void (*dispatch_bios)(void *context,
						     struct bio_list *bios),
		void (*wakeup_workers)(void *context),
		void (*wakeup_all_recovery_waiters)(void *context),
		sector_t target_begin, unsigned int max_recovery,
		struct dm_dirty_log *log, uint32_t region_size,
		region_t nr_regions);

void dm_region_hash_destroy(struct dm_region_hash *rh);

struct dm_dirty_log *dm_rh_dirty_log(struct dm_region_hash *rh);

/*
 * Conversion functions.
 */
region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio);
sector_t dm_rh_region_to_sector(struct dm_region_hash *rh, region_t region);
void *dm_rh_region_context(struct dm_region *reg);

/*
 * Get region size and key (ie. number of the region).
 */
sector_t dm_rh_get_region_size(struct dm_region_hash *rh);
region_t dm_rh_get_region_key(struct dm_region *reg);

/*
 * Get/set/update region state (and dirty log).
 */
int dm_rh_get_state(struct dm_region_hash *rh, region_t region, int may_block);
void dm_rh_set_state(struct dm_region_hash *rh, region_t region,
		     enum dm_rh_region_states state, int may_block);

/* Non-zero errors_handled leaves the state of the region NOSYNC */
void dm_rh_update_states(struct dm_region_hash *rh, int errors_handled);

/* Flush the region hash and dirty log. */
int dm_rh_flush(struct dm_region_hash *rh);

/* Inc/dec pending count on regions. */
void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios);
void dm_rh_dec(struct dm_region_hash *rh, region_t region);

/* Delay bios on regions. */
void dm_rh_delay(struct dm_region_hash *rh, struct bio *bio);

void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio);

/*
 * Region recovery control.
 */

/* Prepare some regions for recovery by starting to quiesce them. */
void dm_rh_recovery_prepare(struct dm_region_hash *rh);

/* Try fetching a quiesced region for recovery. */
struct dm_region *dm_rh_recovery_start(struct dm_region_hash *rh);

/* Report recovery end on a region. */
void dm_rh_recovery_end(struct dm_region *reg, int error);

/* Returns number of regions with recovery work outstanding. */
int dm_rh_recovery_in_flight(struct dm_region_hash *rh);

/* Start/stop recovery. */
void dm_rh_start_recovery(struct dm_region_hash *rh);
void dm_rh_stop_recovery(struct dm_region_hash *rh);

#endif /* DM_REGION_HASH_H */
// SPDX-License-Identifier: GPL-2.0+
//
// soc-compress.c -- ALSA SoC Compress
//
// Copyright (C) 2012 Intel Corp.
//
// Authors: Namarta Kohli <[email protected]>
//          Ramesh Babu K V <[email protected]>
//          Vinod Koul <[email protected]>

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <sound/core.h>
#include <sound/compress_params.h>
#include <sound/compress_driver.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/soc-dpcm.h>
#include <sound/soc-link.h>

/*
 * Open the compress stream on every component attached to the runtime,
 * taking a module reference on each first.  Stops at the first failure
 * and returns its error code.
 */
static int snd_soc_compr_components_open(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_component *component;
	int ret = 0;
	int i;

	for_each_rtd_components(rtd, i, component) {
		ret = snd_soc_component_module_get_when_open(component, cstream);
		if (ret < 0)
			break;

		ret = snd_soc_component_compr_open(component, cstream);
		if (ret < 0)
			break;
	}

	return ret;
}

/*
 * Counterpart of snd_soc_compr_components_open(): free the stream on
 * every component and drop the module references.
 */
static void snd_soc_compr_components_free(struct snd_compr_stream *cstream,
					  int rollback)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_component *component;
	int i;

	for_each_rtd_components(rtd, i, component) {
		snd_soc_component_compr_free(component, cstream, rollback);
		snd_soc_component_module_put_when_close(component, cstream, rollback);
	}
}

/*
 * Common teardown used both for a normal close (rollback == 0) and for
 * unwinding a failed open (rollback == 1).  Ordering mirrors the open
 * path in reverse: link, components, then CPU DAI.
 */
static int soc_compr_clean(struct snd_compr_stream *cstream, int rollback)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */

	snd_soc_dpcm_mutex_lock(rtd);

	if (!rollback)
		snd_soc_runtime_deactivate(rtd, stream);

	snd_soc_dai_digital_mute(codec_dai, 1, stream);

	/* Symmetry constraints only apply while a DAI is still active. */
	if (!snd_soc_dai_active(cpu_dai))
		cpu_dai->symmetric_rate = 0;

	if (!snd_soc_dai_active(codec_dai))
		codec_dai->symmetric_rate = 0;

	snd_soc_link_compr_shutdown(cstream, rollback);

	snd_soc_compr_components_free(cstream, rollback);

	snd_soc_dai_compr_shutdown(cpu_dai, cstream, rollback);

	if (!rollback)
		snd_soc_dapm_stream_stop(rtd, stream);

	snd_soc_dpcm_mutex_unlock(rtd);

	snd_soc_pcm_component_pm_runtime_put(rtd, cstream, rollback);

	return 0;
}

/* Normal (non-rollback) close of a compress stream. */
static int soc_compr_free(struct snd_compr_stream *cstream)
{
	return soc_compr_clean(cstream, 0);
}

/*
 * Open a compress stream on a plain (non-DPCM) DAI link:
 * runtime PM, CPU DAI startup, components, machine link, activate.
 * On any failure soc_compr_clean(..., 1) rolls back the partial open.
 */
static int soc_compr_open(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	ret = snd_soc_pcm_component_pm_runtime_get(rtd, cstream);
	if (ret < 0)
		goto err_no_lock;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_startup(cpu_dai, cstream);
	if (ret < 0)
		goto err;

	ret = snd_soc_compr_components_open(cstream);
	if (ret < 0)
		goto err;

	ret = snd_soc_link_compr_startup(cstream);
	if (ret < 0)
		goto err;

	snd_soc_runtime_activate(rtd, stream);
err:
	snd_soc_dpcm_mutex_unlock(rtd);
err_no_lock:
	if (ret < 0)
		soc_compr_clean(cstream, 1);

	return ret;
}

/*
 * Open a compress stream on a DPCM front end: resolve the DAPM paths,
 * start all connected back ends, then bring up the FE CPU DAI,
 * components and machine link.  Error labels unwind in reverse order.
 */
static int soc_compr_open_fe(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(fe, 0);
	struct snd_soc_dpcm *dpcm;
	struct snd_soc_dapm_widget_list *list;
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_card_mutex_lock(fe->card);

	ret = dpcm_path_get(fe, stream, &list);
	if (ret < 0)
		goto be_err;

	snd_soc_dpcm_mutex_lock(fe);

	/* calculate valid and active FE <-> BE dpcms */
	dpcm_process_paths(fe, stream, &list, 1);

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	ret = dpcm_be_dai_startup(fe, stream);
	if (ret < 0) {
		/* clean up all links */
		for_each_dpcm_be(fe, stream, dpcm)
			dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;

		dpcm_be_disconnect(fe, stream);
		goto out;
	}

	ret = snd_soc_dai_compr_startup(cpu_dai, cstream);
	if (ret < 0)
		goto out;

	ret = snd_soc_compr_components_open(cstream);
	if (ret < 0)
		goto open_err;

	ret = snd_soc_link_compr_startup(cstream);
	if (ret < 0)
		goto machine_err;

	dpcm_clear_pending_state(fe, stream);
	dpcm_path_put(&list);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_OPEN;
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;

	snd_soc_runtime_activate(fe, stream);
	snd_soc_dpcm_mutex_unlock(fe);

	snd_soc_card_mutex_unlock(fe->card);

	return 0;

machine_err:
	snd_soc_compr_components_free(cstream, 1);
open_err:
	snd_soc_dai_compr_shutdown(cpu_dai, cstream, 1);
out:
	dpcm_path_put(&list);
	snd_soc_dpcm_mutex_unlock(fe);
be_err:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_card_mutex_unlock(fe->card);
	return ret;
}

/*
 * Close a DPCM front end compress stream: tear down the back ends,
 * prune the FE<->BE links, then shut down link, components and CPU DAI.
 */
static int soc_compr_free_fe(struct snd_compr_stream *cstream)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(fe, 0);
	struct snd_soc_dpcm *dpcm;
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */

	snd_soc_card_mutex_lock(fe->card);

	snd_soc_dpcm_mutex_lock(fe);
	snd_soc_runtime_deactivate(fe, stream);

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	dpcm_be_dai_hw_free(fe, stream);

	dpcm_be_dai_shutdown(fe, stream);

	/* mark FE's links ready to prune */
	for_each_dpcm_be(fe, stream, dpcm)
		dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;

	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;

	dpcm_be_disconnect(fe, stream);

	snd_soc_dpcm_mutex_unlock(fe);

	snd_soc_link_compr_shutdown(cstream, 0);

	snd_soc_compr_components_free(cstream, 0);

	snd_soc_dai_compr_shutdown(cpu_dai, cstream, 0);

	snd_soc_card_mutex_unlock(fe->card);
	return 0;
}

/*
 * Trigger for plain DAI links.  Mutes/unmutes the codec DAI around
 * START/STOP after forwarding the command to components and CPU DAI.
 */
static int soc_compr_trigger(struct snd_compr_stream *cstream, int cmd)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_component_compr_trigger(cstream, cmd);
	if (ret < 0)
		goto out;

	ret = snd_soc_dai_compr_trigger(cpu_dai, cstream, cmd);
	if (ret < 0)
		goto out;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
		snd_soc_dai_digital_mute(codec_dai, 0, stream);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
		snd_soc_dai_digital_mute(codec_dai, 1, stream);
		break;
	}

out:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/*
 * Trigger for DPCM front ends.  DRAIN/PARTIAL_DRAIN go straight to the
 * component (they may block and must not hold the card mutex); other
 * commands are propagated to the back ends and the FE state updated.
 */
static int soc_compr_trigger_fe(struct snd_compr_stream *cstream, int cmd)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(fe, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	if (cmd == SND_COMPR_TRIGGER_PARTIAL_DRAIN ||
	    cmd == SND_COMPR_TRIGGER_DRAIN)
		return snd_soc_component_compr_trigger(cstream, cmd);

	snd_soc_card_mutex_lock(fe->card);

	ret = snd_soc_dai_compr_trigger(cpu_dai, cstream, cmd);
	if (ret < 0)
		goto out;

	ret = snd_soc_component_compr_trigger(cstream, cmd);
	if (ret < 0)
		goto out;

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	ret = dpcm_be_dai_trigger(fe, stream, cmd);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_START;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
		break;
	}

out:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_card_mutex_unlock(fe->card);
	return ret;
}

/* set_params for plain DAI links: CPU DAI, components, machine link. */
static int soc_compr_set_params(struct snd_compr_stream *cstream,
				struct snd_compr_params *params)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);

	/*
	 * First we call set_params for the CPU DAI, then the component
	 * driver this should configure the SoC side. If the machine has
	 * compressed ops then we call that as well. The expectation is
	 * that these callbacks will configure everything for this compress
	 * path, like configuring a PCM port for a CODEC.
	 */
	ret = snd_soc_dai_compr_set_params(cpu_dai, cstream, params);
	if (ret < 0)
		goto err;

	ret = snd_soc_component_compr_set_params(cstream, params);
	if (ret < 0)
		goto err;

	ret = snd_soc_link_compr_set_params(cstream);
	if (ret < 0)
		goto err;

	snd_soc_dapm_stream_event(rtd, stream, SND_SOC_DAPM_STREAM_START);

	/* cancel any delayed stream shutdown that is pending */
	rtd->pop_wait = 0;
	snd_soc_dpcm_mutex_unlock(rtd);

	cancel_delayed_work_sync(&rtd->delayed_work);

	return 0;

err:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/*
 * set_params for DPCM front ends: run hw_params and prepare on the back
 * ends (with an empty hw_params the machine driver must fix up), then
 * configure the FE CPU DAI, components and machine link.
 */
static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
				   struct snd_compr_params *params)
{
	struct snd_soc_pcm_runtime *fe = cstream->private_data;
	struct snd_pcm_substream *fe_substream =
		 fe->pcm->streams[cstream->direction].substream;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(fe, 0);
	int stream = cstream->direction; /* SND_COMPRESS_xxx is same as SNDRV_PCM_STREAM_xxx */
	int ret;

	snd_soc_card_mutex_lock(fe->card);

	/*
	 * Create an empty hw_params for the BE as the machine driver must
	 * fix this up to match DSP decoder and ASRC configuration.
	 * I.e. machine driver fixup for compressed BE is mandatory.
	 */
	memset(&fe->dpcm[fe_substream->stream].hw_params, 0,
	       sizeof(struct snd_pcm_hw_params));

	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE;

	snd_soc_dpcm_mutex_lock(fe);
	ret = dpcm_be_dai_hw_params(fe, stream);
	snd_soc_dpcm_mutex_unlock(fe);
	if (ret < 0)
		goto out;

	snd_soc_dpcm_mutex_lock(fe);
	ret = dpcm_be_dai_prepare(fe, stream);
	snd_soc_dpcm_mutex_unlock(fe);
	if (ret < 0)
		goto out;

	ret = snd_soc_dai_compr_set_params(cpu_dai, cstream, params);
	if (ret < 0)
		goto out;

	ret = snd_soc_component_compr_set_params(cstream, params);
	if (ret < 0)
		goto out;

	ret = snd_soc_link_compr_set_params(cstream);
	if (ret < 0)
		goto out;

	snd_soc_dpcm_mutex_lock(fe);
	dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
	snd_soc_dpcm_mutex_unlock(fe);

	fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;

out:
	fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
	snd_soc_card_mutex_unlock(fe->card);
	return ret;
}

/* Query current codec parameters from CPU DAI, then component. */
static int soc_compr_get_params(struct snd_compr_stream *cstream,
				struct snd_codec *params)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int ret = 0;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_get_params(cpu_dai, cstream, params);
	if (ret < 0)
		goto err;

	ret = snd_soc_component_compr_get_params(cstream, params);
err:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Acknowledge @bytes consumed/produced to CPU DAI and component. */
static int soc_compr_ack(struct snd_compr_stream *cstream, size_t bytes)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int ret;

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_ack(cpu_dai, cstream, bytes);
	if (ret < 0)
		goto err;

	ret = snd_soc_component_compr_ack(cstream, bytes);
err:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Fill @tstamp with the current stream position. */
static int soc_compr_pointer(struct snd_compr_stream *cstream,
			     struct snd_compr_tstamp *tstamp)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	int ret;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);

	snd_soc_dpcm_mutex_lock(rtd);

	ret = snd_soc_dai_compr_pointer(cpu_dai, cstream, tstamp);
	if (ret < 0)
		goto out;

	ret = snd_soc_component_compr_pointer(cstream, tstamp);
out:
	snd_soc_dpcm_mutex_unlock(rtd);
	return ret;
}

/* Push stream metadata to CPU DAI, then component. */
static int soc_compr_set_metadata(struct snd_compr_stream *cstream,
				  struct snd_compr_metadata *metadata)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int ret;

	ret = snd_soc_dai_compr_set_metadata(cpu_dai, cstream, metadata);
	if (ret < 0)
		return ret;

	return snd_soc_component_compr_set_metadata(cstream, metadata);
}

/* Read stream metadata from CPU DAI, then component. */
static int soc_compr_get_metadata(struct snd_compr_stream *cstream,
				  struct snd_compr_metadata *metadata)
{
	struct snd_soc_pcm_runtime *rtd = cstream->private_data;
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	int ret;

	ret = snd_soc_dai_compr_get_metadata(cpu_dai, cstream, metadata);
	if (ret < 0)
		return ret;

	return snd_soc_component_compr_get_metadata(cstream, metadata);
}

/* ASoC Compress operations */
static struct snd_compr_ops soc_compr_ops = {
	.open		= soc_compr_open,
	.free		= soc_compr_free,
	.set_params	= soc_compr_set_params,
	.set_metadata	= soc_compr_set_metadata,
	.get_metadata	= soc_compr_get_metadata,
	.get_params	= soc_compr_get_params,
	.trigger	= soc_compr_trigger,
	.pointer	= soc_compr_pointer,
	.ack		= soc_compr_ack,
	.get_caps	= snd_soc_component_compr_get_caps,
	.get_codec_caps	= snd_soc_component_compr_get_codec_caps,
};

/* ASoC Dynamic Compress operations */
static struct snd_compr_ops soc_compr_dyn_ops = {
	.open		= soc_compr_open_fe,
	.free		= soc_compr_free_fe,
	.set_params	= soc_compr_set_params_fe,
	.get_params	= soc_compr_get_params,
	.set_metadata	= soc_compr_set_metadata,
	.get_metadata	= soc_compr_get_metadata,
	.trigger	= soc_compr_trigger_fe,
	.pointer	= soc_compr_pointer,
	.ack		= soc_compr_ack,
	.get_caps	= snd_soc_component_compr_get_caps,
	.get_codec_caps	= snd_soc_component_compr_get_codec_caps,
};

/**
 * snd_soc_new_compress - create a new compress.
 *
 * @rtd: The runtime for which we will create compress
 *
 * Return: 0 for success, else error.
 */
int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_component *component;
	struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(rtd, 0);
	struct snd_soc_dai *cpu_dai = snd_soc_rtd_to_cpu(rtd, 0);
	struct snd_compr *compr;
	struct snd_pcm *be_pcm;
	char new_name[64];
	int ret = 0, direction = 0;
	int playback = 0, capture = 0;
	int i;

	/*
	 * make sure these are same value,
	 * and then use these as equally
	 */
	BUILD_BUG_ON((int)SNDRV_PCM_STREAM_PLAYBACK != (int)SND_COMPRESS_PLAYBACK);
	BUILD_BUG_ON((int)SNDRV_PCM_STREAM_CAPTURE  != (int)SND_COMPRESS_CAPTURE);

	if (rtd->dai_link->num_cpus > 1 ||
	    rtd->dai_link->num_codecs > 1) {
		dev_err(rtd->card->dev,
			"Compress ASoC: Multi CPU/Codec not supported\n");
		return -EINVAL;
	}

	if (!codec_dai) {
		dev_err(rtd->card->dev, "Missing codec\n");
		return -EINVAL;
	}

	/* check client and interface hw capabilities */
	if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_PLAYBACK) &&
	    snd_soc_dai_stream_valid(cpu_dai,   SNDRV_PCM_STREAM_PLAYBACK))
		playback = 1;

	if (snd_soc_dai_stream_valid(codec_dai, SNDRV_PCM_STREAM_CAPTURE) &&
	    snd_soc_dai_stream_valid(cpu_dai,   SNDRV_PCM_STREAM_CAPTURE))
		capture = 1;

	/*
	 * Compress devices are unidirectional so only one of the directions
	 * should be set, check for that (xor)
	 */
	if (playback + capture != 1) {
		dev_err(rtd->card->dev,
			"Compress ASoC: Invalid direction for P %d, C %d\n",
			playback, capture);
		return -EINVAL;
	}

	if (playback)
		direction = SND_COMPRESS_PLAYBACK;
	else
		direction = SND_COMPRESS_CAPTURE;

	compr = devm_kzalloc(rtd->card->dev, sizeof(*compr), GFP_KERNEL);
	if (!compr)
		return -ENOMEM;

	compr->ops = devm_kzalloc(rtd->card->dev, sizeof(soc_compr_ops),
				  GFP_KERNEL);
	if (!compr->ops)
		return -ENOMEM;

	if (rtd->dai_link->dynamic) {
		/* DPCM FE: back it with an internal PCM for the BE side. */
		int playback = 1;
		int capture = 1;

		if (rtd->dai_link->capture_only)
			playback = 0;
		if (rtd->dai_link->playback_only)
			capture = 0;

		snprintf(new_name, sizeof(new_name), "(%s)",
			rtd->dai_link->stream_name);

		ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, rtd->id,
				playback, capture, &be_pcm);
		if (ret < 0) {
			dev_err(rtd->card->dev,
				"Compress ASoC: can't create compressed for %s: %d\n",
				rtd->dai_link->name, ret);
			return ret;
		}

		/* inherit atomicity from DAI link */
		be_pcm->nonatomic = rtd->dai_link->nonatomic;

		rtd->pcm = be_pcm;
		rtd->fe_compr = 1;
		if (playback)
			be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
		if (capture)
			be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
		memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
	} else {
		snprintf(new_name, sizeof(new_name), "%s %s-%d",
			 rtd->dai_link->stream_name, codec_dai->name, rtd->id);

		memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
	}

	/* Wire up the optional copy op from the first component providing one. */
	for_each_rtd_components(rtd, i, component) {
		if (!component->driver->compress_ops ||
		    !component->driver->compress_ops->copy)
			continue;

		compr->ops->copy = snd_soc_component_compr_copy;
		break;
	}

	ret = snd_compress_new(rtd->card->snd_card, rtd->id, direction,
				new_name, compr);
	if (ret < 0) {
		component = snd_soc_rtd_to_codec(rtd, 0)->component;
		dev_err(component->dev,
			"Compress ASoC: can't create compress for codec %s: %d\n",
			component->name, ret);
		return ret;
	}

	/* DAPM dai link stream work */
	rtd->close_delayed_work_func = snd_soc_close_delayed_work;

	rtd->compr = compr;
	compr->private_data = rtd;

	dev_dbg(rtd->card->dev, "Compress ASoC: %s <-> %s mapping ok\n",
		codec_dai->name, cpu_dai->name);

	return 0;
}
EXPORT_SYMBOL_GPL(snd_soc_new_compress);
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2020 Bytedance.
 *
 * Alternate-side OpenBMC flash layout: U-Boot, U-Boot environment,
 * kernel, read-only rootfs and read-write overlay partitions.
 */
partitions {
	compatible = "fixed-partitions";
	#address-cells = <1>;
	#size-cells = <1>;

	u-boot@0 {
		reg = <0x0 0xe0000>; // 896KB
		label = "alt-u-boot";
	};

	u-boot-env@e0000 {
		reg = <0xe0000 0x20000>; // 128KB
		label = "alt-u-boot-env";
	};

	kernel@100000 {
		reg = <0x100000 0x900000>; // 9MB
		label = "alt-kernel";
	};

	rofs@a00000 {
		reg = <0xa00000 0x2000000>; // 32MB
		label = "alt-rofs";
	};

	/*
	 * Fixed: the unit-address must equal the first cell of "reg"
	 * (node was previously misnamed "rwfs@6000000" while the
	 * partition actually starts at offset 0x2a00000).
	 */
	rwfs@2a00000 {
		reg = <0x2a00000 0x1600000>; // 22MB
		label = "alt-rwfs";
	};
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Secure Encrypted Virtualization (SEV) guest driver interface
 *
 * Copyright (C) 2021-2024 Advanced Micro Devices, Inc.
 *
 * Author: Brijesh Singh <[email protected]>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/miscdevice.h>
#include <linux/set_memory.h>
#include <linux/fs.h>
#include <linux/tsm.h>
#include <crypto/gcm.h>
#include <linux/psp-sev.h>
#include <linux/sockptr.h>
#include <linux/cleanup.h>
#include <linux/uuid.h>
#include <linux/configfs.h>
#include <uapi/linux/sev-guest.h>
#include <uapi/linux/psp-sev.h>

#include <asm/svm.h>
#include <asm/sev.h>

#define DEVICE_NAME	"sev-guest"

/* Give up retrying a throttled guest request after this long. */
#define SNP_REQ_MAX_RETRY_DURATION	(60*HZ)
#define SNP_REQ_RETRY_DELAY		(2*HZ)

#define SVSM_MAX_RETRIES		3

struct snp_guest_dev {
	struct device *dev;
	struct miscdevice misc;

	struct snp_msg_desc *msg_desc;

	/* Per-ioctl request scratch space; guarded by snp_cmd_mutex. */
	union {
		struct snp_report_req report;
		struct snp_derived_key_req derived_key;
		struct snp_ext_report_req ext_report;
	} req;
};

/*
 * The VMPCK ID represents the key used by the SNP guest to communicate with the
 * SEV firmware in the AMD Secure Processor (ASP, aka PSP). By default, the key
 * used will be the key associated with the VMPL at which the guest is running.
 * Should the default key be wiped (see snp_disable_vmpck()), this parameter
 * allows for using one of the remaining VMPCKs.
 */
static int vmpck_id = -1;
module_param(vmpck_id, int, 0444);
MODULE_PARM_DESC(vmpck_id, "The VMPCK ID to use when communicating with the PSP.");

/* Mutex to serialize the shared buffer access and command handling. */
static DEFINE_MUTEX(snp_cmd_mutex);

/* True if the VMPCK is missing or has been wiped (all-zero). */
static bool is_vmpck_empty(struct snp_msg_desc *mdesc)
{
	char zero_key[VMPCK_KEY_LEN] = {0};

	if (mdesc->vmpck)
		return !memcmp(mdesc->vmpck, zero_key, VMPCK_KEY_LEN);

	return true;
}

/*
 * If an error is received from the host or AMD Secure Processor (ASP) there
 * are two options. Either retry the exact same encrypted request or discontinue
 * using the VMPCK.
 *
 * This is because in the current encryption scheme GHCB v2 uses AES-GCM to
 * encrypt the requests. The IV for this scheme is the sequence number. GCM
 * cannot tolerate IV reuse.
 *
 * The ASP FW v1.51 only increments the sequence numbers on a successful
 * guest<->ASP back and forth and only accepts messages at its exact sequence
 * number.
 *
 * So if the sequence number were to be reused the encryption scheme is
 * vulnerable. If the sequence number were incremented for a fresh IV the ASP
 * will reject the request.
 */
static void snp_disable_vmpck(struct snp_msg_desc *mdesc)
{
	pr_alert("Disabling VMPCK%d communication key to prevent IV reuse.\n",
		 vmpck_id);
	memzero_explicit(mdesc->vmpck, VMPCK_KEY_LEN);
	mdesc->vmpck = NULL;
}

static inline u64 __snp_get_msg_seqno(struct snp_msg_desc *mdesc)
{
	u64 count;

	lockdep_assert_held(&snp_cmd_mutex);

	/* Read the current message sequence counter from secrets pages */
	count = *mdesc->os_area_msg_seqno;

	return count + 1;
}

/* Return a non-zero on success */
static u64 snp_get_msg_seqno(struct snp_msg_desc *mdesc)
{
	u64 count = __snp_get_msg_seqno(mdesc);

	/*
	 * The message sequence counter for the SNP guest request is a 64-bit
	 * value but the version 2 of GHCB specification defines a 32-bit storage
	 * for it. If the counter exceeds the 32-bit value then return zero.
	 * The caller should check the return value, but if the caller happens to
	 * not check the value and use it, then the firmware treats zero as an
	 * invalid number and will fail the message request.
	 */
	if (count >= UINT_MAX) {
		pr_err("request message sequence counter overflow\n");
		return 0;
	}

	return count;
}

static void snp_inc_msg_seqno(struct snp_msg_desc *mdesc)
{
	/*
	 * The counter is also incremented by the PSP, so increment it by 2
	 * and save in secrets page.
	 */
	*mdesc->os_area_msg_seqno += 2;
}

/* Recover the snp_guest_dev from the misc device backing @file. */
static inline struct snp_guest_dev *to_snp_dev(struct file *file)
{
	struct miscdevice *dev = file->private_data;

	return container_of(dev, struct snp_guest_dev, misc);
}

/* Allocate and key an AES-GCM context; NULL on any failure. */
static struct aesgcm_ctx *snp_init_crypto(u8 *key, size_t keylen)
{
	struct aesgcm_ctx *ctx;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL_ACCOUNT);
	if (!ctx)
		return NULL;

	if (aesgcm_expandkey(ctx, key, keylen, AUTHTAG_LEN)) {
		pr_err("Crypto context initialization failed\n");
		kfree(ctx);
		return NULL;
	}

	return ctx;
}

/*
 * Authenticate and decrypt the firmware response from the shared page
 * into req->resp_buf, after checking sequence number, type and version.
 */
static int verify_and_dec_payload(struct snp_msg_desc *mdesc, struct snp_guest_req *req)
{
	struct snp_guest_msg *resp_msg = &mdesc->secret_response;
	struct snp_guest_msg *req_msg = &mdesc->secret_request;
	struct snp_guest_msg_hdr *req_msg_hdr = &req_msg->hdr;
	struct snp_guest_msg_hdr *resp_msg_hdr = &resp_msg->hdr;
	struct aesgcm_ctx *ctx = mdesc->ctx;
	u8 iv[GCM_AES_IV_SIZE] = {};

	pr_debug("response [seqno %lld type %d version %d sz %d]\n",
		 resp_msg_hdr->msg_seqno, resp_msg_hdr->msg_type, resp_msg_hdr->msg_version,
		 resp_msg_hdr->msg_sz);

	/* Copy response from shared memory to encrypted memory. */
	memcpy(resp_msg, mdesc->response, sizeof(*resp_msg));

	/* Verify that the sequence counter is incremented by 1 */
	if (unlikely(resp_msg_hdr->msg_seqno != (req_msg_hdr->msg_seqno + 1)))
		return -EBADMSG;

	/* Verify response message type and version number. */
	if (resp_msg_hdr->msg_type != (req_msg_hdr->msg_type + 1) ||
	    resp_msg_hdr->msg_version != req_msg_hdr->msg_version)
		return -EBADMSG;

	/*
	 * If the message size is greater than our buffer length then return
	 * an error.
	 */
	if (unlikely((resp_msg_hdr->msg_sz + ctx->authsize) > req->resp_sz))
		return -EBADMSG;

	/* Decrypt the payload */
	memcpy(iv, &resp_msg_hdr->msg_seqno, min(sizeof(iv), sizeof(resp_msg_hdr->msg_seqno)));
	if (!aesgcm_decrypt(ctx, req->resp_buf, resp_msg->payload, resp_msg_hdr->msg_sz,
			    &resp_msg_hdr->algo, AAD_LEN, iv, resp_msg_hdr->authtag))
		return -EBADMSG;

	return 0;
}

/*
 * Build the request header and AES-GCM-encrypt the caller payload into
 * mdesc->secret_request, using @seqno as the IV.
 */
static int enc_payload(struct snp_msg_desc *mdesc, u64 seqno, struct snp_guest_req *req)
{
	struct snp_guest_msg *msg = &mdesc->secret_request;
	struct snp_guest_msg_hdr *hdr = &msg->hdr;
	struct aesgcm_ctx *ctx = mdesc->ctx;
	u8 iv[GCM_AES_IV_SIZE] = {};

	memset(msg, 0, sizeof(*msg));

	hdr->algo = SNP_AEAD_AES_256_GCM;
	hdr->hdr_version = MSG_HDR_VER;
	hdr->hdr_sz = sizeof(*hdr);
	hdr->msg_type = req->msg_type;
	hdr->msg_version = req->msg_version;
	hdr->msg_seqno = seqno;
	hdr->msg_vmpck = req->vmpck_id;
	hdr->msg_sz = req->req_sz;

	/* Verify the sequence number is non-zero */
	if (!hdr->msg_seqno)
		return -ENOSR;

	pr_debug("request [seqno %lld type %d version %d sz %d]\n",
		 hdr->msg_seqno, hdr->msg_type, hdr->msg_version, hdr->msg_sz);

	if (WARN_ON((req->req_sz + ctx->authsize) > sizeof(msg->payload)))
		return -EBADMSG;

	memcpy(iv, &hdr->msg_seqno, min(sizeof(iv), sizeof(hdr->msg_seqno)));
	aesgcm_encrypt(ctx, msg->payload, req->req_buf, req->req_sz, &hdr->algo,
		       AAD_LEN, iv, hdr->authtag);

	return 0;
}

static int __handle_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
				  struct snp_guest_request_ioctl *rio)
{
	unsigned long req_start = jiffies;
	unsigned int override_npages = 0;
	u64 override_err = 0;
	int rc;

retry_request:
	/*
	 * Call firmware to process the request. In this function the encrypted
	 * message enters shared memory with the host. So after this call the
	 * sequence number must be incremented or the VMPCK must be deleted to
	 * prevent reuse of the IV.
	 */
	rc = snp_issue_guest_request(req, &mdesc->input, rio);
	switch (rc) {
	case -ENOSPC:
		/*
		 * If the extended guest request fails due to having too
		 * small of a certificate data buffer, retry the same
		 * guest request without the extended data request in
		 * order to increment the sequence number and thus avoid
		 * IV reuse.
		 */
		override_npages = mdesc->input.data_npages;
		req->exit_code	= SVM_VMGEXIT_GUEST_REQUEST;

		/*
		 * Override the error to inform callers the given extended
		 * request buffer size was too small and give the caller the
		 * required buffer size.
		 */
		override_err = SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN);

		/*
		 * If this call to the firmware succeeds, the sequence number can
		 * be incremented allowing for continued use of the VMPCK. If
		 * there is an error reflected in the return value, this value
		 * is checked further down and the result will be the deletion
		 * of the VMPCK and the error code being propagated back to the
		 * user as an ioctl() return code.
		 */
		goto retry_request;

	/*
	 * The host may return SNP_GUEST_VMM_ERR_BUSY if the request has been
	 * throttled. Retry in the driver to avoid returning and reusing the
	 * message sequence number on a different message.
	 */
	case -EAGAIN:
		if (jiffies - req_start > SNP_REQ_MAX_RETRY_DURATION) {
			rc = -ETIMEDOUT;
			break;
		}
		schedule_timeout_killable(SNP_REQ_RETRY_DELAY);
		goto retry_request;
	}

	/*
	 * Increment the message sequence number. There is no harm in doing
	 * this now because decryption uses the value stored in the response
	 * structure and any failure will wipe the VMPCK, preventing further
	 * use anyway.
	 */
	snp_inc_msg_seqno(mdesc);

	if (override_err) {
		rio->exitinfo2 = override_err;

		/*
		 * If an extended guest request was issued and the supplied certificate
		 * buffer was not large enough, a standard guest request was issued to
		 * prevent IV reuse. If the standard request was successful, return -EIO
		 * back to the caller as would have originally been returned.
		 */
		if (!rc && override_err == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			rc = -EIO;
	}

	if (override_npages)
		mdesc->input.data_npages = override_npages;

	return rc;
}

/*
 * Serialized front end for a guest<->ASP exchange: encrypt the request,
 * issue it, then authenticate and decrypt the response.  Any unexpected
 * failure wipes the VMPCK to prevent AES-GCM IV reuse.
 */
static int snp_send_guest_request(struct snp_msg_desc *mdesc, struct snp_guest_req *req,
				  struct snp_guest_request_ioctl *rio)
{
	u64 seqno;
	int rc;

	guard(mutex)(&snp_cmd_mutex);

	/* Check if the VMPCK is not empty */
	if (is_vmpck_empty(mdesc)) {
		pr_err_ratelimited("VMPCK is disabled\n");
		return -ENOTTY;
	}

	/* Get message sequence and verify that its a non-zero */
	seqno = snp_get_msg_seqno(mdesc);
	if (!seqno)
		return -EIO;

	/* Clear shared memory's response for the host to populate. */
	memset(mdesc->response, 0, sizeof(struct snp_guest_msg));

	/* Encrypt the userspace provided payload in mdesc->secret_request. */
	rc = enc_payload(mdesc, seqno, req);
	if (rc)
		return rc;

	/*
	 * Write the fully encrypted request to the shared unencrypted
	 * request page.
	 */
	memcpy(mdesc->request, &mdesc->secret_request, sizeof(mdesc->secret_request));

	rc = __handle_guest_request(mdesc, req, rio);
	if (rc) {
		if (rc == -EIO &&
		    rio->exitinfo2 == SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN))
			return rc;

		pr_alert("Detected error from ASP request. rc: %d, exitinfo2: 0x%llx\n",
			 rc, rio->exitinfo2);

		snp_disable_vmpck(mdesc);
		return rc;
	}

	rc = verify_and_dec_payload(mdesc, req);
	if (rc) {
		pr_alert("Detected unexpected decode failure from ASP. rc: %d\n", rc);
		snp_disable_vmpck(mdesc);
		return rc;
	}

	return 0;
}

struct snp_req_resp {
	sockptr_t req_data;
	sockptr_t resp_data;
};

/* SNP_GET_REPORT: fetch an attestation report from the ASP. */
static int get_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_report_req *report_req = &snp_dev->req.report;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int rc, resp_len;

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	if (copy_from_user(report_req, (void __user *)arg->req_data, sizeof(*report_req)))
		return -EFAULT;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = report_req;
	req.req_sz = sizeof(*report_req);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		goto e_free;

	if (copy_to_user((void __user *)arg->resp_data, report_resp, sizeof(*report_resp)))
		rc = -EFAULT;

e_free:
	kfree(report_resp);
	return rc;
}

/* SNP_GET_DERIVED_KEY: derive a key rooted in the chip's secrets. */
static int get_derived_key(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg)
{
	struct snp_derived_key_req *derived_key_req = &snp_dev->req.derived_key;
	struct snp_derived_key_resp derived_key_resp = {0};
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_guest_req req = {};
	int rc, resp_len;
	/* Response data is 64 bytes and max authsize for GCM is 16 bytes. */
	u8 buf[64 + 16];

	if (!arg->req_data || !arg->resp_data)
		return -EINVAL;

	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(derived_key_resp.data) + mdesc->ctx->authsize;
	if (sizeof(buf) < resp_len)
		return -ENOMEM;

	if (copy_from_user(derived_key_req, (void __user *)arg->req_data,
			   sizeof(*derived_key_req)))
		return -EFAULT;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_KEY_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = derived_key_req;
	req.req_sz = sizeof(*derived_key_req);
	req.resp_buf = buf;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_GUEST_REQUEST;

	rc = snp_send_guest_request(mdesc, &req, arg);
	if (rc)
		return rc;

	memcpy(derived_key_resp.data, buf, sizeof(derived_key_resp.data));
	if (copy_to_user((void __user *)arg->resp_data, &derived_key_resp,
			 sizeof(derived_key_resp)))
		rc = -EFAULT;

	/* The response buffer contains the sensitive data, explicitly clear it. */
	memzero_explicit(buf, sizeof(buf));
	memzero_explicit(&derived_key_resp, sizeof(derived_key_resp));
	return rc;
}

/*
 * SNP_GET_EXT_REPORT: attestation report plus the host-supplied
 * certificate blob.  Reachable from both the ioctl() path and
 * configfs-tsm, hence the sockptr_t request/response pointers.
 */
static int get_ext_report(struct snp_guest_dev *snp_dev, struct snp_guest_request_ioctl *arg,
			  struct snp_req_resp *io)
{
	struct snp_ext_report_req *report_req = &snp_dev->req.ext_report;
	struct snp_msg_desc *mdesc = snp_dev->msg_desc;
	struct snp_report_resp *report_resp;
	struct snp_guest_req req = {};
	int ret, npages = 0, resp_len;
	sockptr_t certs_address;

	if (sockptr_is_null(io->req_data) || sockptr_is_null(io->resp_data))
		return -EINVAL;

	if (copy_from_sockptr(report_req, io->req_data, sizeof(*report_req)))
		return -EFAULT;

	/* caller does not want certificate data */
	if (!report_req->certs_len || !report_req->certs_address)
		goto cmd;

	if (report_req->certs_len > SEV_FW_BLOB_MAX_SIZE ||
	    !IS_ALIGNED(report_req->certs_len, PAGE_SIZE))
		return -EINVAL;

	if (sockptr_is_kernel(io->resp_data)) {
		certs_address = KERNEL_SOCKPTR((void *)report_req->certs_address);
	} else {
		certs_address = USER_SOCKPTR((void __user *)report_req->certs_address);
		if (!access_ok(certs_address.user, report_req->certs_len))
			return -EFAULT;
	}

	/*
	 * Initialize the intermediate buffer with all zeros. This buffer
	 * is used in the guest request message to get the certs blob from
	 * the host. If host does not supply any certs in it, then copy
	 * zeros to indicate that certificate data was not provided.
	 */
	memset(mdesc->certs_data, 0, report_req->certs_len);
	npages = report_req->certs_len >> PAGE_SHIFT;
cmd:
	/*
	 * The intermediate response buffer is used while decrypting the
	 * response payload. Make sure that it has enough space to cover the
	 * authtag.
	 */
	resp_len = sizeof(report_resp->data) + mdesc->ctx->authsize;
	report_resp = kzalloc(resp_len, GFP_KERNEL_ACCOUNT);
	if (!report_resp)
		return -ENOMEM;

	mdesc->input.data_npages = npages;

	req.msg_version = arg->msg_version;
	req.msg_type = SNP_MSG_REPORT_REQ;
	req.vmpck_id = vmpck_id;
	req.req_buf = &report_req->data;
	req.req_sz = sizeof(report_req->data);
	req.resp_buf = report_resp->data;
	req.resp_sz = resp_len;
	req.exit_code = SVM_VMGEXIT_EXT_GUEST_REQUEST;

	ret = snp_send_guest_request(mdesc, &req, arg);

	/* If certs length is invalid then copy the returned length */
	if (arg->vmm_error == SNP_GUEST_VMM_ERR_INVALID_LEN) {
		report_req->certs_len = mdesc->input.data_npages << PAGE_SHIFT;

		if (copy_to_sockptr(io->req_data, report_req, sizeof(*report_req)))
			ret = -EFAULT;
	}

	if (ret)
		goto e_free;

	if (npages && copy_to_sockptr(certs_address, mdesc->certs_data,
				      report_req->certs_len)) {
		ret = -EFAULT;
		goto e_free;
	}

	if (copy_to_sockptr(io->resp_data, report_resp, sizeof(*report_resp)))
		ret = -EFAULT;

e_free:
	kfree(report_resp);
	return ret;
}

static long snp_guest_ioctl(struct file *file, unsigned int ioctl, unsigned long arg)
{
	struct snp_guest_dev *snp_dev = to_snp_dev(file);
	void __user *argp = (void __user *)arg;
	struct snp_guest_request_ioctl input;
	struct snp_req_resp io;
	int ret = -ENOTTY;

	if (copy_from_user(&input, argp, sizeof(input)))
		return -EFAULT;

	input.exitinfo2 = 0xff;

	/* Message version must be non-zero */
	if (!input.msg_version)
		return -EINVAL;

	switch (ioctl) {
	case SNP_GET_REPORT:
		ret = get_report(snp_dev, &input);
		break;
	case SNP_GET_DERIVED_KEY:
		ret = get_derived_key(snp_dev, &input);
		break;
	case SNP_GET_EXT_REPORT:
		/*
		 * As get_ext_report() may be called from the ioctl() path and a
		 * kernel internal path (configfs-tsm), decorate the passed
		 * buffers as user pointers.
		 */
		io.req_data = USER_SOCKPTR((void __user *)input.req_data);
		io.resp_data = USER_SOCKPTR((void __user *)input.resp_data);
		ret = get_ext_report(snp_dev, &input, &io);
		break;
	default:
		break;
	}

	if (input.exitinfo2 && copy_to_user(argp, &input, sizeof(input)))
		return -EFAULT;

	return ret;
}

/*
 * Re-encrypt and free pages previously shared with the host.  If the
 * encryption mask cannot be restored the pages are deliberately leaked
 * rather than returned to the allocator in a shared state.
 */
static void free_shared_pages(void *buf, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	int ret;

	if (!buf)
		return;

	ret = set_memory_encrypted((unsigned long)buf, npages);
	if (ret) {
		WARN_ONCE(ret, "failed to restore encryption mask (leak it)\n");
		return;
	}

	__free_pages(virt_to_page(buf), get_order(sz));
}

/* Allocate pages and mark them decrypted (shared with the host). */
static void *alloc_shared_pages(struct device *dev, size_t sz)
{
	unsigned int npages = PAGE_ALIGN(sz) >> PAGE_SHIFT;
	struct page *page;
	int ret;

	page = alloc_pages(GFP_KERNEL_ACCOUNT, get_order(sz));
	if (!page)
		return NULL;

	ret = set_memory_decrypted((unsigned long)page_address(page), npages);
	if (ret) {
		dev_err(dev, "failed to mark page shared, ret=%d\n", ret);
		__free_pages(page, get_order(sz));
		return NULL;
	}

	return page_address(page);
}

static const struct file_operations snp_guest_fops = {
	.owner	= THIS_MODULE,
	.unlocked_ioctl = snp_guest_ioctl,
};

/*
 * Map a VMPCK ID to the key bytes and sequence-number slot in the
 * secrets page; returns NULL for an out-of-range ID.
 */
static u8 *get_vmpck(int id, struct snp_secrets_page *secrets, u32 **seqno)
{
	u8 *key = NULL;

	switch (id) {
	case 0:
		*seqno = &secrets->os_area.msg_seqno_0;
		key = secrets->vmpck0;
		break;
	case 1:
		*seqno = &secrets->os_area.msg_seqno_1;
		key = secrets->vmpck1;
		break;
	case 2:
		*seqno = &secrets->os_area.msg_seqno_2;
		key = secrets->vmpck2;
		break;
	case 3:
		*seqno = &secrets->os_area.msg_seqno_3;
		key = secrets->vmpck3;
		break;
	default:
		break;
	}

	return key;
}

struct snp_msg_report_resp_hdr {
	u32 status;
	u32 report_size;
	u8 rsvd[24];
};

struct snp_msg_cert_entry {
	guid_t
guid; u32 offset; u32 length; }; static int sev_svsm_report_new(struct tsm_report *report, void *data) { unsigned int rep_len, man_len, certs_len; struct tsm_desc *desc = &report->desc; struct svsm_attest_call ac = {}; unsigned int retry_count; void *rep, *man, *certs; struct svsm_call call; unsigned int size; bool try_again; void *buffer; u64 call_id; int ret; /* * Allocate pages for the request: * - Report blob (4K) * - Manifest blob (4K) * - Certificate blob (16K) * * Above addresses must be 4K aligned */ rep_len = SZ_4K; man_len = SZ_4K; certs_len = SEV_FW_BLOB_MAX_SIZE; if (guid_is_null(&desc->service_guid)) { call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SERVICES); } else { export_guid(ac.service_guid, &desc->service_guid); ac.service_manifest_ver = desc->service_manifest_version; call_id = SVSM_ATTEST_CALL(SVSM_ATTEST_SINGLE_SERVICE); } retry_count = 0; retry: memset(&call, 0, sizeof(call)); size = rep_len + man_len + certs_len; buffer = alloc_pages_exact(size, __GFP_ZERO); if (!buffer) return -ENOMEM; rep = buffer; ac.report_buf.pa = __pa(rep); ac.report_buf.len = rep_len; man = rep + rep_len; ac.manifest_buf.pa = __pa(man); ac.manifest_buf.len = man_len; certs = man + man_len; ac.certificates_buf.pa = __pa(certs); ac.certificates_buf.len = certs_len; ac.nonce.pa = __pa(desc->inblob); ac.nonce.len = desc->inblob_len; ret = snp_issue_svsm_attest_req(call_id, &call, &ac); if (ret) { free_pages_exact(buffer, size); switch (call.rax_out) { case SVSM_ERR_INVALID_PARAMETER: try_again = false; if (ac.report_buf.len > rep_len) { rep_len = PAGE_ALIGN(ac.report_buf.len); try_again = true; } if (ac.manifest_buf.len > man_len) { man_len = PAGE_ALIGN(ac.manifest_buf.len); try_again = true; } if (ac.certificates_buf.len > certs_len) { certs_len = PAGE_ALIGN(ac.certificates_buf.len); try_again = true; } /* If one of the buffers wasn't large enough, retry the request */ if (try_again && retry_count < SVSM_MAX_RETRIES) { retry_count++; goto retry; } return -EINVAL; default: 
pr_err_ratelimited("SVSM attestation request failed (%d / 0x%llx)\n", ret, call.rax_out); return -EINVAL; } } /* * Allocate all the blob memory buffers at once so that the cleanup is * done for errors that occur after the first allocation (i.e. before * using no_free_ptr()). */ rep_len = ac.report_buf.len; void *rbuf __free(kvfree) = kvzalloc(rep_len, GFP_KERNEL); man_len = ac.manifest_buf.len; void *mbuf __free(kvfree) = kvzalloc(man_len, GFP_KERNEL); certs_len = ac.certificates_buf.len; void *cbuf __free(kvfree) = certs_len ? kvzalloc(certs_len, GFP_KERNEL) : NULL; if (!rbuf || !mbuf || (certs_len && !cbuf)) { free_pages_exact(buffer, size); return -ENOMEM; } memcpy(rbuf, rep, rep_len); report->outblob = no_free_ptr(rbuf); report->outblob_len = rep_len; memcpy(mbuf, man, man_len); report->manifestblob = no_free_ptr(mbuf); report->manifestblob_len = man_len; if (certs_len) { memcpy(cbuf, certs, certs_len); report->auxblob = no_free_ptr(cbuf); report->auxblob_len = certs_len; } free_pages_exact(buffer, size); return 0; } static int sev_report_new(struct tsm_report *report, void *data) { struct snp_msg_cert_entry *cert_table; struct tsm_desc *desc = &report->desc; struct snp_guest_dev *snp_dev = data; struct snp_msg_report_resp_hdr hdr; const u32 report_size = SZ_4K; const u32 ext_size = SEV_FW_BLOB_MAX_SIZE; u32 certs_size, i, size = report_size + ext_size; int ret; if (desc->inblob_len != SNP_REPORT_USER_DATA_SIZE) return -EINVAL; if (desc->service_provider) { if (strcmp(desc->service_provider, "svsm")) return -EINVAL; return sev_svsm_report_new(report, data); } void *buf __free(kvfree) = kvzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; cert_table = buf + report_size; struct snp_ext_report_req ext_req = { .data = { .vmpl = desc->privlevel }, .certs_address = (__u64)cert_table, .certs_len = ext_size, }; memcpy(&ext_req.data.user_data, desc->inblob, desc->inblob_len); struct snp_guest_request_ioctl input = { .msg_version = 1, .req_data = (__u64)&ext_req, 
.resp_data = (__u64)buf, .exitinfo2 = 0xff, }; struct snp_req_resp io = { .req_data = KERNEL_SOCKPTR(&ext_req), .resp_data = KERNEL_SOCKPTR(buf), }; ret = get_ext_report(snp_dev, &input, &io); if (ret) return ret; memcpy(&hdr, buf, sizeof(hdr)); if (hdr.status == SEV_RET_INVALID_PARAM) return -EINVAL; if (hdr.status == SEV_RET_INVALID_KEY) return -EINVAL; if (hdr.status) return -ENXIO; if ((hdr.report_size + sizeof(hdr)) > report_size) return -ENOMEM; void *rbuf __free(kvfree) = kvzalloc(hdr.report_size, GFP_KERNEL); if (!rbuf) return -ENOMEM; memcpy(rbuf, buf + sizeof(hdr), hdr.report_size); report->outblob = no_free_ptr(rbuf); report->outblob_len = hdr.report_size; certs_size = 0; for (i = 0; i < ext_size / sizeof(struct snp_msg_cert_entry); i++) { struct snp_msg_cert_entry *ent = &cert_table[i]; if (guid_is_null(&ent->guid) && !ent->offset && !ent->length) break; certs_size = max(certs_size, ent->offset + ent->length); } /* Suspicious that the response populated entries without populating size */ if (!certs_size && i) dev_warn_ratelimited(snp_dev->dev, "certificate slots conveyed without size\n"); /* No certs to report */ if (!certs_size) return 0; /* Suspicious that the certificate blob size contract was violated */ if (certs_size > ext_size) { dev_warn_ratelimited(snp_dev->dev, "certificate data truncated\n"); certs_size = ext_size; } void *cbuf __free(kvfree) = kvzalloc(certs_size, GFP_KERNEL); if (!cbuf) return -ENOMEM; memcpy(cbuf, cert_table, certs_size); report->auxblob = no_free_ptr(cbuf); report->auxblob_len = certs_size; return 0; } static bool sev_report_attr_visible(int n) { switch (n) { case TSM_REPORT_GENERATION: case TSM_REPORT_PROVIDER: case TSM_REPORT_PRIVLEVEL: case TSM_REPORT_PRIVLEVEL_FLOOR: return true; case TSM_REPORT_SERVICE_PROVIDER: case TSM_REPORT_SERVICE_GUID: case TSM_REPORT_SERVICE_MANIFEST_VER: return snp_vmpl; } return false; } static bool sev_report_bin_attr_visible(int n) { switch (n) { case TSM_REPORT_INBLOB: case 
TSM_REPORT_OUTBLOB: case TSM_REPORT_AUXBLOB: return true; case TSM_REPORT_MANIFESTBLOB: return snp_vmpl; } return false; } static struct tsm_ops sev_tsm_ops = { .name = KBUILD_MODNAME, .report_new = sev_report_new, .report_attr_visible = sev_report_attr_visible, .report_bin_attr_visible = sev_report_bin_attr_visible, }; static void unregister_sev_tsm(void *data) { tsm_unregister(&sev_tsm_ops); } static int __init sev_guest_probe(struct platform_device *pdev) { struct sev_guest_platform_data *data; struct snp_secrets_page *secrets; struct device *dev = &pdev->dev; struct snp_guest_dev *snp_dev; struct snp_msg_desc *mdesc; struct miscdevice *misc; void __iomem *mapping; int ret; BUILD_BUG_ON(sizeof(struct snp_guest_msg) > PAGE_SIZE); if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) return -ENODEV; if (!dev->platform_data) return -ENODEV; data = (struct sev_guest_platform_data *)dev->platform_data; mapping = ioremap_encrypted(data->secrets_gpa, PAGE_SIZE); if (!mapping) return -ENODEV; secrets = (__force void *)mapping; ret = -ENOMEM; snp_dev = devm_kzalloc(&pdev->dev, sizeof(struct snp_guest_dev), GFP_KERNEL); if (!snp_dev) goto e_unmap; mdesc = devm_kzalloc(&pdev->dev, sizeof(struct snp_msg_desc), GFP_KERNEL); if (!mdesc) goto e_unmap; /* Adjust the default VMPCK key based on the executing VMPL level */ if (vmpck_id == -1) vmpck_id = snp_vmpl; ret = -EINVAL; mdesc->vmpck = get_vmpck(vmpck_id, secrets, &mdesc->os_area_msg_seqno); if (!mdesc->vmpck) { dev_err(dev, "Invalid VMPCK%d communication key\n", vmpck_id); goto e_unmap; } /* Verify that VMPCK is not zero. */ if (is_vmpck_empty(mdesc)) { dev_err(dev, "Empty VMPCK%d communication key\n", vmpck_id); goto e_unmap; } platform_set_drvdata(pdev, snp_dev); snp_dev->dev = dev; mdesc->secrets = secrets; /* Allocate the shared page used for the request and response message. 
*/ mdesc->request = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); if (!mdesc->request) goto e_unmap; mdesc->response = alloc_shared_pages(dev, sizeof(struct snp_guest_msg)); if (!mdesc->response) goto e_free_request; mdesc->certs_data = alloc_shared_pages(dev, SEV_FW_BLOB_MAX_SIZE); if (!mdesc->certs_data) goto e_free_response; ret = -EIO; mdesc->ctx = snp_init_crypto(mdesc->vmpck, VMPCK_KEY_LEN); if (!mdesc->ctx) goto e_free_cert_data; misc = &snp_dev->misc; misc->minor = MISC_DYNAMIC_MINOR; misc->name = DEVICE_NAME; misc->fops = &snp_guest_fops; /* Initialize the input addresses for guest request */ mdesc->input.req_gpa = __pa(mdesc->request); mdesc->input.resp_gpa = __pa(mdesc->response); mdesc->input.data_gpa = __pa(mdesc->certs_data); /* Set the privlevel_floor attribute based on the vmpck_id */ sev_tsm_ops.privlevel_floor = vmpck_id; ret = tsm_register(&sev_tsm_ops, snp_dev); if (ret) goto e_free_cert_data; ret = devm_add_action_or_reset(&pdev->dev, unregister_sev_tsm, NULL); if (ret) goto e_free_cert_data; ret = misc_register(misc); if (ret) goto e_free_ctx; snp_dev->msg_desc = mdesc; dev_info(dev, "Initialized SEV guest driver (using VMPCK%d communication key)\n", vmpck_id); return 0; e_free_ctx: kfree(mdesc->ctx); e_free_cert_data: free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE); e_free_response: free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); e_free_request: free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); e_unmap: iounmap(mapping); return ret; } static void __exit sev_guest_remove(struct platform_device *pdev) { struct snp_guest_dev *snp_dev = platform_get_drvdata(pdev); struct snp_msg_desc *mdesc = snp_dev->msg_desc; free_shared_pages(mdesc->certs_data, SEV_FW_BLOB_MAX_SIZE); free_shared_pages(mdesc->response, sizeof(struct snp_guest_msg)); free_shared_pages(mdesc->request, sizeof(struct snp_guest_msg)); kfree(mdesc->ctx); misc_deregister(&snp_dev->misc); } /* * This driver is meant to be a common 
SEV guest interface driver and to * support any SEV guest API. As such, even though it has been introduced * with the SEV-SNP support, it is named "sev-guest". * * sev_guest_remove() lives in .exit.text. For drivers registered via * module_platform_driver_probe() this is ok because they cannot get unbound * at runtime. So mark the driver struct with __refdata to prevent modpost * triggering a section mismatch warning. */ static struct platform_driver sev_guest_driver __refdata = { .remove = __exit_p(sev_guest_remove), .driver = { .name = "sev-guest", }, }; module_platform_driver_probe(sev_guest_driver, sev_guest_probe); MODULE_AUTHOR("Brijesh Singh <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); MODULE_DESCRIPTION("AMD SEV Guest Driver"); MODULE_ALIAS("platform:sev-guest");
// SPDX-License-Identifier: GPL-2.0-only
#include "linux/types.h"
#include "linux/bitmap.h"
#include "linux/atomic.h"

#include "kvm_util.h"
#include "ucall_common.h"

/* Sentinel "address" emitted when a guest can't allocate a ucall slot. */
#define GUEST_UCALL_FAILED -1

/* Pool of per-vCPU ucall slots plus an allocation bitmap. */
struct ucall_header {
	DECLARE_BITMAP(in_use, KVM_MAX_VCPUS);
	struct ucall ucalls[KVM_MAX_VCPUS];
};

/* Number of guest pages needed to back the ucall pool at @page_size. */
int ucall_nr_pages_required(uint64_t page_size)
{
	return align_up(sizeof(struct ucall_header), page_size) / page_size;
}

/*
 * ucall_pool holds per-VM values (global data is duplicated by each VM), it
 * must not be accessed from host code.
 */
static struct ucall_header *ucall_pool;

/*
 * Allocate the shared ucall pool for @vm, record each slot's host virtual
 * address inside the slot itself (so the guest can report it back), and
 * publish the pool's guest virtual address into the guest's copy of
 * ucall_pool before arming the arch-specific ucall mechanism.
 */
void ucall_init(struct kvm_vm *vm, vm_paddr_t mmio_gpa)
{
	struct ucall_header *hdr;
	struct ucall *uc;
	vm_vaddr_t vaddr;
	int i;

	vaddr = vm_vaddr_alloc_shared(vm, sizeof(*hdr), KVM_UTIL_MIN_VADDR,
				      MEM_REGION_DATA);
	hdr = (struct ucall_header *)addr_gva2hva(vm, vaddr);
	memset(hdr, 0, sizeof(*hdr));

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		uc = &hdr->ucalls[i];
		uc->hva = uc;
	}

	write_guest_global(vm, ucall_pool, (struct ucall_header *)vaddr);

	ucall_arch_init(vm, mmio_gpa);
}

/*
 * Guest-side: claim a free ucall slot from the pool. Runs in the guest, so
 * the bitmap op must be atomic (multiple vCPUs may allocate concurrently).
 */
static struct ucall *ucall_alloc(void)
{
	struct ucall *uc;
	int i;

	if (!ucall_pool)
		goto ucall_failed;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (!test_and_set_bit(i, ucall_pool->in_use)) {
			uc = &ucall_pool->ucalls[i];
			memset(uc->args, 0, sizeof(uc->args));
			return uc;
		}
	}

ucall_failed:
	/*
	 * If the vCPU cannot grab a ucall structure, make a bare ucall with a
	 * magic value to signal to get_ucall() that things went sideways.
	 * GUEST_ASSERT() depends on ucall_alloc() and so cannot be used here.
	 */
	ucall_arch_do_ucall(GUEST_UCALL_FAILED);
	return NULL;
}

/* Guest-side: release a slot previously claimed by ucall_alloc(). */
static void ucall_free(struct ucall *uc)
{
	/* Beware, here be pointer arithmetic. */
	clear_bit(uc - ucall_pool->ucalls, ucall_pool->in_use);
}

/*
 * Guest-side assertion report: stash the expression/file/line plus a
 * formatted message in a ucall slot and exit to the host. The slot's hva
 * is what the host uses to read the payload.
 */
void ucall_assert(uint64_t cmd, const char *exp, const char *file,
		  unsigned int line, const char *fmt, ...)
{
	struct ucall *uc;
	va_list va;

	uc = ucall_alloc();
	uc->cmd = cmd;

	WRITE_ONCE(uc->args[GUEST_ERROR_STRING], (uint64_t)(exp));
	WRITE_ONCE(uc->args[GUEST_FILE], (uint64_t)(file));
	WRITE_ONCE(uc->args[GUEST_LINE], line);

	va_start(va, fmt);
	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
	va_end(va);

	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}

/* Guest-side: deliver a printf-style formatted message to the host. */
void ucall_fmt(uint64_t cmd, const char *fmt, ...)
{
	struct ucall *uc;
	va_list va;

	uc = ucall_alloc();
	uc->cmd = cmd;

	va_start(va, fmt);
	guest_vsnprintf(uc->buffer, UCALL_BUFFER_LEN, fmt, va);
	va_end(va);

	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}

/*
 * Guest-side: deliver @cmd plus up to UCALL_MAX_ARGS raw uint64_t args.
 * WRITE_ONCE is used because the host reads the slot while the guest still
 * "owns" it.
 */
void ucall(uint64_t cmd, int nargs, ...)
{
	struct ucall *uc;
	va_list va;
	int i;

	uc = ucall_alloc();
	WRITE_ONCE(uc->cmd, cmd);

	nargs = min(nargs, UCALL_MAX_ARGS);

	va_start(va, nargs);
	for (i = 0; i < nargs; ++i)
		WRITE_ONCE(uc->args[i], va_arg(va, uint64_t));
	va_end(va);

	ucall_arch_do_ucall((vm_vaddr_t)uc->hva);

	ucall_free(uc);
}

/*
 * Host-side: retrieve the pending ucall for @vcpu, if any. Copies the
 * guest's slot (via the hva it advertised) into @uc and completes the
 * exiting I/O. Returns UCALL_NONE (0) when no ucall is pending.
 */
uint64_t get_ucall(struct kvm_vcpu *vcpu, struct ucall *uc)
{
	struct ucall ucall;
	void *addr;

	if (!uc)
		uc = &ucall;

	addr = ucall_arch_get_ucall(vcpu);
	if (addr) {
		TEST_ASSERT(addr != (void *)GUEST_UCALL_FAILED,
			    "Guest failed to allocate ucall struct");

		memcpy(uc, addr, sizeof(*uc));
		vcpu_run_complete_io(vcpu);
	} else {
		memset(uc, 0, sizeof(*uc));
	}

	return uc->cmd;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Read flash partition table from command line
 *
 * Copyright © 2002 SYSGO Real-Time Solutions GmbH
 * Copyright © 2002-2010 David Woodhouse <[email protected]>
 *
 * The format for the command line is as follows:
 *
 * mtdparts=<mtddef>[;<mtddef]
 * <mtddef>  := <mtd-id>:<partdef>[,<partdef>]
 * <partdef> := <size>[@<offset>][<name>][ro][lk][slc]
 * <mtd-id>  := unique name used in mapping driver/device (mtd->name)
 * <size>    := standard linux memsize OR "-" to denote all remaining space
 *              size is automatically truncated at end of device
 *              if specified or truncated size is 0 the part is skipped
 * <offset>  := standard linux memsize
 *              if omitted the part will immediately follow the previous part
 *              or 0 if the first part
 * <name>    := '(' NAME ')'
 *              NAME will appear in /proc/mtd
 *
 * <size> and <offset> can be specified such that the parts are out of order
 * in physical memory and may even overlap.
 *
 * The parts are assigned MTD numbers in the order they are specified in the
 * command line regardless of their order in physical memory.
 *
 * Examples:
 *
 * 1 NOR Flash, with 1 single writable partition:
 * edb7312-nor:-
 *
 * 1 NOR Flash with 2 partitions, 1 NAND with one
 * edb7312-nor:256k(ARMboot)ro,-(root);edb7312-nand:-(home)
 */

#define pr_fmt(fmt)	"mtd: " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/module.h>
#include <linux/err.h>

/* special size referring to all the remaining space in a partition */
#define SIZE_REMAINING ULLONG_MAX
#define OFFSET_CONTINUOUS ULLONG_MAX

/* One parsed <mtddef>: a named device plus its partition array. */
struct cmdline_mtd_partition {
	struct cmdline_mtd_partition *next;
	char *mtd_id;
	int num_parts;
	struct mtd_partition *parts;
};

/* mtdpart_setup() parses into here */
static struct cmdline_mtd_partition *partitions;

/* the command line passed to mtdpart_setup() */
static char *mtdparts;
static char *cmdline;
static int cmdline_parsed;

/*
 * Parse one partition definition for an MTD. Since there can be many
 * comma separated partition definitions, this function calls itself
 * recursively until no more partition definitions are found. Nice side
 * effect: the memory to keep the mtd_partition structs and the names
 * is allocated upon the last definition being found. At that point the
 * syntax has been verified ok.
 */
static struct mtd_partition * newpart(char *s,
				      char **retptr,
				      int *num_parts,
				      int this_part,
				      unsigned char **extra_mem_ptr,
				      int extra_mem_size)
{
	struct mtd_partition *parts;
	unsigned long long size, offset = OFFSET_CONTINUOUS;
	char *name;
	int name_len;
	unsigned char *extra_mem;
	char delim;
	unsigned int mask_flags, add_flags;

	/* fetch the partition size */
	if (*s == '-') {
		/* assign all remaining space to this partition */
		size = SIZE_REMAINING;
		s++;
	} else {
		size = memparse(s, &s);
		if (!size) {
			pr_err("partition has size 0\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* fetch partition name and flags */
	mask_flags = 0; /* this is going to be a regular partition */
	add_flags = 0;
	delim = 0;

	/* check for offset */
	if (*s == '@') {
		s++;
		offset = memparse(s, &s);
	}

	/* now look for name */
	if (*s == '(')
		delim = ')';

	if (delim) {
		char *p;

		name = ++s;
		p = strchr(name, delim);
		if (!p) {
			pr_err("no closing %c found in partition name\n", delim);
			return ERR_PTR(-EINVAL);
		}
		name_len = p - name;
		s = p + 1;
	} else {
		name = NULL;
		name_len = 13; /* Partition_000 */
	}

	/* record name length for memory allocation later */
	extra_mem_size += name_len + 1;

	/* test for options */
	if (strncmp(s, "ro", 2) == 0) {
		mask_flags |= MTD_WRITEABLE;
		s += 2;
	}

	/* if lk is found do NOT unlock the MTD partition*/
	if (strncmp(s, "lk", 2) == 0) {
		mask_flags |= MTD_POWERUP_LOCK;
		s += 2;
	}

	/* if slc is found use emulated SLC mode on this partition*/
	if (!strncmp(s, "slc", 3)) {
		add_flags |= MTD_SLC_ON_MLC_EMULATION;
		s += 3;
	}

	/* test if more partitions are following */
	if (*s == ',') {
		if (size == SIZE_REMAINING) {
			pr_err("no partitions allowed after a fill-up partition\n");
			return ERR_PTR(-EINVAL);
		}
		/* more partitions follow, parse them */
		parts = newpart(s + 1, &s, num_parts, this_part + 1,
				&extra_mem, extra_mem_size);
		if (IS_ERR(parts))
			return parts;
	} else {
		/* this is the last partition: allocate space for all */
		int alloc_size;

		*num_parts = this_part + 1;
		alloc_size = *num_parts * sizeof(struct mtd_partition) +
			     extra_mem_size;

		parts = kzalloc(alloc_size, GFP_KERNEL);
		if (!parts)
			return ERR_PTR(-ENOMEM);
		/* name strings are carved out of the tail of the same allocation */
		extra_mem = (unsigned char *)(parts + *num_parts);
	}

	/*
	 * enter this partition (offset will be calculated later if it is
	 * OFFSET_CONTINUOUS at this point)
	 */
	parts[this_part].size = size;
	parts[this_part].offset = offset;
	parts[this_part].mask_flags = mask_flags;
	parts[this_part].add_flags = add_flags;
	if (name)
		strscpy(extra_mem, name, name_len + 1);
	else
		sprintf(extra_mem, "Partition_%03d", this_part);
	parts[this_part].name = extra_mem;
	extra_mem += name_len + 1;

	pr_debug("partition %d: name <%s>, offset %llx, size %llx, mask flags %x\n",
		 this_part, parts[this_part].name, parts[this_part].offset,
		 parts[this_part].size, parts[this_part].mask_flags);

	/* return (updated) pointer to extra_mem memory */
	if (extra_mem_ptr)
		*extra_mem_ptr = extra_mem;

	/* return (updated) pointer command line string */
	*retptr = s;

	/* return partition table */
	return parts;
}

/*
 * Parse the command line.
 */
static int mtdpart_setup_real(char *s)
{
	cmdline_parsed = 1;

	for( ; s != NULL; )
	{
		struct cmdline_mtd_partition *this_mtd;
		struct mtd_partition *parts;
		int mtd_id_len, num_parts;
		char *p, *mtd_id, *semicol, *open_parenth;

		/*
		 * Replace the first ';' by a NULL char so strrchr can work
		 * properly.
		 */
		semicol = strchr(s, ';');
		if (semicol)
			*semicol = '\0';

		/*
		 * make sure that part-names with ":" will not be handled as
		 * part of the mtd-id with an ":"
		 */
		open_parenth = strchr(s, '(');
		if (open_parenth)
			*open_parenth = '\0';

		mtd_id = s;

		/*
		 * fetch <mtd-id>. We use strrchr to ignore all ':' that could
		 * be present in the MTD name, only the last one is interpreted
		 * as an <mtd-id>/<part-definition> separator.
		 */
		p = strrchr(s, ':');

		/* Restore the '(' now. */
		if (open_parenth)
			*open_parenth = '(';

		/* Restore the ';' now. */
		if (semicol)
			*semicol = ';';

		if (!p) {
			pr_err("no mtd-id\n");
			return -EINVAL;
		}
		mtd_id_len = p - mtd_id;

		pr_debug("parsing <%s>\n", p+1);

		/*
		 * parse one mtd. have it reserve memory for the
		 * struct cmdline_mtd_partition and the mtd-id string.
		 */
		parts = newpart(p + 1,		/* cmdline */
				&s,		/* out: updated cmdline ptr */
				&num_parts,	/* out: number of parts */
				0,		/* first partition */
				(unsigned char**)&this_mtd, /* out: extra mem */
				mtd_id_len + 1 + sizeof(*this_mtd) +
				sizeof(void*)-1 /*alignment*/);
		if (IS_ERR(parts)) {
			/*
			 * An error occurred. We're either:
			 * a) out of memory, or
			 * b) in the middle of the partition spec
			 * Either way, this mtd is hosed and we're
			 * unlikely to succeed in parsing any more
			 */
			return PTR_ERR(parts);
		}

		/* align this_mtd */
		this_mtd = (struct cmdline_mtd_partition *)
				ALIGN((unsigned long)this_mtd, sizeof(void *));
		/* enter results */
		this_mtd->parts = parts;
		this_mtd->num_parts = num_parts;
		this_mtd->mtd_id = (char*)(this_mtd + 1);
		strscpy(this_mtd->mtd_id, mtd_id, mtd_id_len + 1);

		/* link into chain */
		this_mtd->next = partitions;
		partitions = this_mtd;

		pr_debug("mtdid=<%s> num_parts=<%d>\n",
			 this_mtd->mtd_id, this_mtd->num_parts);

		/* EOS - we're done */
		if (*s == 0)
			break;

		/* does another spec follow? */
		if (*s != ';') {
			pr_err("bad character after partition (%c)\n", *s);
			return -EINVAL;
		}
		s++;
	}

	return 0;
}

/*
 * Main function to be called from the MTD mapping driver/device to
 * obtain the partitioning information. At this point the command line
 * arguments will actually be parsed and turned to struct mtd_partition
 * information. It returns partitions for the requested mtd device, or
 * the first one in the chain if a NULL mtd_id is passed in.
 */
static int parse_cmdline_partitions(struct mtd_info *master,
				    const struct mtd_partition **pparts,
				    struct mtd_part_parser_data *data)
{
	unsigned long long offset;
	int i, err;
	struct cmdline_mtd_partition *part;
	const char *mtd_id = master->name;

	/* parse command line */
	if (!cmdline_parsed) {
		err = mtdpart_setup_real(cmdline);
		if (err)
			return err;
	}

	/*
	 * Search for the partition definition matching master->name.
	 * If master->name is not set, stop at first partition definition.
	 */
	for (part = partitions; part; part = part->next) {
		if ((!mtd_id) || (!strcmp(part->mtd_id, mtd_id)))
			break;
	}

	if (!part)
		return 0;

	/* Resolve continuous offsets / fill-up sizes against the real device. */
	for (i = 0, offset = 0; i < part->num_parts; i++) {
		if (part->parts[i].offset == OFFSET_CONTINUOUS)
			part->parts[i].offset = offset;
		else
			offset = part->parts[i].offset;

		if (part->parts[i].size == SIZE_REMAINING)
			part->parts[i].size = master->size - offset;

		if (offset + part->parts[i].size > master->size) {
			pr_warn("%s: partitioning exceeds flash size, truncating\n",
				part->mtd_id);
			part->parts[i].size = master->size - offset;
		}
		offset += part->parts[i].size;

		if (part->parts[i].size == 0) {
			pr_warn("%s: skipping zero sized partition\n",
				part->mtd_id);
			part->num_parts--;
			memmove(&part->parts[i], &part->parts[i + 1],
				sizeof(*part->parts) * (part->num_parts - i));
			i--;
		}
	}

	*pparts = kmemdup(part->parts, sizeof(*part->parts) * part->num_parts,
			  GFP_KERNEL);
	if (!*pparts)
		return -ENOMEM;

	return part->num_parts;
}

/*
 * This is the handler for our kernel parameter, called from
 * main.c::checksetup(). Note that we can not yet kmalloc() anything,
 * so we only save the commandline for later processing.
 *
 * This function needs to be visible for bootloaders.
 */
static int __init mtdpart_setup(char *s)
{
	cmdline = s;
	return 1;
}

__setup("mtdparts=", mtdpart_setup);

static struct mtd_part_parser cmdline_parser = {
	.parse_fn = parse_cmdline_partitions,
	.name = "cmdlinepart",
};

/* When built as a module, the "mtdparts" module param replaces the boot arg. */
static int __init cmdline_parser_init(void)
{
	if (mtdparts)
		mtdpart_setup(mtdparts);
	register_mtd_parser(&cmdline_parser);
	return 0;
}

static void __exit cmdline_parser_exit(void)
{
	deregister_mtd_parser(&cmdline_parser);
}

module_init(cmdline_parser_init);
module_exit(cmdline_parser_exit);

MODULE_PARM_DESC(mtdparts, "Partitioning specification");
module_param(mtdparts, charp, 0);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marius Groeger <[email protected]>");
MODULE_DESCRIPTION("Command line configuration of MTD partitions");
// SPDX-License-Identifier: BSD-3-Clause
/*
 * Copyright (c) 2021, Konrad Dybcio <[email protected]>
 */

/dts-v1/;

#include "sm8350-sony-xperia-sagami.dtsi"

/ {
	model = "Sony Xperia 5 III";
	compatible = "sony,pdx214-generic", "qcom,sm8350";
};

/* Bootloader-provided framebuffer: 1080x2520 panel, 4 bytes per pixel. */
&framebuffer {
	width = <1080>;
	height = <2520>;
	stride = <(1080 * 4)>;
};

/* PMIC GPIO naming; "NC" = not connected on this board variant. */
&pm8350b_gpios {
	gpio-line-names = "NC", /* GPIO_1 */
			  "NC",
			  "NC",
			  "NC",
			  "SNAPSHOT_N",
			  "NC",
			  "NC",
			  "FOCUS_N";
};

&pm8350c_gpios {
	gpio-line-names = "FL_STROBE_TRIG_WIDE", /* GPIO_1 */
			  "FL_STROBE_TRIG_TELE",
			  "NC",
			  "NC",
			  "NC",
			  "RGBC_IR_PWR_EN",
			  "NC",
			  "NC",
			  "WIDEC_PWR_EN";
};
// SPDX-License-Identifier: GPL-2.0 #include "api/fs/fs.h" #include "util/evsel.h" #include "util/evlist.h" #include "util/pmu.h" #include "util/pmus.h" #include "util/topdown.h" #include "topdown.h" #include "evsel.h" /* Check whether there is a PMU which supports the perf metrics. */ bool topdown_sys_has_perf_metrics(void) { static bool has_perf_metrics; static bool cached; struct perf_pmu *pmu; if (cached) return has_perf_metrics; /* * The perf metrics feature is a core PMU feature. * The PERF_TYPE_RAW type is the type of a core PMU. * The slots event is only available when the core PMU * supports the perf metrics feature. */ pmu = perf_pmus__find_by_type(PERF_TYPE_RAW); if (pmu && perf_pmu__have_event(pmu, "slots")) has_perf_metrics = true; cached = true; return has_perf_metrics; } #define TOPDOWN_SLOTS 0x0400 bool arch_is_topdown_slots(const struct evsel *evsel) { if (evsel->core.attr.config == TOPDOWN_SLOTS) return true; return false; } bool arch_is_topdown_metrics(const struct evsel *evsel) { int config = evsel->core.attr.config; const char *name_from_config; struct perf_pmu *pmu; /* All topdown events have an event code of 0. */ if ((config & 0xFF) != 0) return false; pmu = evsel__find_pmu(evsel); if (!pmu || !pmu->is_core) return false; name_from_config = perf_pmu__name_from_config(pmu, config); return name_from_config && strcasestr(name_from_config, "topdown"); } /* * Check whether a topdown group supports sample-read. * * Only Topdown metric supports sample-read. The slots * event must be the leader of the topdown group. */ bool arch_topdown_sample_read(struct evsel *leader) { struct evsel *evsel; if (!evsel__sys_has_perf_metrics(leader)) return false; if (!arch_is_topdown_slots(leader)) return false; /* * If slots event as leader event but no topdown metric events * in group, slots event should still sample as leader. 
*/ evlist__for_each_entry(leader->evlist, evsel) { if (evsel->core.leader != leader->core.leader) return false; if (evsel != leader && arch_is_topdown_metrics(evsel)) return true; } return false; }
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Force feedback support for Zeroplus based devices
 *
 *  Copyright (c) 2005, 2006 Anssi Hannula <[email protected]>
 */

/*
 */

#include <linux/hid.h>
#include <linux/input.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "hid-ids.h"

#ifdef CONFIG_ZEROPLUS_FF

/* Per-device state: the single HID output report used to drive the motors. */
struct zpff_device {
	struct hid_report *report;
};

/*
 * Memless force-feedback play callback: translate the requested rumble
 * magnitudes into the device's output report and send it.
 * Always returns 0.
 */
static int zpff_play(struct input_dev *dev, void *data,
			 struct ff_effect *effect)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct zpff_device *zpff = data;
	int left, right;

	/*
	 * The following is specified the other way around in the Zeroplus
	 * datasheet but the order below is correct for the XFX Executioner;
	 * however it is possible that the XFX Executioner is an exception
	 */

	left = effect->u.rumble.strong_magnitude;
	right = effect->u.rumble.weak_magnitude;
	dbg_hid("called with 0x%04x 0x%04x\n", left, right);

	/* Scale the 16-bit magnitudes down to the 7-bit range the report takes. */
	left = left * 0x7f / 0xffff;
	right = right * 0x7f / 0xffff;

	/* Fields 2 and 3 carry the two motor magnitudes (see zpff_init). */
	zpff->report->field[2]->value[0] = left;
	zpff->report->field[3]->value[0] = right;
	dbg_hid("running with 0x%02x 0x%02x\n", left, right);
	hid_hw_request(hid, zpff->report, HID_REQ_SET_REPORT);

	return 0;
}

/*
 * Probe-time setup: validate the output report layout, register the
 * memless FF device, and send an initial "motors off" report.
 * Returns 0 on success or a negative errno.
 */
static int zpff_init(struct hid_device *hid)
{
	struct zpff_device *zpff;
	struct hid_report *report;
	struct hid_input *hidinput;
	struct input_dev *dev;
	int i, error;

	if (list_empty(&hid->inputs)) {
		hid_err(hid, "no inputs found\n");
		return -ENODEV;
	}
	hidinput = list_entry(hid->inputs.next, struct hid_input, list);
	dev = hidinput->input;

	/*
	 * Ensure output report 0 has fields 0..3 with at least one value
	 * each, since both init and zpff_play write to them unchecked.
	 * All iterations return the same report; the last one is kept.
	 */
	for (i = 0; i < 4; i++) {
		report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1);
		if (!report)
			return -ENODEV;
	}

	zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL);
	if (!zpff)
		return -ENOMEM;

	set_bit(FF_RUMBLE, dev->ffbit);

	error = input_ff_create_memless(dev, zpff, zpff_play);
	if (error) {
		kfree(zpff);
		return error;
	}

	/*
	 * Prime the report: fields 0/1 look like command/mode bytes
	 * (0x00/0x02) — TODO confirm against the Zeroplus datasheet —
	 * and fields 2/3 (motor magnitudes) start at zero.
	 */
	zpff->report = report;
	zpff->report->field[0]->value[0] = 0x00;
	zpff->report->field[1]->value[0] = 0x02;
	zpff->report->field[2]->value[0] = 0x00;
	zpff->report->field[3]->value[0] = 0x00;
	hid_hw_request(hid, zpff->report, HID_REQ_SET_REPORT);

	hid_info(hid, "force feedback for Zeroplus based devices by Anssi Hannula <[email protected]>\n");

	return 0;
}
#else
/* FF support compiled out: succeed without doing anything. */
static inline int zpff_init(struct hid_device *hid)
{
	return 0;
}
#endif

/*
 * Driver probe: parse the HID descriptor, start HW with the generic FF
 * connect bit masked off (this driver provides FF itself), then attempt
 * FF setup.  A zpff_init() failure is deliberately ignored so the device
 * still works as a plain input device.
 */
static int zp_probe(struct hid_device *hdev, const struct hid_device_id *id)
{
	int ret;

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_FF);
	if (ret) {
		hid_err(hdev, "hw start failed\n");
		goto err;
	}

	zpff_init(hdev);

	return 0;
err:
	return ret;
}

/* USB IDs this driver binds to (Zeroplus vendor, two product IDs). */
static const struct hid_device_id zp_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0005) },
	{ HID_USB_DEVICE(USB_VENDOR_ID_ZEROPLUS, 0x0030) },
	{ }
};
MODULE_DEVICE_TABLE(hid, zp_devices);

static struct hid_driver zp_driver = {
	.name = "zeroplus",
	.id_table = zp_devices,
	.probe = zp_probe,
};
module_hid_driver(zp_driver);

MODULE_DESCRIPTION("Force feedback support for Zeroplus based devices");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright(c) 2016 Intel Corporation.
 */

/*
 * rdmavt QP tracepoints.  This header follows the standard kernel
 * tracepoint pattern: it may be included multiple times (once normally,
 * and again with TRACE_HEADER_MULTI_READ set by define_trace.h to
 * generate the event implementations).
 */
#if !defined(__RVT_TRACE_QP_H) || defined(TRACE_HEADER_MULTI_READ)
#define __RVT_TRACE_QP_H

#include <linux/tracepoint.h>
#include <linux/trace_seq.h>

#include <rdma/ib_verbs.h>
#include <rdma/rdmavt_qp.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM rvt_qp

/*
 * Template for QP hash-table events: records the device, QP number and
 * the hash bucket the QP is inserted into / removed from.
 */
DECLARE_EVENT_CLASS(rvt_qphash_template,
	TP_PROTO(struct rvt_qp *qp, u32 bucket),
	TP_ARGS(qp, bucket),
	TP_STRUCT__entry(
		RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
		__field(u32, qpn)
		__field(u32, bucket)
	),
	TP_fast_assign(
		RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->bucket = bucket;
	),
	TP_printk(
		"[%s] qpn 0x%x bucket %u",
		__get_str(dev),
		__entry->qpn,
		__entry->bucket
	)
);

DEFINE_EVENT(rvt_qphash_template, rvt_qpinsert,
	TP_PROTO(struct rvt_qp *qp, u32 bucket),
	TP_ARGS(qp, bucket));

DEFINE_EVENT(rvt_qphash_template, rvt_qpremove,
	TP_PROTO(struct rvt_qp *qp, u32 bucket),
	TP_ARGS(qp, bucket));

/*
 * Template for RNR NAK timer events: records the device, QP number, the
 * hrtimer's address, the QP's s_flags and the timeout value in
 * microseconds at the time of the event.
 */
DECLARE_EVENT_CLASS(
	rvt_rnrnak_template,
	TP_PROTO(struct rvt_qp *qp, u32 to),
	TP_ARGS(qp, to),
	TP_STRUCT__entry(
		RDI_DEV_ENTRY(ib_to_rvt(qp->ibqp.device))
		__field(u32, qpn)
		__field(void *, hrtimer)
		__field(u32, s_flags)
		__field(u32, to)
	),
	TP_fast_assign(
		RDI_DEV_ASSIGN(ib_to_rvt(qp->ibqp.device));
		__entry->qpn = qp->ibqp.qp_num;
		__entry->hrtimer = &qp->s_rnr_timer;
		__entry->s_flags = qp->s_flags;
		__entry->to = to;
	),
	TP_printk(
		"[%s] qpn 0x%x hrtimer 0x%p s_flags 0x%x timeout %u us",
		__get_str(dev),
		__entry->qpn,
		__entry->hrtimer,
		__entry->s_flags,
		__entry->to
	)
);

DEFINE_EVENT(
	rvt_rnrnak_template, rvt_rnrnak_add,
	TP_PROTO(struct rvt_qp *qp, u32 to),
	TP_ARGS(qp, to));

DEFINE_EVENT(
	rvt_rnrnak_template, rvt_rnrnak_timeout,
	TP_PROTO(struct rvt_qp *qp, u32 to),
	TP_ARGS(qp, to));

DEFINE_EVENT(
	rvt_rnrnak_template, rvt_rnrnak_stop,
	TP_PROTO(struct rvt_qp *qp, u32 to),
	TP_ARGS(qp, to));

#endif /* __RVT_TRACE_QP_H */

/*
 * Boilerplate telling define_trace.h where to find this header so it can
 * re-include it to generate the tracepoint bodies.
 */
#undef TRACE_INCLUDE_PATH
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace_qp
#include <trace/define_trace.h>
// SPDX-License-Identifier: GPL-2.0 /* Copyright 2019-2021 NXP * * This is an umbrella module for all network switches that are * register-compatible with Ocelot and that perform I/O to their host CPU * through an NPI (Node Processor Interface) Ethernet port. */ #include <uapi/linux/if_bridge.h> #include <soc/mscc/ocelot_vcap.h> #include <soc/mscc/ocelot_qsys.h> #include <soc/mscc/ocelot_sys.h> #include <soc/mscc/ocelot_dev.h> #include <soc/mscc/ocelot_ana.h> #include <soc/mscc/ocelot_ptp.h> #include <soc/mscc/ocelot.h> #include <linux/dsa/8021q.h> #include <linux/dsa/ocelot.h> #include <linux/platform_device.h> #include <linux/ptp_classify.h> #include <linux/module.h> #include <linux/of_net.h> #include <linux/pci.h> #include <linux/of.h> #include <net/pkt_sched.h> #include <net/dsa.h> #include "felix.h" /* Translate the DSA database API into the ocelot switch library API, * which uses VID 0 for all ports that aren't part of a bridge, * and expects the bridge_dev to be NULL in that case. */ static struct net_device *felix_classify_db(struct dsa_db db) { switch (db.type) { case DSA_DB_PORT: case DSA_DB_LAG: return NULL; case DSA_DB_BRIDGE: return db.bridge.dev; default: return ERR_PTR(-EOPNOTSUPP); } } static int felix_cpu_port_for_conduit(struct dsa_switch *ds, struct net_device *conduit) { struct ocelot *ocelot = ds->priv; struct dsa_port *cpu_dp; int lag; if (netif_is_lag_master(conduit)) { mutex_lock(&ocelot->fwd_domain_lock); lag = ocelot_bond_get_id(ocelot, conduit); mutex_unlock(&ocelot->fwd_domain_lock); return lag; } cpu_dp = conduit->dsa_ptr; return cpu_dp->index; } /** * felix_update_tag_8021q_rx_rule - Update VCAP ES0 tag_8021q rule after * vlan_filtering change * @outer_tagging_rule: Pointer to VCAP filter on which the update is performed * @vlan_filtering: Current bridge VLAN filtering setting * * Source port identification for tag_8021q is done using VCAP ES0 rules on the * CPU port(s). 
The ES0 tag B (inner tag from the packet) can be configured as * either: * - push_inner_tag=0: the inner tag is never pushed into the frame * (and we lose info about the classified VLAN). This is * good when the classified VLAN is a discardable quantity * for the software RX path: it is either set to * OCELOT_STANDALONE_PVID, or to * ocelot_vlan_unaware_pvid(bridge). * - push_inner_tag=1: the inner tag is always pushed. This is good when the * classified VLAN is not a discardable quantity (the port * is under a VLAN-aware bridge, and software needs to * continue processing the packet in the same VLAN as the * hardware). * The point is that what is good for a VLAN-unaware port is not good for a * VLAN-aware port, and vice versa. Thus, the RX tagging rules must be kept in * sync with the VLAN filtering state of the port. */ static void felix_update_tag_8021q_rx_rule(struct ocelot_vcap_filter *outer_tagging_rule, bool vlan_filtering) { if (vlan_filtering) outer_tagging_rule->action.push_inner_tag = OCELOT_ES0_TAG; else outer_tagging_rule->action.push_inner_tag = OCELOT_NO_ES0_TAG; } /* Set up VCAP ES0 rules for pushing a tag_8021q VLAN towards the CPU such that * the tagger can perform RX source port identification. 
*/ static int felix_tag_8021q_vlan_add_rx(struct dsa_switch *ds, int port, int upstream, u16 vid, bool vlan_filtering) { struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot *ocelot = ds->priv; unsigned long cookie; int key_length, err; key_length = ocelot->vcap[VCAP_ES0].keys[VCAP_ES0_IGR_PORT].length; outer_tagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!outer_tagging_rule) return -ENOMEM; cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); outer_tagging_rule->key_type = OCELOT_VCAP_KEY_ANY; outer_tagging_rule->prio = 1; outer_tagging_rule->id.cookie = cookie; outer_tagging_rule->id.tc_offload = false; outer_tagging_rule->block_id = VCAP_ES0; outer_tagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; outer_tagging_rule->lookup = 0; outer_tagging_rule->ingress_port.value = port; outer_tagging_rule->ingress_port.mask = GENMASK(key_length - 1, 0); outer_tagging_rule->egress_port.value = upstream; outer_tagging_rule->egress_port.mask = GENMASK(key_length - 1, 0); outer_tagging_rule->action.push_outer_tag = OCELOT_ES0_TAG; outer_tagging_rule->action.tag_a_tpid_sel = OCELOT_TAG_TPID_SEL_8021AD; outer_tagging_rule->action.tag_a_vid_sel = 1; outer_tagging_rule->action.vid_a_val = vid; felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering); outer_tagging_rule->action.tag_b_tpid_sel = OCELOT_TAG_TPID_SEL_8021Q; /* Leave TAG_B_VID_SEL at 0 (Classified VID + VID_B_VAL). Since we also * leave VID_B_VAL at 0, this makes ES0 tag B (the inner tag) equal to * the classified VID, which we need to see in the DSA tagger's receive * path. Note: the inner tag is only visible in the packet when pushed * (push_inner_tag == OCELOT_ES0_TAG). 
*/ err = ocelot_vcap_filter_add(ocelot, outer_tagging_rule, NULL); if (err) kfree(outer_tagging_rule); return err; } static int felix_tag_8021q_vlan_del_rx(struct dsa_switch *ds, int port, int upstream, u16 vid) { struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot_vcap_block *block_vcap_es0; struct ocelot *ocelot = ds->priv; unsigned long cookie; block_vcap_es0 = &ocelot->block[VCAP_ES0]; cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, upstream); outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, cookie, false); if (!outer_tagging_rule) return -ENOENT; return ocelot_vcap_filter_del(ocelot, outer_tagging_rule); } /* Set up VCAP IS1 rules for stripping the tag_8021q VLAN on TX and VCAP IS2 * rules for steering those tagged packets towards the correct destination port */ static int felix_tag_8021q_vlan_add_tx(struct dsa_switch *ds, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; unsigned long cpu_ports = dsa_cpu_ports(ds); struct ocelot *ocelot = ds->priv; unsigned long cookie; int err; untagging_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!untagging_rule) return -ENOMEM; redirect_rule = kzalloc(sizeof(struct ocelot_vcap_filter), GFP_KERNEL); if (!redirect_rule) { kfree(untagging_rule); return -ENOMEM; } cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule->key_type = OCELOT_VCAP_KEY_ANY; untagging_rule->ingress_port_mask = cpu_ports; untagging_rule->vlan.vid.value = vid; untagging_rule->vlan.vid.mask = VLAN_VID_MASK; untagging_rule->prio = 1; untagging_rule->id.cookie = cookie; untagging_rule->id.tc_offload = false; untagging_rule->block_id = VCAP_IS1; untagging_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; untagging_rule->lookup = 0; untagging_rule->action.vlan_pop_cnt_ena = true; untagging_rule->action.vlan_pop_cnt = 1; untagging_rule->action.pag_override_mask = 0xff; untagging_rule->action.pag_val = port; err = ocelot_vcap_filter_add(ocelot, untagging_rule, 
NULL); if (err) { kfree(untagging_rule); kfree(redirect_rule); return err; } cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); redirect_rule->key_type = OCELOT_VCAP_KEY_ANY; redirect_rule->ingress_port_mask = cpu_ports; redirect_rule->pag = port; redirect_rule->prio = 1; redirect_rule->id.cookie = cookie; redirect_rule->id.tc_offload = false; redirect_rule->block_id = VCAP_IS2; redirect_rule->type = OCELOT_VCAP_FILTER_OFFLOAD; redirect_rule->lookup = 0; redirect_rule->action.mask_mode = OCELOT_MASK_MODE_REDIRECT; redirect_rule->action.port_mask = BIT(port); err = ocelot_vcap_filter_add(ocelot, redirect_rule, NULL); if (err) { ocelot_vcap_filter_del(ocelot, untagging_rule); kfree(redirect_rule); return err; } return 0; } static int felix_tag_8021q_vlan_del_tx(struct dsa_switch *ds, int port, u16 vid) { struct ocelot_vcap_filter *untagging_rule, *redirect_rule; struct ocelot_vcap_block *block_vcap_is1; struct ocelot_vcap_block *block_vcap_is2; struct ocelot *ocelot = ds->priv; unsigned long cookie; int err; block_vcap_is1 = &ocelot->block[VCAP_IS1]; block_vcap_is2 = &ocelot->block[VCAP_IS2]; cookie = OCELOT_VCAP_IS1_TAG_8021Q_TXVLAN(ocelot, port); untagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is1, cookie, false); if (!untagging_rule) return -ENOENT; err = ocelot_vcap_filter_del(ocelot, untagging_rule); if (err) return err; cookie = OCELOT_VCAP_IS2_TAG_8021Q_TXVLAN(ocelot, port); redirect_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_is2, cookie, false); if (!redirect_rule) return -ENOENT; return ocelot_vcap_filter_del(ocelot, redirect_rule); } static int felix_tag_8021q_vlan_add(struct dsa_switch *ds, int port, u16 vid, u16 flags) { struct dsa_port *dp = dsa_to_port(ds, port); struct dsa_port *cpu_dp; int err; /* tag_8021q.c assumes we are implementing this via port VLAN * membership, which we aren't. So we don't need to add any VCAP filter * for the CPU port. 
*/ if (!dsa_port_is_user(dp)) return 0; dsa_switch_for_each_cpu_port(cpu_dp, ds) { err = felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid, dsa_port_is_vlan_filtering(dp)); if (err) return err; } err = felix_tag_8021q_vlan_add_tx(ds, port, vid); if (err) goto add_tx_failed; return 0; add_tx_failed: dsa_switch_for_each_cpu_port(cpu_dp, ds) felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); return err; } static int felix_tag_8021q_vlan_del(struct dsa_switch *ds, int port, u16 vid) { struct dsa_port *dp = dsa_to_port(ds, port); struct dsa_port *cpu_dp; int err; if (!dsa_port_is_user(dp)) return 0; dsa_switch_for_each_cpu_port(cpu_dp, ds) { err = felix_tag_8021q_vlan_del_rx(ds, port, cpu_dp->index, vid); if (err) return err; } err = felix_tag_8021q_vlan_del_tx(ds, port, vid); if (err) goto del_tx_failed; return 0; del_tx_failed: dsa_switch_for_each_cpu_port(cpu_dp, ds) felix_tag_8021q_vlan_add_rx(ds, port, cpu_dp->index, vid, dsa_port_is_vlan_filtering(dp)); return err; } static int felix_update_tag_8021q_rx_rules(struct dsa_switch *ds, int port, bool vlan_filtering) { struct ocelot_vcap_filter *outer_tagging_rule; struct ocelot_vcap_block *block_vcap_es0; struct ocelot *ocelot = ds->priv; struct dsa_port *cpu_dp; unsigned long cookie; int err; block_vcap_es0 = &ocelot->block[VCAP_ES0]; dsa_switch_for_each_cpu_port(cpu_dp, ds) { cookie = OCELOT_VCAP_ES0_TAG_8021Q_RXVLAN(ocelot, port, cpu_dp->index); outer_tagging_rule = ocelot_vcap_block_find_filter_by_id(block_vcap_es0, cookie, false); felix_update_tag_8021q_rx_rule(outer_tagging_rule, vlan_filtering); err = ocelot_vcap_filter_replace(ocelot, outer_tagging_rule); if (err) return err; } return 0; } static int felix_trap_get_cpu_port(struct dsa_switch *ds, const struct ocelot_vcap_filter *trap) { struct dsa_port *dp; int first_port; if (WARN_ON(!trap->ingress_port_mask)) return -1; first_port = __ffs(trap->ingress_port_mask); dp = dsa_to_port(ds, first_port); return dp->cpu_dp->index; } /* On switches 
with no extraction IRQ wired, trapped packets need to be * replicated over Ethernet as well, otherwise we'd get no notification of * their arrival when using the ocelot-8021q tagging protocol. */ static int felix_update_trapping_destinations(struct dsa_switch *ds, bool using_tag_8021q) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); struct ocelot_vcap_block *block_vcap_is2; struct ocelot_vcap_filter *trap; enum ocelot_mask_mode mask_mode; unsigned long port_mask; bool cpu_copy_ena; int err; if (!felix->info->quirk_no_xtr_irq) return 0; /* We are sure that "cpu" was found, otherwise * dsa_tree_setup_default_cpu() would have failed earlier. */ block_vcap_is2 = &ocelot->block[VCAP_IS2]; /* Make sure all traps are set up for that destination */ list_for_each_entry(trap, &block_vcap_is2->rules, list) { if (!trap->is_trap) continue; /* Figure out the current trapping destination */ if (using_tag_8021q) { /* Redirect to the tag_8021q CPU port. If timestamps * are necessary, also copy trapped packets to the CPU * port module. */ mask_mode = OCELOT_MASK_MODE_REDIRECT; port_mask = BIT(felix_trap_get_cpu_port(ds, trap)); cpu_copy_ena = !!trap->take_ts; } else { /* Trap packets only to the CPU port module, which is * redirected to the NPI port (the DSA CPU port) */ mask_mode = OCELOT_MASK_MODE_PERMIT_DENY; port_mask = 0; cpu_copy_ena = true; } if (trap->action.mask_mode == mask_mode && trap->action.port_mask == port_mask && trap->action.cpu_copy_ena == cpu_copy_ena) continue; trap->action.mask_mode = mask_mode; trap->action.port_mask = port_mask; trap->action.cpu_copy_ena = cpu_copy_ena; err = ocelot_vcap_filter_replace(ocelot, trap); if (err) return err; } return 0; } /* The CPU port module is connected to the Node Processor Interface (NPI). This * is the mode through which frames can be injected from and extracted to an * external CPU, over Ethernet. 
In NXP SoCs, the "external CPU" is the ARM CPU * running Linux, and this forms a DSA setup together with the enetc or fman * DSA conduit. */ static void felix_npi_port_init(struct ocelot *ocelot, int port) { ocelot->npi = port; ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M | QSYS_EXT_CPU_CFG_EXT_CPU_PORT(port), QSYS_EXT_CPU_CFG); /* NPI port Injection/Extraction configuration */ ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, ocelot->npi_xtr_prefix); ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, ocelot->npi_inj_prefix); /* Disable transmission of pause frames */ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 0); } static void felix_npi_port_deinit(struct ocelot *ocelot, int port) { /* Restore hardware defaults */ int unused_port = ocelot->num_phys_ports + 2; ocelot->npi = -1; ocelot_write(ocelot, QSYS_EXT_CPU_CFG_EXT_CPU_PORT(unused_port), QSYS_EXT_CPU_CFG); ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_XTR_HDR, OCELOT_TAG_PREFIX_DISABLED); ocelot_fields_write(ocelot, port, SYS_PORT_MODE_INCL_INJ_HDR, OCELOT_TAG_PREFIX_DISABLED); /* Enable transmission of pause frames */ ocelot_fields_write(ocelot, port, SYS_PAUSE_CFG_PAUSE_ENA, 1); } static int felix_tag_npi_setup(struct dsa_switch *ds) { struct dsa_port *dp, *first_cpu_dp = NULL; struct ocelot *ocelot = ds->priv; dsa_switch_for_each_user_port(dp, ds) { if (first_cpu_dp && dp->cpu_dp != first_cpu_dp) { dev_err(ds->dev, "Multiple NPI ports not supported\n"); return -EINVAL; } first_cpu_dp = dp->cpu_dp; } if (!first_cpu_dp) return -EINVAL; felix_npi_port_init(ocelot, first_cpu_dp->index); return 0; } static void felix_tag_npi_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; felix_npi_port_deinit(ocelot, ocelot->npi); } static unsigned long felix_tag_npi_get_host_fwd_mask(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; return BIT(ocelot->num_phys_ports); } static int felix_tag_npi_change_conduit(struct dsa_switch *ds, int port, 
struct net_device *conduit, struct netlink_ext_ack *extack) { struct dsa_port *dp = dsa_to_port(ds, port), *other_dp; struct ocelot *ocelot = ds->priv; if (netif_is_lag_master(conduit)) { NL_SET_ERR_MSG_MOD(extack, "LAG DSA conduit only supported using ocelot-8021q"); return -EOPNOTSUPP; } /* Changing the NPI port breaks user ports still assigned to the old * one, so only allow it while they're down, and don't allow them to * come back up until they're all changed to the new one. */ dsa_switch_for_each_user_port(other_dp, ds) { struct net_device *user = other_dp->user; if (other_dp != dp && (user->flags & IFF_UP) && dsa_port_to_conduit(other_dp) != conduit) { NL_SET_ERR_MSG_MOD(extack, "Cannot change while old conduit still has users"); return -EOPNOTSUPP; } } felix_npi_port_deinit(ocelot, ocelot->npi); felix_npi_port_init(ocelot, felix_cpu_port_for_conduit(ds, conduit)); return 0; } /* Alternatively to using the NPI functionality, that same hardware MAC * connected internally to the enetc or fman DSA conduit can be configured to * use the software-defined tag_8021q frame format. As far as the hardware is * concerned, it thinks it is a "dumb switch" - the queues of the CPU port * module are now disconnected from it, but can still be accessed through * register-based MMIO. 
*/ static const struct felix_tag_proto_ops felix_tag_npi_proto_ops = { .setup = felix_tag_npi_setup, .teardown = felix_tag_npi_teardown, .get_host_fwd_mask = felix_tag_npi_get_host_fwd_mask, .change_conduit = felix_tag_npi_change_conduit, }; static int felix_tag_8021q_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct dsa_port *dp; int err; err = dsa_tag_8021q_register(ds, htons(ETH_P_8021AD)); if (err) return err; dsa_switch_for_each_cpu_port(dp, ds) ocelot_port_setup_dsa_8021q_cpu(ocelot, dp->index); dsa_switch_for_each_user_port(dp, ds) ocelot_port_assign_dsa_8021q_cpu(ocelot, dp->index, dp->cpu_dp->index); dsa_switch_for_each_available_port(dp, ds) /* This overwrites ocelot_init(): * Do not forward BPDU frames to the CPU port module, * for 2 reasons: * - When these packets are injected from the tag_8021q * CPU port, we want them to go out, not loop back * into the system. * - STP traffic ingressing on a user port should go to * the tag_8021q CPU port, not to the hardware CPU * port module. */ ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0), ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); /* The ownership of the CPU port module's queues might have just been * transferred to the tag_8021q tagger from the NPI-based tagger. * So there might still be all sorts of crap in the queues. On the * other hand, the MMIO-based matching of PTP frames is very brittle, * so we need to be careful that there are no extra frames to be * dequeued over MMIO, since we would never know to discard them. */ ocelot_lock_xtr_grp_bh(ocelot, 0); ocelot_drain_cpu_queue(ocelot, 0); ocelot_unlock_xtr_grp_bh(ocelot, 0); /* Problem: when using push_inner_tag=1 for ES0 tag B, we lose info * about whether the received packets were VLAN-tagged on the wire, * since they are always tagged on egress towards the CPU port. 
* * Since using push_inner_tag=1 is unavoidable for VLAN-aware bridges, * we must work around the fallout by untagging in software to make * untagged reception work more or less as expected. */ ds->untag_vlan_aware_bridge_pvid = true; return 0; } static void felix_tag_8021q_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct dsa_port *dp; dsa_switch_for_each_available_port(dp, ds) /* Restore the logic from ocelot_init: * do not forward BPDU frames to the front ports. */ ocelot_write_gix(ocelot, ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(0xffff), ANA_PORT_CPU_FWD_BPDU_CFG, dp->index); dsa_switch_for_each_user_port(dp, ds) ocelot_port_unassign_dsa_8021q_cpu(ocelot, dp->index); dsa_switch_for_each_cpu_port(dp, ds) ocelot_port_teardown_dsa_8021q_cpu(ocelot, dp->index); dsa_tag_8021q_unregister(ds); ds->untag_vlan_aware_bridge_pvid = false; } static unsigned long felix_tag_8021q_get_host_fwd_mask(struct dsa_switch *ds) { return dsa_cpu_ports(ds); } static int felix_tag_8021q_change_conduit(struct dsa_switch *ds, int port, struct net_device *conduit, struct netlink_ext_ack *extack) { int cpu = felix_cpu_port_for_conduit(ds, conduit); struct ocelot *ocelot = ds->priv; ocelot_port_unassign_dsa_8021q_cpu(ocelot, port); ocelot_port_assign_dsa_8021q_cpu(ocelot, port, cpu); return felix_update_trapping_destinations(ds, true); } static const struct felix_tag_proto_ops felix_tag_8021q_proto_ops = { .setup = felix_tag_8021q_setup, .teardown = felix_tag_8021q_teardown, .get_host_fwd_mask = felix_tag_8021q_get_host_fwd_mask, .change_conduit = felix_tag_8021q_change_conduit, }; static void felix_set_host_flood(struct dsa_switch *ds, unsigned long mask, bool uc, bool mc, bool bc) { struct ocelot *ocelot = ds->priv; unsigned long val; val = uc ? mask : 0; ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_UC); val = mc ? 
mask : 0; ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MC); ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV4); ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_MCIPV6); val = bc ? mask : 0; ocelot_rmw_rix(ocelot, val, mask, ANA_PGID_PGID, PGID_BC); } static void felix_migrate_host_flood(struct dsa_switch *ds, const struct felix_tag_proto_ops *proto_ops, const struct felix_tag_proto_ops *old_proto_ops) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); unsigned long mask; if (old_proto_ops) { mask = old_proto_ops->get_host_fwd_mask(ds); felix_set_host_flood(ds, mask, false, false, false); } mask = proto_ops->get_host_fwd_mask(ds); felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, !!felix->host_flood_mc_mask, true); } static int felix_migrate_mdbs(struct dsa_switch *ds, const struct felix_tag_proto_ops *proto_ops, const struct felix_tag_proto_ops *old_proto_ops) { struct ocelot *ocelot = ds->priv; unsigned long from, to; if (!old_proto_ops) return 0; from = old_proto_ops->get_host_fwd_mask(ds); to = proto_ops->get_host_fwd_mask(ds); return ocelot_migrate_mdbs(ocelot, from, to); } /* Configure the shared hardware resources for a transition between * @old_proto_ops and @proto_ops. * Manual migration is needed because as far as DSA is concerned, no change of * the CPU port is taking place here, just of the tagging protocol. */ static int felix_tag_proto_setup_shared(struct dsa_switch *ds, const struct felix_tag_proto_ops *proto_ops, const struct felix_tag_proto_ops *old_proto_ops) { bool using_tag_8021q = (proto_ops == &felix_tag_8021q_proto_ops); int err; err = felix_migrate_mdbs(ds, proto_ops, old_proto_ops); if (err) return err; felix_update_trapping_destinations(ds, using_tag_8021q); felix_migrate_host_flood(ds, proto_ops, old_proto_ops); return 0; } /* This always leaves the switch in a consistent state, because although the * tag_8021q setup can fail, the NPI setup can't. 
So either the change is made, * or the restoration is guaranteed to work. */ static int felix_change_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { const struct felix_tag_proto_ops *old_proto_ops, *proto_ops; struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); int err; switch (proto) { case DSA_TAG_PROTO_SEVILLE: case DSA_TAG_PROTO_OCELOT: proto_ops = &felix_tag_npi_proto_ops; break; case DSA_TAG_PROTO_OCELOT_8021Q: proto_ops = &felix_tag_8021q_proto_ops; break; default: return -EPROTONOSUPPORT; } old_proto_ops = felix->tag_proto_ops; if (proto_ops == old_proto_ops) return 0; err = proto_ops->setup(ds); if (err) goto setup_failed; err = felix_tag_proto_setup_shared(ds, proto_ops, old_proto_ops); if (err) goto setup_shared_failed; if (old_proto_ops) old_proto_ops->teardown(ds); felix->tag_proto_ops = proto_ops; felix->tag_proto = proto; return 0; setup_shared_failed: proto_ops->teardown(ds); setup_failed: return err; } static enum dsa_tag_protocol felix_get_tag_protocol(struct dsa_switch *ds, int port, enum dsa_tag_protocol mp) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); return felix->tag_proto; } static void felix_port_set_host_flood(struct dsa_switch *ds, int port, bool uc, bool mc) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); unsigned long mask; if (uc) felix->host_flood_uc_mask |= BIT(port); else felix->host_flood_uc_mask &= ~BIT(port); if (mc) felix->host_flood_mc_mask |= BIT(port); else felix->host_flood_mc_mask &= ~BIT(port); mask = felix->tag_proto_ops->get_host_fwd_mask(ds); felix_set_host_flood(ds, mask, !!felix->host_flood_uc_mask, !!felix->host_flood_mc_mask, true); } static int felix_port_change_conduit(struct dsa_switch *ds, int port, struct net_device *conduit, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); return felix->tag_proto_ops->change_conduit(ds, port, 
conduit, extack); } static int felix_set_ageing_time(struct dsa_switch *ds, unsigned int ageing_time) { struct ocelot *ocelot = ds->priv; ocelot_set_ageing_time(ocelot, ageing_time); return 0; } static void felix_port_fast_age(struct dsa_switch *ds, int port) { struct ocelot *ocelot = ds->priv; int err; err = ocelot_mact_flush(ocelot, port); if (err) dev_err(ds->dev, "Flushing MAC table on port %d returned %pe\n", port, ERR_PTR(err)); } static int felix_fdb_dump(struct dsa_switch *ds, int port, dsa_fdb_dump_cb_t *cb, void *data) { struct ocelot *ocelot = ds->priv; return ocelot_fdb_dump(ocelot, port, cb, data); } static int felix_fdb_add(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); if (dsa_port_is_cpu(dp) && !bridge_dev && dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) return 0; if (dsa_port_is_cpu(dp)) port = PGID_CPU; return ocelot_fdb_add(ocelot, port, addr, vid, bridge_dev); } static int felix_fdb_del(struct dsa_switch *ds, int port, const unsigned char *addr, u16 vid, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); if (dsa_port_is_cpu(dp) && !bridge_dev && dsa_fdb_present_in_other_db(ds, port, addr, vid, db)) return 0; if (dsa_port_is_cpu(dp)) port = PGID_CPU; return ocelot_fdb_del(ocelot, port, addr, vid, bridge_dev); } static int felix_lag_fdb_add(struct dsa_switch *ds, struct dsa_lag lag, const unsigned char *addr, u16 vid, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); return ocelot_lag_fdb_add(ocelot, lag.dev, addr, vid, bridge_dev); } static int 
felix_lag_fdb_del(struct dsa_switch *ds, struct dsa_lag lag, const unsigned char *addr, u16 vid, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); return ocelot_lag_fdb_del(ocelot, lag.dev, addr, vid, bridge_dev); } static int felix_mdb_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); if (dsa_is_cpu_port(ds, port) && !bridge_dev && dsa_mdb_present_in_other_db(ds, port, mdb, db)) return 0; if (port == ocelot->npi) port = ocelot->num_phys_ports; return ocelot_port_mdb_add(ocelot, port, mdb, bridge_dev); } static int felix_mdb_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_mdb *mdb, struct dsa_db db) { struct net_device *bridge_dev = felix_classify_db(db); struct ocelot *ocelot = ds->priv; if (IS_ERR(bridge_dev)) return PTR_ERR(bridge_dev); if (dsa_is_cpu_port(ds, port) && !bridge_dev && dsa_mdb_present_in_other_db(ds, port, mdb, db)) return 0; if (port == ocelot->npi) port = ocelot->num_phys_ports; return ocelot_port_mdb_del(ocelot, port, mdb, bridge_dev); } static void felix_bridge_stp_state_set(struct dsa_switch *ds, int port, u8 state) { struct ocelot *ocelot = ds->priv; return ocelot_bridge_stp_state_set(ocelot, port, state); } static int felix_pre_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags val, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; return ocelot_port_pre_bridge_flags(ocelot, port, val); } static int felix_bridge_flags(struct dsa_switch *ds, int port, struct switchdev_brport_flags val, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; if (port == ocelot->npi) port = ocelot->num_phys_ports; ocelot_port_bridge_flags(ocelot, port, val); return 0; } static int 
felix_bridge_join(struct dsa_switch *ds, int port, struct dsa_bridge bridge, bool *tx_fwd_offload, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; return ocelot_port_bridge_join(ocelot, port, bridge.dev, bridge.num, extack); } static void felix_bridge_leave(struct dsa_switch *ds, int port, struct dsa_bridge bridge) { struct ocelot *ocelot = ds->priv; ocelot_port_bridge_leave(ocelot, port, bridge.dev); } static int felix_lag_join(struct dsa_switch *ds, int port, struct dsa_lag lag, struct netdev_lag_upper_info *info, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; int err; err = ocelot_port_lag_join(ocelot, port, lag.dev, info, extack); if (err) return err; /* Update the logical LAG port that serves as tag_8021q CPU port */ if (!dsa_is_cpu_port(ds, port)) return 0; return felix_port_change_conduit(ds, port, lag.dev, extack); } static int felix_lag_leave(struct dsa_switch *ds, int port, struct dsa_lag lag) { struct ocelot *ocelot = ds->priv; ocelot_port_lag_leave(ocelot, port, lag.dev); /* Update the logical LAG port that serves as tag_8021q CPU port */ if (!dsa_is_cpu_port(ds, port)) return 0; return felix_port_change_conduit(ds, port, lag.dev, NULL); } static int felix_lag_change(struct dsa_switch *ds, int port) { struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; ocelot_port_lag_change(ocelot, port, dp->lag_tx_enabled); return 0; } static int felix_vlan_prepare(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; u16 flags = vlan->flags; /* Ocelot switches copy frames as-is to the CPU, so the flags: * egress-untagged or not, pvid or not, make no difference. This * behavior is already better than what DSA just tries to approximate * when it installs the VLAN with the same flags on the CPU port. 
* Just accept any configuration, and don't let ocelot deny installing * multiple native VLANs on the NPI port, because the switch doesn't * look at the port tag settings towards the NPI interface anyway. */ if (port == ocelot->npi) return 0; return ocelot_vlan_prepare(ocelot, port, vlan->vid, flags & BRIDGE_VLAN_INFO_PVID, flags & BRIDGE_VLAN_INFO_UNTAGGED, extack); } static int felix_vlan_filtering(struct dsa_switch *ds, int port, bool enabled, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; bool using_tag_8021q; struct felix *felix; int err; err = ocelot_port_vlan_filtering(ocelot, port, enabled, extack); if (err) return err; felix = ocelot_to_felix(ocelot); using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; if (using_tag_8021q) { err = felix_update_tag_8021q_rx_rules(ds, port, enabled); if (err) return err; } return 0; } static int felix_vlan_add(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; u16 flags = vlan->flags; int err; err = felix_vlan_prepare(ds, port, vlan, extack); if (err) return err; return ocelot_vlan_add(ocelot, port, vlan->vid, flags & BRIDGE_VLAN_INFO_PVID, flags & BRIDGE_VLAN_INFO_UNTAGGED); } static int felix_vlan_del(struct dsa_switch *ds, int port, const struct switchdev_obj_port_vlan *vlan) { struct ocelot *ocelot = ds->priv; return ocelot_vlan_del(ocelot, port, vlan->vid); } static void felix_phylink_get_caps(struct dsa_switch *ds, int port, struct phylink_config *config) { struct ocelot *ocelot = ds->priv; config->mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | MAC_10 | MAC_100 | MAC_1000FD | MAC_2500FD; __set_bit(ocelot->ports[port]->phy_mode, config->supported_interfaces); } static void felix_phylink_mac_config(struct phylink_config *config, unsigned int mode, const struct phylink_link_state *state) { struct dsa_port *dp = dsa_phylink_to_port(config); struct ocelot *ocelot = dp->ds->priv; int port = 
dp->index; struct felix *felix; felix = ocelot_to_felix(ocelot); if (felix->info->phylink_mac_config) felix->info->phylink_mac_config(ocelot, port, mode, state); } static struct phylink_pcs * felix_phylink_mac_select_pcs(struct phylink_config *config, phy_interface_t iface) { struct dsa_port *dp = dsa_phylink_to_port(config); struct ocelot *ocelot = dp->ds->priv; struct phylink_pcs *pcs = NULL; int port = dp->index; struct felix *felix; felix = ocelot_to_felix(ocelot); if (felix->pcs && felix->pcs[port]) pcs = felix->pcs[port]; return pcs; } static void felix_phylink_mac_link_down(struct phylink_config *config, unsigned int link_an_mode, phy_interface_t interface) { struct dsa_port *dp = dsa_phylink_to_port(config); struct ocelot *ocelot = dp->ds->priv; int port = dp->index; struct felix *felix; felix = ocelot_to_felix(ocelot); ocelot_phylink_mac_link_down(ocelot, port, link_an_mode, interface, felix->info->quirks); } static void felix_phylink_mac_link_up(struct phylink_config *config, struct phy_device *phydev, unsigned int link_an_mode, phy_interface_t interface, int speed, int duplex, bool tx_pause, bool rx_pause) { struct dsa_port *dp = dsa_phylink_to_port(config); struct ocelot *ocelot = dp->ds->priv; int port = dp->index; struct felix *felix; felix = ocelot_to_felix(ocelot); ocelot_phylink_mac_link_up(ocelot, port, phydev, link_an_mode, interface, speed, duplex, tx_pause, rx_pause, felix->info->quirks); if (felix->info->port_sched_speed_set) felix->info->port_sched_speed_set(ocelot, port, speed); } static int felix_port_enable(struct dsa_switch *ds, int port, struct phy_device *phydev) { struct dsa_port *dp = dsa_to_port(ds, port); struct ocelot *ocelot = ds->priv; if (!dsa_port_is_user(dp)) return 0; if (ocelot->npi >= 0) { struct net_device *conduit = dsa_port_to_conduit(dp); if (felix_cpu_port_for_conduit(ds, conduit) != ocelot->npi) { dev_err(ds->dev, "Multiple conduits are not allowed\n"); return -EINVAL; } } return 0; } static void 
felix_port_qos_map_init(struct ocelot *ocelot, int port) { int i; ocelot_rmw_gix(ocelot, ANA_PORT_QOS_CFG_QOS_PCP_ENA, ANA_PORT_QOS_CFG_QOS_PCP_ENA, ANA_PORT_QOS_CFG, port); for (i = 0; i < OCELOT_NUM_TC * 2; i++) { ocelot_rmw_ix(ocelot, (ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL & i) | ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(i), ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL | ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M, ANA_PORT_PCP_DEI_MAP, port, i); } } static void felix_get_stats64(struct dsa_switch *ds, int port, struct rtnl_link_stats64 *stats) { struct ocelot *ocelot = ds->priv; ocelot_port_get_stats64(ocelot, port, stats); } static void felix_get_pause_stats(struct dsa_switch *ds, int port, struct ethtool_pause_stats *pause_stats) { struct ocelot *ocelot = ds->priv; ocelot_port_get_pause_stats(ocelot, port, pause_stats); } static void felix_get_rmon_stats(struct dsa_switch *ds, int port, struct ethtool_rmon_stats *rmon_stats, const struct ethtool_rmon_hist_range **ranges) { struct ocelot *ocelot = ds->priv; ocelot_port_get_rmon_stats(ocelot, port, rmon_stats, ranges); } static void felix_get_eth_ctrl_stats(struct dsa_switch *ds, int port, struct ethtool_eth_ctrl_stats *ctrl_stats) { struct ocelot *ocelot = ds->priv; ocelot_port_get_eth_ctrl_stats(ocelot, port, ctrl_stats); } static void felix_get_eth_mac_stats(struct dsa_switch *ds, int port, struct ethtool_eth_mac_stats *mac_stats) { struct ocelot *ocelot = ds->priv; ocelot_port_get_eth_mac_stats(ocelot, port, mac_stats); } static void felix_get_eth_phy_stats(struct dsa_switch *ds, int port, struct ethtool_eth_phy_stats *phy_stats) { struct ocelot *ocelot = ds->priv; ocelot_port_get_eth_phy_stats(ocelot, port, phy_stats); } static void felix_get_strings(struct dsa_switch *ds, int port, u32 stringset, u8 *data) { struct ocelot *ocelot = ds->priv; return ocelot_get_strings(ocelot, port, stringset, data); } static void felix_get_ethtool_stats(struct dsa_switch *ds, int port, u64 *data) { struct ocelot *ocelot = ds->priv; 
ocelot_get_ethtool_stats(ocelot, port, data); } static int felix_get_sset_count(struct dsa_switch *ds, int port, int sset) { struct ocelot *ocelot = ds->priv; return ocelot_get_sset_count(ocelot, port, sset); } static int felix_get_ts_info(struct dsa_switch *ds, int port, struct kernel_ethtool_ts_info *info) { struct ocelot *ocelot = ds->priv; return ocelot_get_ts_info(ocelot, port, info); } static const u32 felix_phy_match_table[PHY_INTERFACE_MODE_MAX] = { [PHY_INTERFACE_MODE_INTERNAL] = OCELOT_PORT_MODE_INTERNAL, [PHY_INTERFACE_MODE_SGMII] = OCELOT_PORT_MODE_SGMII, [PHY_INTERFACE_MODE_QSGMII] = OCELOT_PORT_MODE_QSGMII, [PHY_INTERFACE_MODE_USXGMII] = OCELOT_PORT_MODE_USXGMII, [PHY_INTERFACE_MODE_1000BASEX] = OCELOT_PORT_MODE_1000BASEX, [PHY_INTERFACE_MODE_2500BASEX] = OCELOT_PORT_MODE_2500BASEX, }; static int felix_validate_phy_mode(struct felix *felix, int port, phy_interface_t phy_mode) { u32 modes = felix->info->port_modes[port]; if (felix_phy_match_table[phy_mode] & modes) return 0; return -EOPNOTSUPP; } static int felix_parse_ports_node(struct felix *felix, struct device_node *ports_node, phy_interface_t *port_phy_modes) { struct device *dev = felix->ocelot.dev; for_each_available_child_of_node_scoped(ports_node, child) { phy_interface_t phy_mode; u32 port; int err; /* Get switch port number from DT */ if (of_property_read_u32(child, "reg", &port) < 0) { dev_err(dev, "Port number not defined in device tree " "(property \"reg\")\n"); return -ENODEV; } /* Get PHY mode from DT */ err = of_get_phy_mode(child, &phy_mode); if (err) { dev_err(dev, "Failed to read phy-mode or " "phy-interface-type property for port %d\n", port); return -ENODEV; } err = felix_validate_phy_mode(felix, port, phy_mode); if (err < 0) { dev_info(dev, "Unsupported PHY mode %s on port %d\n", phy_modes(phy_mode), port); /* Leave port_phy_modes[port] = 0, which is also * PHY_INTERFACE_MODE_NA. This will perform a * best-effort to bring up as many ports as possible. 
*/ continue; } port_phy_modes[port] = phy_mode; } return 0; } static int felix_parse_dt(struct felix *felix, phy_interface_t *port_phy_modes) { struct device *dev = felix->ocelot.dev; struct device_node *switch_node; struct device_node *ports_node; int err; switch_node = dev->of_node; ports_node = of_get_child_by_name(switch_node, "ports"); if (!ports_node) ports_node = of_get_child_by_name(switch_node, "ethernet-ports"); if (!ports_node) { dev_err(dev, "Incorrect bindings: absent \"ports\" or \"ethernet-ports\" node\n"); return -ENODEV; } err = felix_parse_ports_node(felix, ports_node, port_phy_modes); of_node_put(ports_node); return err; } static struct regmap *felix_request_regmap_by_name(struct felix *felix, const char *resource_name) { struct ocelot *ocelot = &felix->ocelot; struct resource res; int i; /* In an MFD configuration, regmaps are registered directly to the * parent device before the child devices are probed, so there is no * need to initialize a new one. */ if (!felix->info->resources) return dev_get_regmap(ocelot->dev->parent, resource_name); for (i = 0; i < felix->info->num_resources; i++) { if (strcmp(resource_name, felix->info->resources[i].name)) continue; memcpy(&res, &felix->info->resources[i], sizeof(res)); res.start += felix->switch_base; res.end += felix->switch_base; return ocelot_regmap_init(ocelot, &res); } return ERR_PTR(-ENOENT); } static struct regmap *felix_request_regmap(struct felix *felix, enum ocelot_target target) { const char *resource_name = felix->info->resource_names[target]; /* If the driver didn't provide a resource name for the target, * the resource is optional. 
*/ if (!resource_name) return NULL; return felix_request_regmap_by_name(felix, resource_name); } static struct regmap *felix_request_port_regmap(struct felix *felix, int port) { char resource_name[32]; sprintf(resource_name, "port%d", port); return felix_request_regmap_by_name(felix, resource_name); } static int felix_init_structs(struct felix *felix, int num_phys_ports) { struct ocelot *ocelot = &felix->ocelot; phy_interface_t *port_phy_modes; struct regmap *target; int port, i, err; ocelot->num_phys_ports = num_phys_ports; ocelot->ports = devm_kcalloc(ocelot->dev, num_phys_ports, sizeof(struct ocelot_port *), GFP_KERNEL); if (!ocelot->ports) return -ENOMEM; ocelot->map = felix->info->map; ocelot->num_mact_rows = felix->info->num_mact_rows; ocelot->vcap = felix->info->vcap; ocelot->vcap_pol.base = felix->info->vcap_pol_base; ocelot->vcap_pol.max = felix->info->vcap_pol_max; ocelot->vcap_pol.base2 = felix->info->vcap_pol_base2; ocelot->vcap_pol.max2 = felix->info->vcap_pol_max2; ocelot->ops = felix->info->ops; ocelot->npi_inj_prefix = OCELOT_TAG_PREFIX_SHORT; ocelot->npi_xtr_prefix = OCELOT_TAG_PREFIX_SHORT; ocelot->devlink = felix->ds->devlink; port_phy_modes = kcalloc(num_phys_ports, sizeof(phy_interface_t), GFP_KERNEL); if (!port_phy_modes) return -ENOMEM; err = felix_parse_dt(felix, port_phy_modes); if (err) { kfree(port_phy_modes); return err; } for (i = 0; i < TARGET_MAX; i++) { target = felix_request_regmap(felix, i); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map device memory space: %pe\n", target); kfree(port_phy_modes); return PTR_ERR(target); } ocelot->targets[i] = target; } err = ocelot_regfields_init(ocelot, felix->info->regfields); if (err) { dev_err(ocelot->dev, "failed to init reg fields map\n"); kfree(port_phy_modes); return err; } for (port = 0; port < num_phys_ports; port++) { struct ocelot_port *ocelot_port; ocelot_port = devm_kzalloc(ocelot->dev, sizeof(struct ocelot_port), GFP_KERNEL); if (!ocelot_port) { dev_err(ocelot->dev, 
"failed to allocate port memory\n"); kfree(port_phy_modes); return -ENOMEM; } target = felix_request_port_regmap(felix, port); if (IS_ERR(target)) { dev_err(ocelot->dev, "Failed to map memory space for port %d: %pe\n", port, target); kfree(port_phy_modes); return PTR_ERR(target); } ocelot_port->phy_mode = port_phy_modes[port]; ocelot_port->ocelot = ocelot; ocelot_port->target = target; ocelot_port->index = port; ocelot->ports[port] = ocelot_port; } kfree(port_phy_modes); if (felix->info->mdio_bus_alloc) { err = felix->info->mdio_bus_alloc(ocelot); if (err < 0) return err; } return 0; } static void ocelot_port_purge_txtstamp_skb(struct ocelot *ocelot, int port, struct sk_buff *skb) { struct ocelot_port *ocelot_port = ocelot->ports[port]; struct sk_buff *clone = OCELOT_SKB_CB(skb)->clone; struct sk_buff *skb_match = NULL, *skb_tmp; unsigned long flags; if (!clone) return; spin_lock_irqsave(&ocelot_port->tx_skbs.lock, flags); skb_queue_walk_safe(&ocelot_port->tx_skbs, skb, skb_tmp) { if (skb != clone) continue; __skb_unlink(skb, &ocelot_port->tx_skbs); skb_match = skb; break; } spin_unlock_irqrestore(&ocelot_port->tx_skbs.lock, flags); WARN_ONCE(!skb_match, "Could not find skb clone in TX timestamping list\n"); } #define work_to_xmit_work(w) \ container_of((w), struct felix_deferred_xmit_work, work) static void felix_port_deferred_xmit(struct kthread_work *work) { struct felix_deferred_xmit_work *xmit_work = work_to_xmit_work(work); struct dsa_switch *ds = xmit_work->dp->ds; struct sk_buff *skb = xmit_work->skb; u32 rew_op = ocelot_ptp_rew_op(skb); struct ocelot *ocelot = ds->priv; int port = xmit_work->dp->index; int retries = 10; ocelot_lock_inj_grp(ocelot, 0); do { if (ocelot_can_inject(ocelot, 0)) break; cpu_relax(); } while (--retries); if (!retries) { ocelot_unlock_inj_grp(ocelot, 0); dev_err(ocelot->dev, "port %d failed to inject skb\n", port); ocelot_port_purge_txtstamp_skb(ocelot, port, skb); kfree_skb(skb); return; } ocelot_port_inject_frame(ocelot, port, 0, 
rew_op, skb); ocelot_unlock_inj_grp(ocelot, 0); consume_skb(skb); kfree(xmit_work); } static int felix_connect_tag_protocol(struct dsa_switch *ds, enum dsa_tag_protocol proto) { struct ocelot_8021q_tagger_data *tagger_data; switch (proto) { case DSA_TAG_PROTO_OCELOT_8021Q: tagger_data = ocelot_8021q_tagger_data(ds); tagger_data->xmit_work_fn = felix_port_deferred_xmit; return 0; case DSA_TAG_PROTO_OCELOT: case DSA_TAG_PROTO_SEVILLE: return 0; default: return -EPROTONOSUPPORT; } } static int felix_setup(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); struct dsa_port *dp; int err; err = felix_init_structs(felix, ds->num_ports); if (err) return err; if (ocelot->targets[HSIO]) ocelot_pll5_init(ocelot); err = ocelot_init(ocelot); if (err) goto out_mdiobus_free; if (ocelot->ptp) { err = ocelot_init_timestamp(ocelot, felix->info->ptp_caps); if (err) { dev_err(ocelot->dev, "Timestamp initialization failed\n"); ocelot->ptp = 0; } } dsa_switch_for_each_available_port(dp, ds) { ocelot_init_port(ocelot, dp->index); if (felix->info->configure_serdes) felix->info->configure_serdes(ocelot, dp->index, dp->dn); /* Set the default QoS Classification based on PCP and DEI * bits of vlan tag. */ felix_port_qos_map_init(ocelot, dp->index); } if (felix->info->request_irq) { err = felix->info->request_irq(ocelot); if (err) { dev_err(ocelot->dev, "Failed to request IRQ: %pe\n", ERR_PTR(err)); goto out_deinit_ports; } } err = ocelot_devlink_sb_register(ocelot); if (err) goto out_deinit_ports; /* The initial tag protocol is NPI which won't fail during initial * setup, there's no real point in checking for errors. 
*/ felix_change_tag_protocol(ds, felix->tag_proto); ds->mtu_enforcement_ingress = true; ds->assisted_learning_on_cpu_port = true; ds->fdb_isolation = true; ds->max_num_bridges = ds->num_ports; return 0; out_deinit_ports: dsa_switch_for_each_available_port(dp, ds) ocelot_deinit_port(ocelot, dp->index); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); out_mdiobus_free: if (felix->info->mdio_bus_free) felix->info->mdio_bus_free(ocelot); return err; } static void felix_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); struct dsa_port *dp; rtnl_lock(); if (felix->tag_proto_ops) felix->tag_proto_ops->teardown(ds); rtnl_unlock(); dsa_switch_for_each_available_port(dp, ds) ocelot_deinit_port(ocelot, dp->index); ocelot_devlink_sb_unregister(ocelot); ocelot_deinit_timestamp(ocelot); ocelot_deinit(ocelot); if (felix->info->mdio_bus_free) felix->info->mdio_bus_free(ocelot); } static int felix_hwtstamp_get(struct dsa_switch *ds, int port, struct ifreq *ifr) { struct ocelot *ocelot = ds->priv; return ocelot_hwstamp_get(ocelot, port, ifr); } static int felix_hwtstamp_set(struct dsa_switch *ds, int port, struct ifreq *ifr) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); bool using_tag_8021q; int err; err = ocelot_hwstamp_set(ocelot, port, ifr); if (err) return err; using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; return felix_update_trapping_destinations(ds, using_tag_8021q); } static bool felix_check_xtr_pkt(struct ocelot *ocelot) { struct felix *felix = ocelot_to_felix(ocelot); int err = 0, grp = 0; if (felix->tag_proto != DSA_TAG_PROTO_OCELOT_8021Q) return false; if (!felix->info->quirk_no_xtr_irq) return false; ocelot_lock_xtr_grp(ocelot, grp); while (ocelot_read(ocelot, QS_XTR_DATA_PRESENT) & BIT(grp)) { struct sk_buff *skb; unsigned int type; err = ocelot_xtr_poll_frame(ocelot, grp, &skb); if (err) goto out; /* We trap to the CPU port module all PTP 
frames, but * felix_rxtstamp() only gets called for event frames. * So we need to avoid sending duplicate general * message frames by running a second BPF classifier * here and dropping those. */ __skb_push(skb, ETH_HLEN); type = ptp_classify_raw(skb); __skb_pull(skb, ETH_HLEN); if (type == PTP_CLASS_NONE) { kfree_skb(skb); continue; } netif_rx(skb); } out: if (err < 0) { dev_err_ratelimited(ocelot->dev, "Error during packet extraction: %pe\n", ERR_PTR(err)); ocelot_drain_cpu_queue(ocelot, 0); } ocelot_unlock_xtr_grp(ocelot, grp); return true; } static bool felix_rxtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb, unsigned int type) { u32 tstamp_lo = OCELOT_SKB_CB(skb)->tstamp_lo; struct skb_shared_hwtstamps *shhwtstamps; struct ocelot *ocelot = ds->priv; struct timespec64 ts; u32 tstamp_hi; u64 tstamp; switch (type & PTP_CLASS_PMASK) { case PTP_CLASS_L2: if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L2)) return false; break; case PTP_CLASS_IPV4: case PTP_CLASS_IPV6: if (!(ocelot->ports[port]->trap_proto & OCELOT_PROTO_PTP_L4)) return false; break; } /* If the "no XTR IRQ" workaround is in use, tell DSA to defer this skb * for RX timestamping. Then free it, and poll for its copy through * MMIO in the CPU port module, and inject that into the stack from * ocelot_xtr_poll(). 
*/ if (felix_check_xtr_pkt(ocelot)) { kfree_skb(skb); return true; } ocelot_ptp_gettime64(&ocelot->ptp_info, &ts); tstamp = ktime_set(ts.tv_sec, ts.tv_nsec); tstamp_hi = tstamp >> 32; if ((tstamp & 0xffffffff) < tstamp_lo) tstamp_hi--; tstamp = ((u64)tstamp_hi << 32) | tstamp_lo; shhwtstamps = skb_hwtstamps(skb); memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps)); shhwtstamps->hwtstamp = tstamp; return false; } static void felix_txtstamp(struct dsa_switch *ds, int port, struct sk_buff *skb) { struct ocelot *ocelot = ds->priv; struct sk_buff *clone = NULL; if (!ocelot->ptp) return; if (ocelot_port_txtstamp_request(ocelot, port, skb, &clone)) { dev_err_ratelimited(ds->dev, "port %d delivering skb without TX timestamp\n", port); return; } if (clone) OCELOT_SKB_CB(skb)->clone = clone; } static int felix_change_mtu(struct dsa_switch *ds, int port, int new_mtu) { struct ocelot *ocelot = ds->priv; struct ocelot_port *ocelot_port = ocelot->ports[port]; ocelot_port_set_maxlen(ocelot, port, new_mtu); mutex_lock(&ocelot->fwd_domain_lock); if (ocelot_port->taprio && ocelot->ops->tas_guard_bands_update) ocelot->ops->tas_guard_bands_update(ocelot, port); mutex_unlock(&ocelot->fwd_domain_lock); return 0; } static int felix_get_max_mtu(struct dsa_switch *ds, int port) { struct ocelot *ocelot = ds->priv; return ocelot_get_max_mtu(ocelot, port); } static int felix_cls_flower_add(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); bool using_tag_8021q; int err; err = ocelot_cls_flower_replace(ocelot, port, cls, ingress); if (err) return err; using_tag_8021q = felix->tag_proto == DSA_TAG_PROTO_OCELOT_8021Q; return felix_update_trapping_destinations(ds, using_tag_8021q); } static int felix_cls_flower_del(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { struct ocelot *ocelot = ds->priv; return ocelot_cls_flower_destroy(ocelot, port, cls, 
ingress); } static int felix_cls_flower_stats(struct dsa_switch *ds, int port, struct flow_cls_offload *cls, bool ingress) { struct ocelot *ocelot = ds->priv; return ocelot_cls_flower_stats(ocelot, port, cls, ingress); } static int felix_port_policer_add(struct dsa_switch *ds, int port, struct dsa_mall_policer_tc_entry *policer) { struct ocelot *ocelot = ds->priv; struct ocelot_policer pol = { .rate = div_u64(policer->rate_bytes_per_sec, 1000) * 8, .burst = policer->burst, }; return ocelot_port_policer_add(ocelot, port, &pol); } static void felix_port_policer_del(struct dsa_switch *ds, int port) { struct ocelot *ocelot = ds->priv; ocelot_port_policer_del(ocelot, port); } static int felix_port_mirror_add(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror, bool ingress, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; return ocelot_port_mirror_add(ocelot, port, mirror->to_local_port, ingress, extack); } static void felix_port_mirror_del(struct dsa_switch *ds, int port, struct dsa_mall_mirror_tc_entry *mirror) { struct ocelot *ocelot = ds->priv; ocelot_port_mirror_del(ocelot, port, mirror->ingress); } static int felix_port_setup_tc(struct dsa_switch *ds, int port, enum tc_setup_type type, void *type_data) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); if (felix->info->port_setup_tc) return felix->info->port_setup_tc(ds, port, type, type_data); else return -EOPNOTSUPP; } static int felix_sb_pool_get(struct dsa_switch *ds, unsigned int sb_index, u16 pool_index, struct devlink_sb_pool_info *pool_info) { struct ocelot *ocelot = ds->priv; return ocelot_sb_pool_get(ocelot, sb_index, pool_index, pool_info); } static int felix_sb_pool_set(struct dsa_switch *ds, unsigned int sb_index, u16 pool_index, u32 size, enum devlink_sb_threshold_type threshold_type, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; return ocelot_sb_pool_set(ocelot, sb_index, pool_index, size, threshold_type, 
extack);
}

/* The devlink shared-buffer (sb) callbacks below are thin forwarders into
 * the common ocelot switch library, which implements the actual buffer and
 * threshold handling.
 */

/* Read the per-port threshold within a shared buffer pool. */
static int felix_sb_port_pool_get(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_get(ocelot, port, sb_index, pool_index,
				       p_threshold);
}

/* Configure the per-port threshold within a shared buffer pool. */
static int felix_sb_port_pool_set(struct dsa_switch *ds, int port,
				  unsigned int sb_index, u16 pool_index,
				  u32 threshold, struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_port_pool_set(ocelot, port, sb_index, pool_index,
				       threshold, extack);
}

/* Read the pool binding and threshold of a {port, traffic class} pair. */
static int felix_sb_tc_pool_bind_get(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 *p_pool_index, u32 *p_threshold)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_get(ocelot, port, sb_index, tc_index,
					  pool_type, p_pool_index,
					  p_threshold);
}

/* Bind a {port, traffic class} pair to a pool and set its threshold. */
static int felix_sb_tc_pool_bind_set(struct dsa_switch *ds, int port,
				     unsigned int sb_index, u16 tc_index,
				     enum devlink_sb_pool_type pool_type,
				     u16 pool_index, u32 threshold,
				     struct netlink_ext_ack *extack)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_tc_pool_bind_set(ocelot, port, sb_index, tc_index,
					  pool_type, pool_index, threshold,
					  extack);
}

/* Snapshot the occupancy counters of the shared buffer. */
static int felix_sb_occ_snapshot(struct dsa_switch *ds,
				 unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_snapshot(ocelot, sb_index);
}

/* Clear the maximum-occupancy watermarks. */
static int felix_sb_occ_max_clear(struct dsa_switch *ds,
				  unsigned int sb_index)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_max_clear(ocelot, sb_index);
}

/* Report current and maximum occupancy of a per-port pool. */
static int felix_sb_occ_port_pool_get(struct dsa_switch *ds, int port,
				      unsigned int sb_index, u16 pool_index,
				      u32 *p_cur, u32 *p_max)
{
	struct ocelot *ocelot = ds->priv;

	return ocelot_sb_occ_port_pool_get(ocelot, port, sb_index, pool_index,
					   p_cur, p_max);
}

/* Report current and maximum occupancy of a {port, traffic class} pair. */
static int felix_sb_occ_tc_port_bind_get(struct dsa_switch *ds, int port,
					 unsigned int sb_index, u16 tc_index,
					 enum devlink_sb_pool_type pool_type,
					 u32 *p_cur, u32 *p_max)
{
	struct ocelot
*ocelot = ds->priv; return ocelot_sb_occ_tc_port_bind_get(ocelot, port, sb_index, tc_index, pool_type, p_cur, p_max); } static int felix_mrp_add(struct dsa_switch *ds, int port, const struct switchdev_obj_mrp *mrp) { struct ocelot *ocelot = ds->priv; return ocelot_mrp_add(ocelot, port, mrp); } static int felix_mrp_del(struct dsa_switch *ds, int port, const struct switchdev_obj_mrp *mrp) { struct ocelot *ocelot = ds->priv; return ocelot_mrp_add(ocelot, port, mrp); } static int felix_mrp_add_ring_role(struct dsa_switch *ds, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot *ocelot = ds->priv; return ocelot_mrp_add_ring_role(ocelot, port, mrp); } static int felix_mrp_del_ring_role(struct dsa_switch *ds, int port, const struct switchdev_obj_ring_role_mrp *mrp) { struct ocelot *ocelot = ds->priv; return ocelot_mrp_del_ring_role(ocelot, port, mrp); } static int felix_port_get_default_prio(struct dsa_switch *ds, int port) { struct ocelot *ocelot = ds->priv; return ocelot_port_get_default_prio(ocelot, port); } static int felix_port_set_default_prio(struct dsa_switch *ds, int port, u8 prio) { struct ocelot *ocelot = ds->priv; return ocelot_port_set_default_prio(ocelot, port, prio); } static int felix_port_get_dscp_prio(struct dsa_switch *ds, int port, u8 dscp) { struct ocelot *ocelot = ds->priv; return ocelot_port_get_dscp_prio(ocelot, port, dscp); } static int felix_port_add_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio) { struct ocelot *ocelot = ds->priv; return ocelot_port_add_dscp_prio(ocelot, port, dscp, prio); } static int felix_port_del_dscp_prio(struct dsa_switch *ds, int port, u8 dscp, u8 prio) { struct ocelot *ocelot = ds->priv; return ocelot_port_del_dscp_prio(ocelot, port, dscp, prio); } static int felix_get_mm(struct dsa_switch *ds, int port, struct ethtool_mm_state *state) { struct ocelot *ocelot = ds->priv; return ocelot_port_get_mm(ocelot, port, state); } static int felix_set_mm(struct dsa_switch *ds, int port, struct 
ethtool_mm_cfg *cfg, struct netlink_ext_ack *extack) { struct ocelot *ocelot = ds->priv; return ocelot_port_set_mm(ocelot, port, cfg, extack); } static void felix_get_mm_stats(struct dsa_switch *ds, int port, struct ethtool_mm_stats *stats) { struct ocelot *ocelot = ds->priv; ocelot_port_get_mm_stats(ocelot, port, stats); } static const struct phylink_mac_ops felix_phylink_mac_ops = { .mac_select_pcs = felix_phylink_mac_select_pcs, .mac_config = felix_phylink_mac_config, .mac_link_down = felix_phylink_mac_link_down, .mac_link_up = felix_phylink_mac_link_up, }; static const struct dsa_switch_ops felix_switch_ops = { .get_tag_protocol = felix_get_tag_protocol, .change_tag_protocol = felix_change_tag_protocol, .connect_tag_protocol = felix_connect_tag_protocol, .setup = felix_setup, .teardown = felix_teardown, .set_ageing_time = felix_set_ageing_time, .get_mm = felix_get_mm, .set_mm = felix_set_mm, .get_mm_stats = felix_get_mm_stats, .get_stats64 = felix_get_stats64, .get_pause_stats = felix_get_pause_stats, .get_rmon_stats = felix_get_rmon_stats, .get_eth_ctrl_stats = felix_get_eth_ctrl_stats, .get_eth_mac_stats = felix_get_eth_mac_stats, .get_eth_phy_stats = felix_get_eth_phy_stats, .get_strings = felix_get_strings, .get_ethtool_stats = felix_get_ethtool_stats, .get_sset_count = felix_get_sset_count, .get_ts_info = felix_get_ts_info, .phylink_get_caps = felix_phylink_get_caps, .port_enable = felix_port_enable, .port_fast_age = felix_port_fast_age, .port_fdb_dump = felix_fdb_dump, .port_fdb_add = felix_fdb_add, .port_fdb_del = felix_fdb_del, .lag_fdb_add = felix_lag_fdb_add, .lag_fdb_del = felix_lag_fdb_del, .port_mdb_add = felix_mdb_add, .port_mdb_del = felix_mdb_del, .port_pre_bridge_flags = felix_pre_bridge_flags, .port_bridge_flags = felix_bridge_flags, .port_bridge_join = felix_bridge_join, .port_bridge_leave = felix_bridge_leave, .port_lag_join = felix_lag_join, .port_lag_leave = felix_lag_leave, .port_lag_change = felix_lag_change, .port_stp_state_set = 
felix_bridge_stp_state_set, .port_vlan_filtering = felix_vlan_filtering, .port_vlan_add = felix_vlan_add, .port_vlan_del = felix_vlan_del, .port_hwtstamp_get = felix_hwtstamp_get, .port_hwtstamp_set = felix_hwtstamp_set, .port_rxtstamp = felix_rxtstamp, .port_txtstamp = felix_txtstamp, .port_change_mtu = felix_change_mtu, .port_max_mtu = felix_get_max_mtu, .port_policer_add = felix_port_policer_add, .port_policer_del = felix_port_policer_del, .port_mirror_add = felix_port_mirror_add, .port_mirror_del = felix_port_mirror_del, .cls_flower_add = felix_cls_flower_add, .cls_flower_del = felix_cls_flower_del, .cls_flower_stats = felix_cls_flower_stats, .port_setup_tc = felix_port_setup_tc, .devlink_sb_pool_get = felix_sb_pool_get, .devlink_sb_pool_set = felix_sb_pool_set, .devlink_sb_port_pool_get = felix_sb_port_pool_get, .devlink_sb_port_pool_set = felix_sb_port_pool_set, .devlink_sb_tc_pool_bind_get = felix_sb_tc_pool_bind_get, .devlink_sb_tc_pool_bind_set = felix_sb_tc_pool_bind_set, .devlink_sb_occ_snapshot = felix_sb_occ_snapshot, .devlink_sb_occ_max_clear = felix_sb_occ_max_clear, .devlink_sb_occ_port_pool_get = felix_sb_occ_port_pool_get, .devlink_sb_occ_tc_port_bind_get= felix_sb_occ_tc_port_bind_get, .port_mrp_add = felix_mrp_add, .port_mrp_del = felix_mrp_del, .port_mrp_add_ring_role = felix_mrp_add_ring_role, .port_mrp_del_ring_role = felix_mrp_del_ring_role, .tag_8021q_vlan_add = felix_tag_8021q_vlan_add, .tag_8021q_vlan_del = felix_tag_8021q_vlan_del, .port_get_default_prio = felix_port_get_default_prio, .port_set_default_prio = felix_port_set_default_prio, .port_get_dscp_prio = felix_port_get_dscp_prio, .port_add_dscp_prio = felix_port_add_dscp_prio, .port_del_dscp_prio = felix_port_del_dscp_prio, .port_set_host_flood = felix_port_set_host_flood, .port_change_conduit = felix_port_change_conduit, }; int felix_register_switch(struct device *dev, resource_size_t switch_base, int num_flooding_pgids, bool ptp, bool mm_supported, enum dsa_tag_protocol 
init_tag_proto, const struct felix_info *info) { struct dsa_switch *ds; struct ocelot *ocelot; struct felix *felix; int err; felix = devm_kzalloc(dev, sizeof(*felix), GFP_KERNEL); if (!felix) return -ENOMEM; ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL); if (!ds) return -ENOMEM; dev_set_drvdata(dev, felix); ocelot = &felix->ocelot; ocelot->dev = dev; ocelot->num_flooding_pgids = num_flooding_pgids; ocelot->ptp = ptp; ocelot->mm_supported = mm_supported; felix->info = info; felix->switch_base = switch_base; felix->ds = ds; felix->tag_proto = init_tag_proto; ds->dev = dev; ds->num_ports = info->num_ports; ds->num_tx_queues = OCELOT_NUM_TC; ds->ops = &felix_switch_ops; ds->phylink_mac_ops = &felix_phylink_mac_ops; ds->priv = ocelot; err = dsa_register_switch(ds); if (err) dev_err_probe(dev, err, "Failed to register DSA switch\n"); return err; } EXPORT_SYMBOL_GPL(felix_register_switch); struct net_device *felix_port_to_netdev(struct ocelot *ocelot, int port) { struct felix *felix = ocelot_to_felix(ocelot); struct dsa_switch *ds = felix->ds; if (!dsa_is_user_port(ds, port)) return NULL; return dsa_to_port(ds, port)->user; } EXPORT_SYMBOL_GPL(felix_port_to_netdev); int felix_netdev_to_port(struct net_device *dev) { struct dsa_port *dp; dp = dsa_port_from_netdev(dev); if (IS_ERR(dp)) return -EINVAL; return dp->index; } EXPORT_SYMBOL_GPL(felix_netdev_to_port); MODULE_DESCRIPTION("Felix DSA library"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2019 Facebook */ #include <test_progs.h> #include "test_static_linked.skel.h" void test_static_linked(void) { int err; struct test_static_linked* skel; skel = test_static_linked__open(); if (!ASSERT_OK_PTR(skel, "skel_open")) return; skel->rodata->rovar1 = 1; skel->rodata->rovar2 = 4; err = test_static_linked__load(skel); if (!ASSERT_OK(err, "skel_load")) goto cleanup; err = test_static_linked__attach(skel); if (!ASSERT_OK(err, "skel_attach")) goto cleanup; /* trigger */ usleep(1); ASSERT_EQ(skel->data->var1, 1 * 2 + 2 + 3, "var1"); ASSERT_EQ(skel->data->var2, 4 * 3 + 5 + 6, "var2"); cleanup: test_static_linked__destroy(skel); }
/*
 * Timer/Counter Unit (TC) registers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#ifndef __SOC_ATMEL_TCB_H
#define __SOC_ATMEL_TCB_H

#include <linux/compiler.h>
#include <linux/list.h>

/*
 * Many 32-bit Atmel SOCs include one or more TC blocks, each of which holds
 * three general-purpose 16-bit timers.  These timers share one register bank.
 * Depending on the SOC, each timer may have its own clock and IRQ, or those
 * may be shared by the whole TC block.
 *
 * These TC blocks may have up to nine external pins:  TCLK0..2 signals for
 * clocks or clock gates, and per-timer TIOA and TIOB signals used for PWM
 * or triggering.  Those pins need to be set up for use with the TC block,
 * else they will be used as GPIOs or for a different controller.
 *
 * Although we expect each TC block to have a platform_device node, those
 * nodes are not what drivers bind to.  Instead, they ask for a specific
 * TC block, by number ... which is a common approach on systems with many
 * timers.  Then they use clk_get() and platform_get_irq() to get clock and
 * IRQ resources.
 */

struct clk;

/**
 * struct atmel_tcb_config - SoC data for a Timer/Counter Block
 * @counter_width: size in bits of a timer counter register
 * @has_gclk: boolean indicating if a timer counter has a generic clock
 * @has_qdec: boolean indicating if a timer counter has a quadrature
 * decoder.
 */
struct atmel_tcb_config {
	size_t	counter_width;
	bool    has_gclk;
	bool    has_qdec;
};

/**
 * struct atmel_tc - information about a Timer/Counter Block
 * @pdev: physical device
 * @regs: mapping through which the I/O registers can be accessed
 * @id: block id
 * @tcb_config: configuration data from SoC
 * @irq: irq for each of the three channels
 * @clk: internal clock source for each of the three channels
 * @node: list node, for tclib internal use
 * @allocated: if already used, for tclib internal use
 *
 * On some platforms, each TC channel has its own clocks and IRQs,
 * while on others, all TC channels share the same clock and IRQ.
 * Drivers should clk_enable() all the clocks they need even though
 * all the entries in @clk may point to the same physical clock.
 * Likewise, drivers should request irqs independently for each
 * channel, but they must use IRQF_SHARED in case some of the entries
 * in @irq are actually the same IRQ.
 */
struct atmel_tc {
	struct platform_device	*pdev;
	void __iomem		*regs;
	int                     id;
	const struct atmel_tcb_config *tcb_config;
	int			irq[3];
	struct clk		*clk[3];
	struct clk		*slow_clk;	/* 32 KiHz slow clock input */
	struct list_head	node;
	bool			allocated;
};

/* platform-specific ATMEL_TC_TIMER_CLOCKx divisors (0 means 32KiHz) */
extern const u8 atmel_tc_divisors[5];

/*
 * Two registers have block-wide controls.  These are:  configuring the three
 * "external" clocks (or event sources) used by the timer channels; and
 * synchronizing the timers by resetting them all at once.
 *
 * "External" can mean "external to chip" using the TCLK0, TCLK1, or TCLK2
 * signals.  Or, it can mean "external to timer", using the TIOA output from
 * one of the other two timers that's being run in waveform mode.
 */

#define ATMEL_TC_BCR	0xc0		/* TC Block Control Register */
#define     ATMEL_TC_SYNC	(1 << 0)	/* synchronize timers */

#define ATMEL_TC_BMR	0xc4		/* TC Block Mode Register */
#define     ATMEL_TC_TC0XC0S	(3 << 0)	/* external clock 0 source */
#define        ATMEL_TC_TC0XC0S_TCLK0	(0 << 0)
#define        ATMEL_TC_TC0XC0S_NONE	(1 << 0)
#define        ATMEL_TC_TC0XC0S_TIOA1	(2 << 0)
#define        ATMEL_TC_TC0XC0S_TIOA2	(3 << 0)
#define     ATMEL_TC_TC1XC1S	(3 << 2)	/* external clock 1 source */
#define        ATMEL_TC_TC1XC1S_TCLK1	(0 << 2)
#define        ATMEL_TC_TC1XC1S_NONE	(1 << 2)
#define        ATMEL_TC_TC1XC1S_TIOA0	(2 << 2)
#define        ATMEL_TC_TC1XC1S_TIOA2	(3 << 2)
#define     ATMEL_TC_TC2XC2S	(3 << 4)	/* external clock 2 source */
#define        ATMEL_TC_TC2XC2S_TCLK2	(0 << 4)
#define        ATMEL_TC_TC2XC2S_NONE	(1 << 4)
#define        ATMEL_TC_TC2XC2S_TIOA0	(2 << 4)
#define        ATMEL_TC_TC2XC2S_TIOA1	(3 << 4)

/*
 * Each TC block has three "channels", each with one counter and controls.
 *
 * Note that the semantics of ATMEL_TC_TIMER_CLOCKx (input clock selection
 * when it's not "external") is silicon-specific.  AT91 platforms use one
 * set of definitions; AVR32 platforms use a different set.  Don't hard-wire
 * such knowledge into your code, use the global "atmel_tc_divisors" ...
 * where index N is the divisor for clock N+1, else zero to indicate it uses
 * the 32 KiHz clock.
 *
 * The timers can be chained in various ways, and operated in "waveform"
 * generation mode (including PWM) or "capture" mode (to time events).  In
 * both modes, behavior can be configured in many ways.
 *
 * Each timer has two I/O pins, TIOA and TIOB.  Waveform mode uses TIOA as a
 * PWM output, and TIOB as either another PWM or as a trigger.  Capture mode
 * uses them only as inputs.
 */
#define ATMEL_TC_CHAN(idx)	((idx)*0x40)
#define ATMEL_TC_REG(idx, reg)	(ATMEL_TC_CHAN(idx) + ATMEL_TC_ ## reg)

#define ATMEL_TC_CCR	0x00		/* Channel Control Register */
#define     ATMEL_TC_CLKEN	(1 << 0)	/* clock enable */
#define     ATMEL_TC_CLKDIS	(1 << 1)	/* clock disable */
#define     ATMEL_TC_SWTRG	(1 << 2)	/* software trigger */

#define ATMEL_TC_CMR	0x04		/* Channel Mode Register */

/* Both modes share some CMR bits */
#define     ATMEL_TC_TCCLKS	(7 << 0)	/* clock source */
#define        ATMEL_TC_TIMER_CLOCK1	(0 << 0)
#define        ATMEL_TC_TIMER_CLOCK2	(1 << 0)
#define        ATMEL_TC_TIMER_CLOCK3	(2 << 0)
#define        ATMEL_TC_TIMER_CLOCK4	(3 << 0)
#define        ATMEL_TC_TIMER_CLOCK5	(4 << 0)
#define        ATMEL_TC_XC0		(5 << 0)
#define        ATMEL_TC_XC1		(6 << 0)
#define        ATMEL_TC_XC2		(7 << 0)
#define     ATMEL_TC_CLKI	(1 << 3)	/* clock invert */
#define     ATMEL_TC_BURST	(3 << 4)	/* clock gating */
#define        ATMEL_TC_GATE_NONE	(0 << 4)
#define        ATMEL_TC_GATE_XC0	(1 << 4)
#define        ATMEL_TC_GATE_XC1	(2 << 4)
#define        ATMEL_TC_GATE_XC2	(3 << 4)
#define     ATMEL_TC_WAVE	(1 << 15)	/* true = Waveform mode */

/* CAPTURE mode CMR bits */
#define     ATMEL_TC_LDBSTOP	(1 << 6)	/* counter stops on RB load */
#define     ATMEL_TC_LDBDIS	(1 << 7)	/* counter disable on RB load */
#define     ATMEL_TC_ETRGEDG	(3 << 8)	/* external trigger edge */
#define        ATMEL_TC_ETRGEDG_NONE	(0 << 8)
#define        ATMEL_TC_ETRGEDG_RISING	(1 << 8)
#define        ATMEL_TC_ETRGEDG_FALLING	(2 << 8)
#define        ATMEL_TC_ETRGEDG_BOTH	(3 << 8)
#define     ATMEL_TC_ABETRG	(1 << 10)	/* external trigger is TIOA? */
#define     ATMEL_TC_CPCTRG	(1 << 14)	/* RC compare trigger enable */
#define     ATMEL_TC_LDRA	(3 << 16)	/* RA loading edge (of TIOA) */
#define        ATMEL_TC_LDRA_NONE	(0 << 16)
#define        ATMEL_TC_LDRA_RISING	(1 << 16)
#define        ATMEL_TC_LDRA_FALLING	(2 << 16)
#define        ATMEL_TC_LDRA_BOTH	(3 << 16)
#define     ATMEL_TC_LDRB	(3 << 18)	/* RB loading edge (of TIOA) */
#define        ATMEL_TC_LDRB_NONE	(0 << 18)
#define        ATMEL_TC_LDRB_RISING	(1 << 18)
#define        ATMEL_TC_LDRB_FALLING	(2 << 18)
#define        ATMEL_TC_LDRB_BOTH	(3 << 18)

/* WAVEFORM mode CMR bits */
#define     ATMEL_TC_CPCSTOP	(1 <<  6)	/* RC compare stops counter */
#define     ATMEL_TC_CPCDIS	(1 <<  7)	/* RC compare disables counter */
#define     ATMEL_TC_EEVTEDG	(3 <<  8)	/* external event edge */
#define        ATMEL_TC_EEVTEDG_NONE	(0 << 8)
#define        ATMEL_TC_EEVTEDG_RISING	(1 << 8)
#define        ATMEL_TC_EEVTEDG_FALLING	(2 << 8)
#define        ATMEL_TC_EEVTEDG_BOTH	(3 << 8)
#define     ATMEL_TC_EEVT	(3 << 10)	/* external event source */
#define        ATMEL_TC_EEVT_TIOB	(0 << 10)
#define        ATMEL_TC_EEVT_XC0	(1 << 10)
#define        ATMEL_TC_EEVT_XC1	(2 << 10)
#define        ATMEL_TC_EEVT_XC2	(3 << 10)
#define     ATMEL_TC_ENETRG	(1 << 12)	/* external event is trigger */
#define     ATMEL_TC_WAVESEL	(3 << 13)	/* waveform type */
#define        ATMEL_TC_WAVESEL_UP	(0 << 13)
#define        ATMEL_TC_WAVESEL_UPDOWN	(1 << 13)
#define        ATMEL_TC_WAVESEL_UP_AUTO		(2 << 13)
#define        ATMEL_TC_WAVESEL_UPDOWN_AUTO	(3 << 13)
#define     ATMEL_TC_ACPA	(3 << 16)	/* RA compare changes TIOA */
#define        ATMEL_TC_ACPA_NONE	(0 << 16)
#define        ATMEL_TC_ACPA_SET	(1 << 16)
#define        ATMEL_TC_ACPA_CLEAR	(2 << 16)
#define        ATMEL_TC_ACPA_TOGGLE	(3 << 16)
#define     ATMEL_TC_ACPC	(3 << 18)	/* RC compare changes TIOA */
#define        ATMEL_TC_ACPC_NONE	(0 << 18)
#define        ATMEL_TC_ACPC_SET	(1 << 18)
#define        ATMEL_TC_ACPC_CLEAR	(2 << 18)
#define        ATMEL_TC_ACPC_TOGGLE	(3 << 18)
#define     ATMEL_TC_AEEVT	(3 << 20)	/* external event changes TIOA */
#define        ATMEL_TC_AEEVT_NONE	(0 << 20)
#define        ATMEL_TC_AEEVT_SET	(1 << 20)
#define        ATMEL_TC_AEEVT_CLEAR	(2 << 20)
#define        ATMEL_TC_AEEVT_TOGGLE	(3 << 20)
#define     ATMEL_TC_ASWTRG	(3 << 22)	/* software trigger changes TIOA */
#define        ATMEL_TC_ASWTRG_NONE	(0 << 22)
#define        ATMEL_TC_ASWTRG_SET	(1 << 22)
#define        ATMEL_TC_ASWTRG_CLEAR	(2 << 22)
#define        ATMEL_TC_ASWTRG_TOGGLE	(3 << 22)
#define     ATMEL_TC_BCPB	(3 << 24)	/* RB compare changes TIOB */
#define        ATMEL_TC_BCPB_NONE	(0 << 24)
#define        ATMEL_TC_BCPB_SET	(1 << 24)
#define        ATMEL_TC_BCPB_CLEAR	(2 << 24)
#define        ATMEL_TC_BCPB_TOGGLE	(3 << 24)
#define     ATMEL_TC_BCPC	(3 << 26)	/* RC compare changes TIOB */
#define        ATMEL_TC_BCPC_NONE	(0 << 26)
#define        ATMEL_TC_BCPC_SET	(1 << 26)
#define        ATMEL_TC_BCPC_CLEAR	(2 << 26)
#define        ATMEL_TC_BCPC_TOGGLE	(3 << 26)
#define     ATMEL_TC_BEEVT	(3 << 28)	/* external event changes TIOB */
#define        ATMEL_TC_BEEVT_NONE	(0 << 28)
#define        ATMEL_TC_BEEVT_SET	(1 << 28)
#define        ATMEL_TC_BEEVT_CLEAR	(2 << 28)
#define        ATMEL_TC_BEEVT_TOGGLE	(3 << 28)
#define     ATMEL_TC_BSWTRG	(3 << 30)	/* software trigger changes TIOB */
#define        ATMEL_TC_BSWTRG_NONE	(0 << 30)
#define        ATMEL_TC_BSWTRG_SET	(1 << 30)
#define        ATMEL_TC_BSWTRG_CLEAR	(2 << 30)
#define        ATMEL_TC_BSWTRG_TOGGLE	(3 << 30)

#define ATMEL_TC_CV	0x10		/* counter Value */
#define ATMEL_TC_RA	0x14		/* register A */
#define ATMEL_TC_RB	0x18		/* register B */
#define ATMEL_TC_RC	0x1c		/* register C */

#define ATMEL_TC_SR	0x20		/* status (read-only) */
/* Status-only flags */
#define     ATMEL_TC_CLKSTA	(1 << 16)	/* clock enabled */
#define     ATMEL_TC_MTIOA	(1 << 17)	/* TIOA mirror */
#define     ATMEL_TC_MTIOB	(1 << 18)	/* TIOB mirror */

#define ATMEL_TC_IER	0x24		/* interrupt enable (write-only) */
#define ATMEL_TC_IDR	0x28		/* interrupt disable (write-only) */
#define ATMEL_TC_IMR	0x2c		/* interrupt mask (read-only) */

/* Status and IRQ flags */
#define     ATMEL_TC_COVFS	(1 <<  0)	/* counter overflow */
#define     ATMEL_TC_LOVRS	(1 <<  1)	/* load overrun */
#define     ATMEL_TC_CPAS	(1 <<  2)	/* RA compare */
#define     ATMEL_TC_CPBS	(1 <<  3)	/* RB compare */
#define     ATMEL_TC_CPCS	(1 <<  4)	/* RC compare */
#define     ATMEL_TC_LDRAS	(1 <<  5)	/* RA loading */
#define     ATMEL_TC_LDRBS	(1 <<  6)	/* RB loading */
#define     ATMEL_TC_ETRGS	(1 <<  7)	/* external trigger */
#define     ATMEL_TC_ALL_IRQ	(ATMEL_TC_COVFS | ATMEL_TC_LOVRS | \
				 ATMEL_TC_CPAS | ATMEL_TC_CPBS | \
				 ATMEL_TC_CPCS | ATMEL_TC_LDRAS | \
				 ATMEL_TC_LDRBS | ATMEL_TC_ETRGS) \
				 /* all IRQs */

#endif
// SPDX-License-Identifier: GPL-2.0-or-later
/* Watchdog timer for machines with the CS5535/CS5536 companion chip
 *
 * Copyright (C) 2006-2007, Advanced Micro Devices, Inc.
 * Copyright (C) 2009  Andres Salomon <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/fs.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>
#include <linux/uaccess.h>

#include <linux/cs5535.h>

#define GEODEWDT_HZ 500
#define GEODEWDT_SCALE 6
/* With a 500 Hz tick and a 16-bit comparator, 131 s is the longest timeout */
#define GEODEWDT_MAX_SECONDS 131

#define WDT_FLAGS_OPEN 1
#define WDT_FLAGS_ORPHAN 2

#define DRV_NAME "geodewdt"
#define WATCHDOG_NAME "Geode GX/LX WDT"
#define WATCHDOG_TIMEOUT 60

static int timeout = WATCHDOG_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. 1<= timeout <=131, default=" __MODULE_STRING(WATCHDOG_TIMEOUT) ".");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

static struct platform_device *geodewdt_platform_device;
static unsigned long wdt_flags;
static struct cs5535_mfgpt_timer *wdt_timer;
/* set when userspace wrote the magic 'V' character; allows a clean close */
static int safe_close;

/* Kick the watchdog: stop, zero, and re-enable the MFGPT counter. */
static void geodewdt_ping(void)
{
	/* Stop the counter */
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);

	/* Reset the counter */
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);

	/* Enable the counter */
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);
}

/* Stop the watchdog entirely (counter disabled and cleared). */
static void geodewdt_disable(void)
{
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
}

/*
 * Program a new timeout in seconds into comparator 2 and restart the
 * counter.  Returns -EINVAL if @val is outside 1..GEODEWDT_MAX_SECONDS.
 */
static int geodewdt_set_heartbeat(int val)
{
	if (val < 1 || val > GEODEWDT_MAX_SECONDS)
		return -EINVAL;

	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, 0);
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2, val * GEODEWDT_HZ);
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_COUNTER, 0);
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP, MFGPT_SETUP_CNTEN);

	timeout = val;
	return 0;
}

/*
 * /dev/watchdog open: single-opener device.  Takes a module reference
 * unless we are re-adopting an orphaned (unexpectedly closed) watchdog.
 */
static int geodewdt_open(struct inode *inode, struct file *file)
{
	if (test_and_set_bit(WDT_FLAGS_OPEN, &wdt_flags))
		return -EBUSY;

	if (!test_and_clear_bit(WDT_FLAGS_ORPHAN, &wdt_flags))
		__module_get(THIS_MODULE);

	geodewdt_ping();
	return stream_open(inode, file);
}

/*
 * /dev/watchdog close: only stops the hardware if the magic character
 * was written beforehand; otherwise the watchdog keeps running and the
 * device is marked orphaned.
 */
static int geodewdt_release(struct inode *inode, struct file *file)
{
	if (safe_close) {
		geodewdt_disable();
		module_put(THIS_MODULE);
	} else {
		pr_crit("Unexpected close - watchdog is not stopping\n");
		geodewdt_ping();

		set_bit(WDT_FLAGS_ORPHAN, &wdt_flags);
	}

	clear_bit(WDT_FLAGS_OPEN, &wdt_flags);
	safe_close = 0;
	return 0;
}

/*
 * Any write pings the watchdog.  Unless "nowayout" is set, a 'V' anywhere
 * in the buffer arms the magic-close path for the next release().
 */
static ssize_t geodewdt_write(struct file *file, const char __user *data,
			      size_t len, loff_t *ppos)
{
	if (len) {
		if (!nowayout) {
			size_t i;

			safe_close = 0;

			for (i = 0; i != len; i++) {
				char c;

				if (get_user(c, data + i))
					return -EFAULT;

				if (c == 'V')
					safe_close = 1;
			}
		}

		geodewdt_ping();
	}
	return len;
}

/* Standard watchdog-API ioctl dispatch (see Documentation/watchdog). */
static long geodewdt_ioctl(struct file *file, unsigned int cmd,
			   unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int interval;

	static const struct watchdog_info ident = {
		.options = WDIOF_SETTIMEOUT
		| WDIOF_KEEPALIVEPING
		| WDIOF_MAGICCLOSE,
		.firmware_version = 1,
		.identity = WATCHDOG_NAME,
	};

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		return copy_to_user(argp, &ident,
				    sizeof(ident)) ? -EFAULT : 0;
	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		return put_user(0, p);

	case WDIOC_SETOPTIONS:
	{
		int options, ret = -EINVAL;

		if (get_user(options, p))
			return -EFAULT;

		if (options & WDIOS_DISABLECARD) {
			geodewdt_disable();
			ret = 0;
		}

		if (options & WDIOS_ENABLECARD) {
			geodewdt_ping();
			ret = 0;
		}

		return ret;
	}
	case WDIOC_KEEPALIVE:
		geodewdt_ping();
		return 0;

	case WDIOC_SETTIMEOUT:
		if (get_user(interval, p))
			return -EFAULT;

		if (geodewdt_set_heartbeat(interval))
			return -EINVAL;
		fallthrough;
	case WDIOC_GETTIMEOUT:
		return put_user(timeout, p);

	default:
		return -ENOTTY;
	}

	return 0;
}

static const struct file_operations geodewdt_fops = {
	.owner          = THIS_MODULE,
	.write          = geodewdt_write,
	.unlocked_ioctl = geodewdt_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.open           = geodewdt_open,
	.release        = geodewdt_release,
};

static struct miscdevice geodewdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &geodewdt_fops,
};

/*
 * Probe: grab any free MFGPT timer, set it to fire a system reset via
 * comparator 2 when the programmed timeout expires, then register the
 * misc watchdog device.
 */
static int __init geodewdt_probe(struct platform_device *dev)
{
	int ret;

	wdt_timer = cs5535_mfgpt_alloc_timer(MFGPT_TIMER_ANY, MFGPT_DOMAIN_WORKING);
	if (!wdt_timer) {
		pr_err("No timers were available\n");
		return -ENODEV;
	}

	/* Set up the timer */
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_SETUP,
			   GEODEWDT_SCALE | (3 << 8));

	/* Set up comparator 2 to reset when the event fires */
	cs5535_mfgpt_toggle_event(wdt_timer, MFGPT_CMP2, MFGPT_EVENT_RESET, 1);

	/* Set up the initial timeout */
	cs5535_mfgpt_write(wdt_timer, MFGPT_REG_CMP2,
			   timeout * GEODEWDT_HZ);

	ret = misc_register(&geodewdt_miscdev);

	return ret;
}

static void geodewdt_remove(struct platform_device *dev)
{
	misc_deregister(&geodewdt_miscdev);
}

static void geodewdt_shutdown(struct platform_device *dev)
{
	geodewdt_disable();
}

/* No .probe here: the driver is bound once via platform_driver_probe(). */
static struct platform_driver geodewdt_driver = {
	.remove = geodewdt_remove,
	.shutdown = geodewdt_shutdown,
	.driver = {
		.name = DRV_NAME,
	},
};

static int __init geodewdt_init(void)
{
	int ret;

	geodewdt_platform_device = platform_device_register_simple(DRV_NAME,
								   -1, NULL, 0);
	if (IS_ERR(geodewdt_platform_device))
		return PTR_ERR(geodewdt_platform_device);

	ret = platform_driver_probe(&geodewdt_driver, geodewdt_probe);
	if (ret)
		goto err;

	return 0;
err:
	platform_device_unregister(geodewdt_platform_device);
	return ret;
}

static void __exit geodewdt_exit(void)
{
	platform_device_unregister(geodewdt_platform_device);
	platform_driver_unregister(&geodewdt_driver);
}

module_init(geodewdt_init);
module_exit(geodewdt_exit);

MODULE_AUTHOR("Advanced Micro Devices, Inc");
MODULE_DESCRIPTION("Geode GX/LX Watchdog Driver");
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2023 Red Hat
 */

#ifndef STATISTICS_H
#define STATISTICS_H

#include "types.h"

/* Bump this whenever the layout of any statistics struct below changes */
enum {
	STATISTICS_VERSION = 36,
};

struct block_allocator_statistics {
	/* The total number of slabs from which blocks may be allocated */
	u64 slab_count;
	/* The total number of slabs from which blocks have ever been allocated */
	u64 slabs_opened;
	/* The number of times since loading that a slab has been re-opened */
	u64 slabs_reopened;
};

/**
 * Counters for tracking the number of items written (blocks, requests, etc.)
 * that keep track of totals at steps in the write pipeline. Three counters
 * allow the number of buffered, in-memory items and the number of in-flight,
 * unacknowledged writes to be derived, while still tracking totals for
 * reporting purposes
 */
struct commit_statistics {
	/* The total number of items on which processing has started */
	u64 started;
	/* The total number of items for which a write operation has been issued */
	u64 written;
	/* The total number of items for which a write operation has completed */
	u64 committed;
};

/** Counters for events in the recovery journal */
struct recovery_journal_statistics {
	/* Number of times the on-disk journal was full */
	u64 disk_full;
	/* Number of times the recovery journal requested slab journal commits. */
	u64 slab_journal_commits_requested;
	/* Write/Commit totals for individual journal entries */
	struct commit_statistics entries;
	/* Write/Commit totals for journal blocks */
	struct commit_statistics blocks;
};

/** The statistics for the compressed block packer. */
struct packer_statistics {
	/* Number of compressed data items written since startup */
	u64 compressed_fragments_written;
	/* Number of blocks containing compressed items written since startup */
	u64 compressed_blocks_written;
	/* Number of VIOs that are pending in the packer */
	u64 compressed_fragments_in_packer;
};

/** The statistics for the slab journals. */
struct slab_journal_statistics {
	/* Number of times the on-disk journal was full */
	u64 disk_full_count;
	/* Number of times an entry was added over the flush threshold */
	u64 flush_count;
	/* Number of times an entry was added over the block threshold */
	u64 blocked_count;
	/* Number of times a tail block was written */
	u64 blocks_written;
	/* Number of times we had to wait for the tail to write */
	u64 tail_busy_count;
};

/** The statistics for the slab summary. */
struct slab_summary_statistics {
	/* Number of blocks written */
	u64 blocks_written;
};

/** The statistics for the reference counts. */
struct ref_counts_statistics {
	/* Number of reference blocks written */
	u64 blocks_written;
};

/** The statistics for the block map. */
struct block_map_statistics {
	/* number of dirty (resident) pages */
	u32 dirty_pages;
	/* number of clean (resident) pages */
	u32 clean_pages;
	/* number of free pages */
	u32 free_pages;
	/* number of pages in failed state */
	u32 failed_pages;
	/* number of pages incoming */
	u32 incoming_pages;
	/* number of pages outgoing */
	u32 outgoing_pages;
	/* how many times free page not avail */
	u32 cache_pressure;
	/* number of get_vdo_page() calls for read */
	u64 read_count;
	/* number of get_vdo_page() calls for write */
	u64 write_count;
	/* number of times pages failed to read */
	u64 failed_reads;
	/* number of times pages failed to write */
	u64 failed_writes;
	/* number of gets that are reclaimed */
	u64 reclaimed;
	/* number of gets for outgoing pages */
	u64 read_outgoing;
	/* number of gets that were already there */
	u64 found_in_cache;
	/* number of gets requiring discard */
	u64 discard_required;
	/* number of gets enqueued for their page */
	u64 wait_for_page;
	/* number of gets that have to fetch */
	u64 fetch_required;
	/* number of page fetches */
	u64 pages_loaded;
	/* number of page saves */
	u64 pages_saved;
	/* the number of flushes issued */
	u64 flush_count;
};

/** The dedupe statistics from hash locks */
struct hash_lock_statistics {
	/* Number of times the UDS advice proved correct */
	u64 dedupe_advice_valid;
	/* Number of times the UDS advice proved incorrect */
	u64 dedupe_advice_stale;
	/* Number of writes with the same data as another in-flight write */
	u64 concurrent_data_matches;
	/* Number of writes whose hash collided with an in-flight write */
	u64 concurrent_hash_collisions;
	/* Current number of dedupe queries that are in flight */
	u32 curr_dedupe_queries;
};

/** Counts of error conditions in VDO. */
struct error_statistics {
	/* number of times VDO got an invalid dedupe advice PBN from UDS */
	u64 invalid_advice_pbn_count;
	/* number of times a VIO completed with a VDO_NO_SPACE error */
	u64 no_space_error_count;
	/* number of times a VIO completed with a VDO_READ_ONLY error */
	u64 read_only_error_count;
};

struct bio_stats {
	/* Number of REQ_OP_READ bios */
	u64 read;
	/* Number of REQ_OP_WRITE bios with data */
	u64 write;
	/* Number of bios tagged with REQ_PREFLUSH and containing no data */
	u64 empty_flush;
	/* Number of REQ_OP_DISCARD bios */
	u64 discard;
	/* Number of bios tagged with REQ_PREFLUSH */
	u64 flush;
	/* Number of bios tagged with REQ_FUA */
	u64 fua;
};

struct memory_usage {
	/* Tracked bytes currently allocated. */
	u64 bytes_used;
	/* Maximum tracked bytes allocated. */
	u64 peak_bytes_used;
};

/** UDS index statistics */
struct index_statistics {
	/* Number of records stored in the index */
	u64 entries_indexed;
	/* Number of post calls that found an existing entry */
	u64 posts_found;
	/* Number of post calls that added a new entry */
	u64 posts_not_found;
	/* Number of query calls that found an existing entry */
	u64 queries_found;
	/* Number of query calls that added a new entry */
	u64 queries_not_found;
	/* Number of update calls that found an existing entry */
	u64 updates_found;
	/* Number of update calls that added a new entry */
	u64 updates_not_found;
	/* Number of entries discarded */
	u64 entries_discarded;
};

/** The statistics of the vdo service. */
struct vdo_statistics {
	/* Must equal STATISTICS_VERSION for the consumer to trust the rest */
	u32 version;
	/* Number of blocks used for data */
	u64 data_blocks_used;
	/* Number of blocks used for VDO metadata */
	u64 overhead_blocks_used;
	/* Number of logical blocks that are currently mapped to physical blocks */
	u64 logical_blocks_used;
	/* number of physical blocks */
	block_count_t physical_blocks;
	/* number of logical blocks */
	block_count_t logical_blocks;
	/* Size of the block map page cache, in bytes */
	u64 block_map_cache_size;
	/* The physical block size */
	u64 block_size;
	/* Number of times the VDO has successfully recovered */
	u64 complete_recoveries;
	/* Number of times the VDO has recovered from read-only mode */
	u64 read_only_recoveries;
	/* String describing the operating mode of the VDO */
	char mode[15];
	/* Whether the VDO is in recovery mode */
	bool in_recovery_mode;
	/* What percentage of recovery mode work has been completed */
	u8 recovery_percentage;
	/* The statistics for the compressed block packer */
	struct packer_statistics packer;
	/* Counters for events in the block allocator */
	struct block_allocator_statistics allocator;
	/* Counters for events in the recovery journal */
	struct recovery_journal_statistics journal;
	/* The statistics for the slab journals */
	struct slab_journal_statistics slab_journal;
	/* The statistics for the slab summary */
	struct slab_summary_statistics slab_summary;
	/* The statistics for the reference counts */
	struct ref_counts_statistics ref_counts;
	/* The statistics for the block map */
	struct block_map_statistics block_map;
	/* The dedupe statistics from hash locks */
	struct hash_lock_statistics hash_lock;
	/* Counts of error conditions */
	struct error_statistics errors;
	/* The VDO instance */
	u32 instance;
	/* Current number of active VIOs */
	u32 current_vios_in_progress;
	/* Maximum number of active VIOs */
	u32 max_vios;
	/* Number of times the UDS index was too slow in responding */
	u64 dedupe_advice_timeouts;
	/* Number of flush requests submitted to the storage device */
	u64 flush_out;
	/* Logical block size */
	u64 logical_block_size;
	/* Bios submitted into VDO from above */
	struct bio_stats bios_in;
	struct bio_stats bios_in_partial;
	/* Bios submitted onward for user data */
	struct bio_stats bios_out;
	/* Bios submitted onward for metadata */
	struct bio_stats bios_meta;
	struct bio_stats bios_journal;
	struct bio_stats bios_page_cache;
	struct bio_stats bios_out_completed;
	struct bio_stats bios_meta_completed;
	struct bio_stats bios_journal_completed;
	struct bio_stats bios_page_cache_completed;
	struct bio_stats bios_acknowledged;
	struct bio_stats bios_acknowledged_partial;
	/* Current number of bios in progress */
	struct bio_stats bios_in_progress;
	/* Memory usage stats. */
	struct memory_usage memory_usage;
	/* The statistics for the UDS index */
	struct index_statistics index;
};

#endif /* not STATISTICS_H */
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2018-2019 HUAWEI, Inc. * https://www.huawei.com/ */ #include "internal.h" #include <linux/unaligned.h> #include <trace/events/erofs.h> struct z_erofs_maprecorder { struct inode *inode; struct erofs_map_blocks *map; unsigned long lcn; /* compression extent information gathered */ u8 type, headtype; u16 clusterofs; u16 delta[2]; erofs_blk_t pblk, compressedblks; erofs_off_t nextpackoff; bool partialref; }; static int z_erofs_load_full_lcluster(struct z_erofs_maprecorder *m, unsigned long lcn) { struct inode *const inode = m->inode; struct erofs_inode *const vi = EROFS_I(inode); const erofs_off_t pos = Z_EROFS_FULL_INDEX_ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize) + lcn * sizeof(struct z_erofs_lcluster_index); struct z_erofs_lcluster_index *di; unsigned int advise; di = erofs_read_metabuf(&m->map->buf, inode->i_sb, pos, EROFS_KMAP); if (IS_ERR(di)) return PTR_ERR(di); m->lcn = lcn; m->nextpackoff = pos + sizeof(struct z_erofs_lcluster_index); advise = le16_to_cpu(di->di_advise); m->type = advise & Z_EROFS_LI_LCLUSTER_TYPE_MASK; if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << vi->z_logical_clusterbits; m->delta[0] = le16_to_cpu(di->di_u.delta[0]); if (m->delta[0] & Z_EROFS_LI_D0_CBLKCNT) { if (!(vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 | Z_EROFS_ADVISE_BIG_PCLUSTER_2))) { DBG_BUGON(1); return -EFSCORRUPTED; } m->compressedblks = m->delta[0] & ~Z_EROFS_LI_D0_CBLKCNT; m->delta[0] = 1; } m->delta[1] = le16_to_cpu(di->di_u.delta[1]); } else { m->partialref = !!(advise & Z_EROFS_LI_PARTIAL_REF); m->clusterofs = le16_to_cpu(di->di_clusterofs); if (m->clusterofs >= 1 << vi->z_logical_clusterbits) { DBG_BUGON(1); return -EFSCORRUPTED; } m->pblk = le32_to_cpu(di->di_u.blkaddr); } return 0; } static unsigned int decode_compactedbits(unsigned int lobits, u8 *in, unsigned int pos, u8 *type) { const unsigned int v = get_unaligned_le32(in + pos / 8) >> (pos & 7); const unsigned int 
lo = v & ((1 << lobits) - 1); *type = (v >> lobits) & 3; return lo; } static int get_compacted_la_distance(unsigned int lobits, unsigned int encodebits, unsigned int vcnt, u8 *in, int i) { unsigned int lo, d1 = 0; u8 type; DBG_BUGON(i >= vcnt); do { lo = decode_compactedbits(lobits, in, encodebits * i, &type); if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD) return d1; ++d1; } while (++i < vcnt); /* vcnt - 1 (Z_EROFS_LCLUSTER_TYPE_NONHEAD) item */ if (!(lo & Z_EROFS_LI_D0_CBLKCNT)) d1 += lo - 1; return d1; } static int unpack_compacted_index(struct z_erofs_maprecorder *m, unsigned int amortizedshift, erofs_off_t pos, bool lookahead) { struct erofs_inode *const vi = EROFS_I(m->inode); const unsigned int lclusterbits = vi->z_logical_clusterbits; unsigned int vcnt, lo, lobits, encodebits, nblk, bytes; bool big_pcluster; u8 *in, type; int i; if (1 << amortizedshift == 4 && lclusterbits <= 14) vcnt = 2; else if (1 << amortizedshift == 2 && lclusterbits <= 12) vcnt = 16; else return -EOPNOTSUPP; in = erofs_read_metabuf(&m->map->buf, m->inode->i_sb, pos, EROFS_KMAP); if (IS_ERR(in)) return PTR_ERR(in); /* it doesn't equal to round_up(..) 
*/ m->nextpackoff = round_down(pos, vcnt << amortizedshift) + (vcnt << amortizedshift); big_pcluster = vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1; lobits = max(lclusterbits, ilog2(Z_EROFS_LI_D0_CBLKCNT) + 1U); encodebits = ((vcnt << amortizedshift) - sizeof(__le32)) * 8 / vcnt; bytes = pos & ((vcnt << amortizedshift) - 1); in -= bytes; i = bytes >> amortizedshift; lo = decode_compactedbits(lobits, in, encodebits * i, &type); m->type = type; if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) { m->clusterofs = 1 << lclusterbits; /* figure out lookahead_distance: delta[1] if needed */ if (lookahead) m->delta[1] = get_compacted_la_distance(lobits, encodebits, vcnt, in, i); if (lo & Z_EROFS_LI_D0_CBLKCNT) { if (!big_pcluster) { DBG_BUGON(1); return -EFSCORRUPTED; } m->compressedblks = lo & ~Z_EROFS_LI_D0_CBLKCNT; m->delta[0] = 1; return 0; } else if (i + 1 != (int)vcnt) { m->delta[0] = lo; return 0; } /* * since the last lcluster in the pack is special, * of which lo saves delta[1] rather than delta[0]. * Hence, get delta[0] by the previous lcluster indirectly. 
 */
	/*
	 * Tail of unpack_compacted_index(): for a NONHEAD lcluster, peek at
	 * the previous index to derive delta[0] (distance back to its HEAD).
	 */
	lo = decode_compactedbits(lobits, in, encodebits * (i - 1), &type);
	if (type != Z_EROFS_LCLUSTER_TYPE_NONHEAD)
		lo = 0;	/* previous one is HEAD: this is the 1st NONHEAD */
	else if (lo & Z_EROFS_LI_D0_CBLKCNT)
		lo = 1;	/* CBLKCNT slot counts as a single lcluster */
	m->delta[0] = lo + 1;
	return 0;
	}
	m->clusterofs = lo;
	m->delta[0] = 0;
	/* figure out blkaddr (pblk) for HEAD lclusters */
	if (!big_pcluster) {
		nblk = 1;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD)
				i -= lo;
			if (i >= 0)
				++nblk;
		}
	} else {
		nblk = 0;
		while (i > 0) {
			--i;
			lo = decode_compactedbits(lobits, in,
						  encodebits * i, &type);
			if (type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
				if (lo & Z_EROFS_LI_D0_CBLKCNT) {
					--i;
					nblk += lo & ~Z_EROFS_LI_D0_CBLKCNT;
					continue;
				}
				/* bigpcluster shouldn't have plain d0 == 1 */
				if (lo <= 1) {
					DBG_BUGON(1);
					return -EFSCORRUPTED;
				}
				i -= lo - 2;
				continue;
			}
			++nblk;
		}
	}
	/* the last 4 bytes of the pack hold the first physical block addr */
	in += (vcnt << amortizedshift) - sizeof(__le32);
	m->pblk = le32_to_cpu(*(__le32 *)in) + nblk;
	return 0;
}

/*
 * Locate the compacted on-disk index pack that covers logical cluster @lcn
 * and decode it via unpack_compacted_index().
 *
 * The index area after the map header consists of (optionally) an initial
 * 4-byte-amortized region for 32-byte alignment, then a 2-byte-amortized
 * region (if COMPACTED_2B is advertised), then 4-byte-amortized indexes.
 */
static int z_erofs_load_compact_lcluster(struct z_erofs_maprecorder *m,
					 unsigned long lcn, bool lookahead)
{
	struct inode *const inode = m->inode;
	struct erofs_inode *const vi = EROFS_I(inode);
	const erofs_off_t ebase = sizeof(struct z_erofs_map_header) +
		ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	unsigned int totalidx = erofs_iblks(inode);
	unsigned int compacted_4b_initial, compacted_2b;
	unsigned int amortizedshift;
	erofs_off_t pos;

	if (lcn >= totalidx || vi->z_logical_clusterbits > 14)
		return -EINVAL;

	m->lcn = lcn;
	/* used to align to 32-byte (compacted_2b) alignment */
	compacted_4b_initial = (32 - ebase % 32) / 4;
	if (compacted_4b_initial == 32 / 4)
		compacted_4b_initial = 0;

	if ((vi->z_advise & Z_EROFS_ADVISE_COMPACTED_2B) &&
	    compacted_4b_initial < totalidx)
		compacted_2b = rounddown(totalidx - compacted_4b_initial, 16);
	else
		compacted_2b = 0;

	pos = ebase;
	if (lcn < compacted_4b_initial) {
		amortizedshift = 2;
		goto out;
	}
	pos += compacted_4b_initial * 4;
	lcn -= compacted_4b_initial;

	if (lcn < compacted_2b) {
		amortizedshift = 1;
		goto out;
	}
	pos += compacted_2b * 2;
	lcn -= compacted_2b;
	amortizedshift = 2;
out:
	pos += lcn * (1 << amortizedshift);
	return unpack_compacted_index(m, amortizedshift, pos, lookahead);
}

/* Dispatch lcluster loading according to the inode's index layout. */
static int z_erofs_load_lcluster_from_disk(struct z_erofs_maprecorder *m,
					   unsigned int lcn, bool lookahead)
{
	switch (EROFS_I(m->inode)->datalayout) {
	case EROFS_INODE_COMPRESSED_FULL:
		return z_erofs_load_full_lcluster(m, lcn);
	case EROFS_INODE_COMPRESSED_COMPACT:
		return z_erofs_load_compact_lcluster(m, lcn, lookahead);
	default:
		return -EINVAL;
	}
}

/*
 * Walk backwards through NONHEAD lclusters (following delta[0] each step)
 * until the owning HEAD/PLAIN lcluster is found; record its type and the
 * extent's logical start in m->headtype / m->map->m_la.
 */
static int z_erofs_extent_lookback(struct z_erofs_maprecorder *m,
				   unsigned int lookback_distance)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	const unsigned int lclusterbits = vi->z_logical_clusterbits;

	while (m->lcn >= lookback_distance) {
		unsigned long lcn = m->lcn - lookback_distance;
		int err;

		err = z_erofs_load_lcluster_from_disk(m, lcn, false);
		if (err)
			return err;

		switch (m->type) {
		case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
			lookback_distance = m->delta[0];
			/* a zero distance would loop forever: corrupted image */
			if (!lookback_distance)
				goto err_bogus;
			continue;
		case Z_EROFS_LCLUSTER_TYPE_PLAIN:
		case Z_EROFS_LCLUSTER_TYPE_HEAD1:
		case Z_EROFS_LCLUSTER_TYPE_HEAD2:
			m->headtype = m->type;
			m->map->m_la = (lcn << lclusterbits) | m->clusterofs;
			return 0;
		default:
			erofs_err(sb, "unknown type %u @ lcn %lu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
	}
err_bogus:
	erofs_err(sb, "bogus lookback distance %u @ lcn %lu of nid %llu",
		  lookback_distance, m->lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

/*
 * Determine the compressed (physical) length of the current extent and
 * store it in map->m_plen.  Continues on the next source line.
 */
static int z_erofs_get_extent_compressedlen(struct z_erofs_maprecorder *m,
					    unsigned int initial_lcn)
{
	struct super_block *sb = m->inode->i_sb;
	struct erofs_inode *const vi = EROFS_I(m->inode);
	struct erofs_map_blocks *const map = m->map;
	const unsigned int lclusterbits = vi->z_logical_clusterbits;
	unsigned long lcn;
	int err;

	DBG_BUGON(m->type != Z_EROFS_LCLUSTER_TYPE_PLAIN && m->type !=
		  Z_EROFS_LCLUSTER_TYPE_HEAD1 && m->type !=
			Z_EROFS_LCLUSTER_TYPE_HEAD2);
	DBG_BUGON(m->type != m->headtype);

	/* non-big pclusters are always exactly one lcluster long */
	if (m->headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD1) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1)) ||
	    ((m->headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2) &&
	     !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2))) {
		map->m_plen = 1ULL << lclusterbits;
		return 0;
	}
	lcn = m->lcn + 1;
	if (m->compressedblks)
		goto out;

	err = z_erofs_load_lcluster_from_disk(m, lcn, false);
	if (err)
		return err;

	/*
	 * If the 1st NONHEAD lcluster has already been handled initially w/o
	 * valid compressedblks, which means at least it mustn't be CBLKCNT, or
	 * an internal implementation error is detected.
	 *
	 * The following code can also handle it properly anyway, but let's
	 * BUG_ON in the debugging mode only for developers to notice that.
	 */
	DBG_BUGON(lcn == initial_lcn &&
		  m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD);

	switch (m->type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
		/*
		 * if the 1st NONHEAD lcluster is actually PLAIN or HEAD type
		 * rather than CBLKCNT, it's a 1 lcluster-sized pcluster.
		 */
		m->compressedblks = 1 << (lclusterbits - sb->s_blocksize_bits);
		break;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		if (m->delta[0] != 1)
			goto err_bonus_cblkcnt;
		if (m->compressedblks)
			break;
		fallthrough;
	default:
		erofs_err(sb, "cannot found CBLKCNT @ lcn %lu of nid %llu",
			  lcn, vi->nid);
		DBG_BUGON(1);
		return -EFSCORRUPTED;
	}
out:
	map->m_plen = erofs_pos(sb, m->compressedblks);
	return 0;
err_bonus_cblkcnt:
	erofs_err(sb, "bogus CBLKCNT @ lcn %lu of nid %llu", lcn, vi->nid);
	DBG_BUGON(1);
	return -EFSCORRUPTED;
}

/*
 * Extend map->m_llen to cover the whole decompressed extent by scanning
 * forward (lookahead) until the next HEAD lcluster or EOF is reached.
 */
static int z_erofs_get_extent_decompressedlen(struct z_erofs_maprecorder *m)
{
	struct inode *inode = m->inode;
	struct erofs_inode *vi = EROFS_I(inode);
	struct erofs_map_blocks *map = m->map;
	unsigned int lclusterbits = vi->z_logical_clusterbits;
	u64 lcn = m->lcn, headlcn = map->m_la >> lclusterbits;
	int err;

	while (1) {
		/* handle the last EOF pcluster (no next HEAD lcluster) */
		if ((lcn << lclusterbits) >= inode->i_size) {
			map->m_llen = inode->i_size - map->m_la;
			return 0;
		}

		err = z_erofs_load_lcluster_from_disk(m, lcn, true);
		if (err)
			return err;

		if (m->type == Z_EROFS_LCLUSTER_TYPE_NONHEAD) {
			/* work around invalid d1 generated by pre-1.0 mkfs */
			if (unlikely(!m->delta[1])) {
				m->delta[1] = 1;
				DBG_BUGON(1);
			}
		} else if (m->type == Z_EROFS_LCLUSTER_TYPE_PLAIN ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD1 ||
			   m->type == Z_EROFS_LCLUSTER_TYPE_HEAD2) {
			if (lcn != headlcn)
				break;	/* ends at the next HEAD lcluster */
			m->delta[1] = 1;
		} else {
			erofs_err(inode->i_sb, "unknown type %u @ lcn %llu of nid %llu",
				  m->type, lcn, vi->nid);
			DBG_BUGON(1);
			return -EOPNOTSUPP;
		}
		lcn += m->delta[1];
	}
	map->m_llen = (lcn << lclusterbits) + m->clusterofs - map->m_la;
	return 0;
}

/*
 * Core mapping routine: translate the logical range starting at map->m_la
 * into a compressed extent (m_pa/m_plen/m_llen/m_flags).  With
 * EROFS_GET_BLOCKS_FINDTAIL it instead locates the tail extent.
 * Continues on the next source line.
 */
static int z_erofs_do_map_blocks(struct inode *inode,
				 struct erofs_map_blocks *map, int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	bool ztailpacking = vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER;
	bool fragment = vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
	struct z_erofs_maprecorder m = { .inode =
		inode,
		.map = map,
	};
	int err = 0;
	unsigned int lclusterbits, endoff, afmt;
	unsigned long initial_lcn;
	unsigned long long ofs, end;

	lclusterbits = vi->z_logical_clusterbits;
	/* FINDTAIL maps the lcluster holding the last byte of the file */
	ofs = flags & EROFS_GET_BLOCKS_FINDTAIL ? inode->i_size - 1 : map->m_la;
	initial_lcn = ofs >> lclusterbits;
	endoff = ofs & ((1 << lclusterbits) - 1);

	err = z_erofs_load_lcluster_from_disk(&m, initial_lcn, false);
	if (err)
		goto unmap_out;

	if (ztailpacking && (flags & EROFS_GET_BLOCKS_FINDTAIL))
		vi->z_idataoff = m.nextpackoff;

	map->m_flags = EROFS_MAP_MAPPED | EROFS_MAP_ENCODED;
	end = (m.lcn + 1ULL) << lclusterbits;

	switch (m.type) {
	case Z_EROFS_LCLUSTER_TYPE_PLAIN:
	case Z_EROFS_LCLUSTER_TYPE_HEAD1:
	case Z_EROFS_LCLUSTER_TYPE_HEAD2:
		if (endoff >= m.clusterofs) {
			m.headtype = m.type;
			map->m_la = (m.lcn << lclusterbits) | m.clusterofs;
			/*
			 * For ztailpacking files, in order to inline data more
			 * effectively, special EOF lclusters are now supported
			 * which can have three parts at most.
			 */
			if (ztailpacking && end > inode->i_size)
				end = inode->i_size;
			break;
		}
		/* m.lcn should be >= 1 if endoff < m.clusterofs */
		if (!m.lcn) {
			erofs_err(inode->i_sb, "invalid logical cluster 0 at nid %llu",
				  vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		end = (m.lcn << lclusterbits) | m.clusterofs;
		map->m_flags |= EROFS_MAP_FULL_MAPPED;
		m.delta[0] = 1;
		fallthrough;
	case Z_EROFS_LCLUSTER_TYPE_NONHEAD:
		/* get the corresponding first chunk */
		err = z_erofs_extent_lookback(&m, m.delta[0]);
		if (err)
			goto unmap_out;
		break;
	default:
		erofs_err(inode->i_sb, "unknown type %u @ offset %llu of nid %llu",
			  m.type, ofs, vi->nid);
		err = -EOPNOTSUPP;
		goto unmap_out;
	}
	if (m.partialref)
		map->m_flags |= EROFS_MAP_PARTIAL_REF;
	map->m_llen = end - map->m_la;

	if (flags & EROFS_GET_BLOCKS_FINDTAIL) {
		vi->z_tailextent_headlcn = m.lcn;
		/* for non-compact indexes, fragmentoff is 64 bits */
		if (fragment && vi->datalayout == EROFS_INODE_COMPRESSED_FULL)
			vi->z_fragmentoff |= (u64)m.pblk << 32;
	}
	if (ztailpacking && m.lcn == vi->z_tailextent_headlcn) {
		/* inline (tail-packed) pcluster lives in the metadata area */
		map->m_flags |= EROFS_MAP_META;
		map->m_pa = vi->z_idataoff;
		map->m_plen = vi->z_idata_size;
	} else if (fragment && m.lcn == vi->z_tailextent_headlcn) {
		map->m_flags |= EROFS_MAP_FRAGMENT;
	} else {
		map->m_pa = erofs_pos(inode->i_sb, m.pblk);
		err = z_erofs_get_extent_compressedlen(&m, initial_lcn);
		if (err)
			goto unmap_out;
	}

	if (m.headtype == Z_EROFS_LCLUSTER_TYPE_PLAIN) {
		if (map->m_llen > map->m_plen) {
			DBG_BUGON(1);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
		afmt = vi->z_advise & Z_EROFS_ADVISE_INTERLACED_PCLUSTER ?
			Z_EROFS_COMPRESSION_INTERLACED :
			Z_EROFS_COMPRESSION_SHIFTED;
	} else {
		afmt = m.headtype == Z_EROFS_LCLUSTER_TYPE_HEAD2 ?
			vi->z_algorithmtype[1] : vi->z_algorithmtype[0];
		if (!(EROFS_I_SB(inode)->available_compr_algs & (1 << afmt))) {
			erofs_err(inode->i_sb, "inconsistent algorithmtype %u for nid %llu",
				  afmt, vi->nid);
			err = -EFSCORRUPTED;
			goto unmap_out;
		}
	}
	map->m_algorithmformat = afmt;

	/*
	 * Readahead for LZMA/DEFLATE/ZSTD (and FIEMAP) also wants the full
	 * decompressed length, which requires a forward scan.
	 */
	if ((flags & EROFS_GET_BLOCKS_FIEMAP) ||
	    ((flags & EROFS_GET_BLOCKS_READMORE) &&
	     (map->m_algorithmformat == Z_EROFS_COMPRESSION_LZMA ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_DEFLATE ||
	      map->m_algorithmformat == Z_EROFS_COMPRESSION_ZSTD) &&
	     map->m_llen >= i_blocksize(inode))) {
		err = z_erofs_get_extent_decompressedlen(&m);
		if (!err)
			map->m_flags |= EROFS_MAP_FULL_MAPPED;
	}

unmap_out:
	erofs_unmap_metabuf(&m.map->buf);
	return err;
}

/*
 * Lazily parse the z_erofs map header of @inode on first access and cache
 * the per-inode compression parameters (protected by EROFS_I_BL_Z_BIT).
 * Continues on the next source line.
 */
static int z_erofs_fill_inode_lazy(struct inode *inode)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	struct super_block *const sb = inode->i_sb;
	int err, headnr;
	erofs_off_t pos;
	struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
	struct z_erofs_map_header *h;

	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags)) {
		/*
		 * paired with smp_mb() at the end of the function to ensure
		 * fields will only be observed after the bit is set.
		 */
		smp_mb();
		return 0;
	}

	if (wait_on_bit_lock(&vi->flags, EROFS_I_BL_Z_BIT, TASK_KILLABLE))
		return -ERESTARTSYS;

	err = 0;
	/* recheck under the bit lock: another task may have initialized it */
	if (test_bit(EROFS_I_Z_INITED_BIT, &vi->flags))
		goto out_unlock;

	pos = ALIGN(erofs_iloc(inode) + vi->inode_isize + vi->xattr_isize, 8);
	h = erofs_read_metabuf(&buf, sb, pos, EROFS_KMAP);
	if (IS_ERR(h)) {
		err = PTR_ERR(h);
		goto out_unlock;
	}

	/*
	 * if the highest bit of the 8-byte map header is set, the whole file
	 * is stored in the packed inode. The rest bits keeps z_fragmentoff.
	 */
	if (h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT) {
		vi->z_advise = Z_EROFS_ADVISE_FRAGMENT_PCLUSTER;
		vi->z_fragmentoff = le64_to_cpu(*(__le64 *)h) ^ (1ULL << 63);
		vi->z_tailextent_headlcn = 0;
		goto done;
	}
	vi->z_advise = le16_to_cpu(h->h_advise);
	vi->z_algorithmtype[0] = h->h_algorithmtype & 15;
	vi->z_algorithmtype[1] = h->h_algorithmtype >> 4;

	headnr = 0;
	if (vi->z_algorithmtype[0] >= Z_EROFS_COMPRESSION_MAX ||
	    vi->z_algorithmtype[++headnr] >= Z_EROFS_COMPRESSION_MAX) {
		erofs_err(sb, "unknown HEAD%u format %u for nid %llu, please upgrade kernel",
			  headnr + 1, vi->z_algorithmtype[headnr], vi->nid);
		err = -EOPNOTSUPP;
		goto out_put_metabuf;
	}

	vi->z_logical_clusterbits = sb->s_blocksize_bits + (h->h_clusterbits & 7);
	if (!erofs_sb_has_big_pcluster(EROFS_SB(sb)) &&
	    vi->z_advise & (Z_EROFS_ADVISE_BIG_PCLUSTER_1 |
			    Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "per-inode big pcluster without sb feature for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}
	if (vi->datalayout == EROFS_INODE_COMPRESSED_COMPACT &&
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_1) ^
	    !(vi->z_advise & Z_EROFS_ADVISE_BIG_PCLUSTER_2)) {
		erofs_err(sb, "big pcluster head1/2 of compact indexes should be consistent for nid %llu",
			  vi->nid);
		err = -EFSCORRUPTED;
		goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_INLINE_PCLUSTER) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_idata_size = le16_to_cpu(h->h_idata_size);
		/* FINDTAIL locates and validates the inline tail pcluster */
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);

		if (!map.m_plen ||
		    erofs_blkoff(sb, map.m_pa) + map.m_plen > sb->s_blocksize) {
			erofs_err(sb, "invalid tail-packing pclustersize %llu",
				  map.m_plen);
			err = -EFSCORRUPTED;
		}
		if (err < 0)
			goto out_put_metabuf;
	}

	if (vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER &&
	    !(h->h_clusterbits >> Z_EROFS_FRAGMENT_INODE_BIT)) {
		struct erofs_map_blocks map = {
			.buf = __EROFS_BUF_INITIALIZER
		};

		vi->z_fragmentoff = le32_to_cpu(h->h_fragmentoff);
		err = z_erofs_do_map_blocks(inode, &map,
					    EROFS_GET_BLOCKS_FINDTAIL);
		erofs_put_metabuf(&map.buf);
		if (err < 0)
			goto out_put_metabuf;
	}
done:
	/* paired with smp_mb() at the beginning of the function */
	smp_mb();
	set_bit(EROFS_I_Z_INITED_BIT, &vi->flags);
out_put_metabuf:
	erofs_put_metabuf(&buf);
out_unlock:
	clear_and_wake_up_bit(EROFS_I_BL_Z_BIT, &vi->flags);
	return err;
}

/*
 * Public entry point: map the logical range at map->m_la, handling
 * post-EOF queries and whole-file fragment inodes, then sanity-check the
 * resulting extent sizes.
 */
int z_erofs_map_blocks_iter(struct inode *inode, struct erofs_map_blocks *map,
			    int flags)
{
	struct erofs_inode *const vi = EROFS_I(inode);
	int err = 0;

	trace_erofs_map_blocks_enter(inode, map, flags);
	if (map->m_la >= inode->i_size) {
		/* post-EOF unmapped extent */
		map->m_llen = map->m_la + 1 - inode->i_size;
		map->m_la = inode->i_size;
		map->m_flags = 0;
	} else {
		err = z_erofs_fill_inode_lazy(inode);
		if (!err) {
			if ((vi->z_advise & Z_EROFS_ADVISE_FRAGMENT_PCLUSTER) &&
			    !vi->z_tailextent_headlcn) {
				/* whole file lives in the packed inode */
				map->m_la = 0;
				map->m_llen = inode->i_size;
				map->m_flags = EROFS_MAP_MAPPED |
					EROFS_MAP_FULL_MAPPED |
					EROFS_MAP_FRAGMENT;
			} else {
				err = z_erofs_do_map_blocks(inode, map, flags);
			}
		}
		if (!err && (map->m_flags & EROFS_MAP_ENCODED) &&
		    unlikely(map->m_plen > Z_EROFS_PCLUSTER_MAX_SIZE ||
			     map->m_llen > Z_EROFS_PCLUSTER_MAX_DSIZE))
			err = -EOPNOTSUPP;
		if (err)
			map->m_llen = 0;
	}
	trace_erofs_map_blocks_exit(inode, map, flags, err);
	return err;
}

/*
 * iomap_begin callback used for FIEMAP/SEEK reporting on compressed
 * inodes.  Continues on the next source line.
 */
static int z_erofs_iomap_begin_report(struct inode *inode, loff_t offset,
				      loff_t length, unsigned int flags,
				      struct iomap *iomap, struct iomap *srcmap)
{
	int ret;
	struct erofs_map_blocks map
= { .m_la = offset };

	ret = z_erofs_map_blocks_iter(inode, &map, EROFS_GET_BLOCKS_FIEMAP);
	erofs_put_metabuf(&map.buf);
	if (ret < 0)
		return ret;

	iomap->bdev = inode->i_sb->s_bdev;
	iomap->offset = map.m_la;
	iomap->length = map.m_llen;
	if (map.m_flags & EROFS_MAP_MAPPED) {
		iomap->type = IOMAP_MAPPED;
		/* fragment extents have no meaningful physical address */
		iomap->addr = map.m_flags & EROFS_MAP_FRAGMENT ?
			      IOMAP_NULL_ADDR : map.m_pa;
	} else {
		iomap->type = IOMAP_HOLE;
		iomap->addr = IOMAP_NULL_ADDR;
		/*
		 * No strict rule on how to describe extents for post EOF, yet
		 * we need to do like below. Otherwise, iomap itself will get
		 * into an endless loop on post EOF.
		 *
		 * Calculate the effective offset by subtracting extent start
		 * (map.m_la) from the requested offset, and add it to length.
		 * (NB: offset >= map.m_la always)
		 */
		if (iomap->offset >= inode->i_size)
			iomap->length = length + offset - map.m_la;
	}
	iomap->flags = 0;
	return 0;
}

/* iomap ops used only for reporting (FIEMAP/seek), never for real I/O */
const struct iomap_ops z_erofs_iomap_report_ops = {
	.iomap_begin = z_erofs_iomap_begin_report,
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Elan I2C/SMBus Touchpad driver
 *
 * Copyright (c) 2013 ELAN Microelectronics Corp.
 *
 * Author: Duson Lin <[email protected]>
 * Author: KT Liao <[email protected]>
 * Version: 1.6.3
 *
 * Based on cyapa driver:
 * copyright (c) 2011-2012 Cypress Semiconductor, Inc.
 * copyright (c) 2011-2012 Google, Inc.
 *
 * Trademarks are the property of their respective owners.
 */

#include <linux/acpi.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/input/mt.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/input.h>
#include <linux/uaccess.h>
#include <linux/jiffies.h>
#include <linux/completion.h>
#include <linux/of.h>
#include <linux/pm_wakeirq.h>
#include <linux/property.h>
#include <linux/regulator/consumer.h>
#include <linux/unaligned.h>

#include "elan_i2c.h"

#define DRIVER_NAME		"elan_i2c"
#define ELAN_VENDOR_ID		0x04f3
#define ETP_MAX_PRESSURE	255
#define ETP_FWIDTH_REDUCE	90	/* per-trace width shrink, see elan_report_contact() */
#define ETP_FINGER_WIDTH	15
#define ETP_RETRY_COUNT		3

/* quirks to control the device */
#define ETP_QUIRK_QUICK_WAKEUP	BIT(0)

/* The main device structure */
struct elan_tp_data {
	struct i2c_client	*client;
	struct input_dev	*input;
	struct input_dev	*tp_input; /* trackpoint input node */
	struct regulator	*vcc;

	const struct elan_transport_ops *ops;

	/* for fw update */
	struct completion	fw_completion;
	bool			in_fw_update;

	struct mutex		sysfs_mutex;	/* serializes sysfs handlers */

	unsigned int		max_x;		/* absolute axis ranges */
	unsigned int		max_y;
	unsigned int		width_x;	/* size of one sensor trace */
	unsigned int		width_y;
	unsigned int		x_res;		/* resolution, dots/mm */
	unsigned int		y_res;

	u8			pattern;
	u16			product_id;
	u8			fw_version;
	u8			sm_version;
	u8			iap_version;
	u16			fw_checksum;
	unsigned int		report_features;
	unsigned int		report_len;
	int			pressure_adjustment;
	u8			mode;		/* current ETP_ENABLE_* mode bits */
	u16			ic_type;
	u16			fw_validpage_count;
	u16			fw_page_size;
	u32			fw_signature_address;

	u8			min_baseline;	/* last acquired baseline readings */
	u8			max_baseline;
	bool			baseline_ready;
	u8			clickpad;
	bool			middle_button;

	u32			quirks;		/* Various quirks */
};

/*
 * Return the quirk bits for a given ic_type/product_id pair; newer parts
 * (ic_type >= 0x0D, product_id >= 0x123) always get QUICK_WAKEUP.
 */
static u32 elan_i2c_lookup_quirks(u16 ic_type, u16 product_id)
{
	static const struct {
		u16 ic_type;
		u16 product_id;
		u32 quirks;
	} elan_i2c_quirks[] = {
		{ 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
		{ 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
		{ 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
		{ 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
		{ 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
	};
	u32 quirks = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(elan_i2c_quirks); i++) {
		if (elan_i2c_quirks[i].ic_type == ic_type &&
		    elan_i2c_quirks[i].product_id == product_id) {
			quirks = elan_i2c_quirks[i].quirks;
		}
	}

	if (ic_type >= 0x0D && product_id >= 0x123)
		quirks |= ETP_QUIRK_QUICK_WAKEUP;

	return quirks;
}

/*
 * Derive firmware geometry (valid page count, signature offset, page size)
 * from the IC type and IAP version; returns -ENXIO for unknown ICs so that
 * firmware update is disabled for them.
 */
static int elan_get_fwinfo(u16 ic_type, u8 iap_version, u16 *validpage_count,
			   u32 *signature_address, u16 *page_size)
{
	switch (ic_type) {
	case 0x00:
	case 0x06:
	case 0x08:
		*validpage_count = 512;
		break;
	case 0x03:
	case 0x07:
	case 0x09:
	case 0x0A:
	case 0x0B:
	case 0x0C:
		*validpage_count = 768;
		break;
	case 0x0D:
		*validpage_count = 896;
		break;
	case 0x0E:
		*validpage_count = 640;
		break;
	case 0x10:
		*validpage_count = 1024;
		break;
	case 0x11:
		*validpage_count = 1280;
		break;
	case 0x13:
		*validpage_count = 2048;
		break;
	case 0x14:
	case 0x15:
		*validpage_count = 1024;
		break;
	default:
		/* unknown ic type clear value */
		*validpage_count = 0;
		*signature_address = 0;
		*page_size = 0;
		return -ENXIO;
	}

	*signature_address =
		(*validpage_count * ETP_FW_PAGE_SIZE) - ETP_FW_SIGNATURE_SIZE;

	/* newer ICs use larger/smaller pages, so fewer pages overall */
	if ((ic_type == 0x14 || ic_type == 0x15) && iap_version >= 2) {
		*validpage_count /= 8;
		*page_size = ETP_FW_PAGE_SIZE_512;
	} else if (ic_type >= 0x0D && iap_version >= 1) {
		*validpage_count /= 2;
		*page_size = ETP_FW_PAGE_SIZE_128;
	} else {
		*page_size = ETP_FW_PAGE_SIZE;
	}

	return 0;
}

/*
 * Power the device on/off, retrying up to ETP_RETRY_COUNT times.
 * Continues on the next source line.
 */
static int elan_set_power(struct elan_tp_data *data, bool on)
{
	int repeat = ETP_RETRY_COUNT;
	int error;

	do {
		error = data->ops->power_control(data->client, on);
		if (error >= 0)
			return 0;

		msleep(30);
	} while (--repeat > 0);

	dev_err(&data->client->dev, "failed to set power %s: %d\n",
		on ? "on" : "off", error);
	return error;
}

/* Put the device to sleep, retrying up to ETP_RETRY_COUNT times. */
static int elan_sleep(struct elan_tp_data *data)
{
	int repeat = ETP_RETRY_COUNT;
	int error;

	do {
		error = data->ops->sleep_control(data->client, true);
		if (!error)
			return 0;

		msleep(30);
	} while (--repeat > 0);

	return error;
}

/* Read product id, pattern and SM version/IC identification. */
static int elan_query_product(struct elan_tp_data *data)
{
	int error;

	error = data->ops->get_product_id(data->client, &data->product_id);
	if (error)
		return error;

	error = data->ops->get_pattern(data->client, &data->pattern);
	if (error)
		return error;

	error = data->ops->get_sm_version(data->client, data->pattern,
					  &data->ic_type, &data->sm_version,
					  &data->clickpad);
	if (error)
		return error;

	return 0;
}

/*
 * Identify ASUS models whose firmware must be woken up before switching
 * to absolute mode (see __elan_initialize()).
 */
static int elan_check_ASUS_special_fw(struct elan_tp_data *data)
{
	if (data->ic_type == 0x0E) {
		switch (data->product_id) {
		case 0x05 ... 0x07:
		case 0x09:
		case 0x13:
			return true;
		}
	} else if (data->ic_type == 0x08 && data->product_id == 0x26) {
		/* ASUS EeeBook X205TA */
		return true;
	}

	return false;
}

/*
 * One attempt at device bring-up: optional reset, product query, then
 * enabling absolute reporting mode and waking the device (order depends
 * on the ASUS firmware quirk).
 */
static int __elan_initialize(struct elan_tp_data *data, bool skip_reset)
{
	struct i2c_client *client = data->client;
	bool woken_up = false;
	int error;

	if (!skip_reset) {
		error = data->ops->initialize(client);
		if (error) {
			dev_err(&client->dev, "device initialize failed: %d\n", error);
			return error;
		}
	}

	error = elan_query_product(data);
	if (error)
		return error;

	/*
	 * Some ASUS devices were shipped with firmware that requires
	 * touchpads to be woken up first, before attempting to switch
	 * them into absolute reporting mode.
	 */
	if (elan_check_ASUS_special_fw(data)) {
		error = data->ops->sleep_control(client, false);
		if (error) {
			dev_err(&client->dev,
				"failed to wake device up: %d\n", error);
			return error;
		}

		msleep(200);
		woken_up = true;
	}

	data->mode |= ETP_ENABLE_ABS;
	error = data->ops->set_mode(client, data->mode);
	if (error) {
		dev_err(&client->dev,
			"failed to switch to absolute mode: %d\n", error);
		return error;
	}

	if (!woken_up) {
		error = data->ops->sleep_control(client, false);
		if (error) {
			dev_err(&client->dev,
				"failed to wake device up: %d\n", error);
			return error;
		}
	}

	return 0;
}

/*
 * Retry wrapper around __elan_initialize(); only the first attempt may
 * skip the reset.
 */
static int elan_initialize(struct elan_tp_data *data, bool skip_reset)
{
	int repeat = ETP_RETRY_COUNT;
	int error;

	do {
		error = __elan_initialize(data, skip_reset);
		if (!error)
			return 0;

		skip_reset = false;
		msleep(30);
	} while (--repeat > 0);

	return error;
}

/*
 * Query firmware/IAP versions, checksum, pressure adjustment and report
 * format, then resolve quirks and firmware-update geometry.
 */
static int elan_query_device_info(struct elan_tp_data *data)
{
	int error;

	error = data->ops->get_version(data->client, data->pattern, false,
				       &data->fw_version);
	if (error)
		return error;

	error = data->ops->get_checksum(data->client, false,
					&data->fw_checksum);
	if (error)
		return error;

	error = data->ops->get_version(data->client, data->pattern,
				       true, &data->iap_version);
	if (error)
		return error;

	error = data->ops->get_pressure_adjustment(data->client,
						   &data->pressure_adjustment);
	if (error)
		return error;

	error = data->ops->get_report_features(data->client, data->pattern,
					       &data->report_features,
					       &data->report_len);
	if (error)
		return error;

	data->quirks = elan_i2c_lookup_quirks(data->ic_type, data->product_id);

	error = elan_get_fwinfo(data->ic_type, data->iap_version,
				&data->fw_validpage_count,
				&data->fw_signature_address,
				&data->fw_page_size);
	if (error)
		/* fw update stays disabled (fw_validpage_count == 0) */
		dev_warn(&data->client->dev,
			 "unexpected iap version %#04x (ic type: %#04x), firmware update will not work\n",
			 data->iap_version, data->ic_type);

	return 0;
}

/*
 * Convert a raw firmware resolution byte into dots/mm.
 * Continues on the next source line.
 */
static unsigned int elan_convert_resolution(u8 val, u8 pattern)
{
	/*
	 * pattern <= 0x01:
	 *	(value from firmware) * 10 + 790 = dpi
	 * else
	 *	((value
	 * from firmware) + 3) * 100 = dpi
	 */
	int res = pattern <= 0x01 ?
		(int)(char)val * 10 + 790 : ((int)(char)val + 3) * 100;
	/*
	 * We also have to convert dpi to dots/mm (*10/254 to avoid floating
	 * point).
	 */
	return res * 10 / 254;
}

/*
 * Determine axis ranges, trace widths and resolution, preferring device
 * properties (DT/ACPI) over values queried from the hardware.
 */
static int elan_query_device_parameters(struct elan_tp_data *data)
{
	struct i2c_client *client = data->client;
	unsigned int x_traces, y_traces;
	u32 x_mm, y_mm;
	u8 hw_x_res, hw_y_res;
	int error;

	if (device_property_read_u32(&client->dev,
				     "touchscreen-size-x", &data->max_x) ||
	    device_property_read_u32(&client->dev,
				     "touchscreen-size-y", &data->max_y)) {
		error = data->ops->get_max(data->client,
					   &data->max_x,
					   &data->max_y);
		if (error)
			return error;
	} else {
		/* size is the maximum + 1 */
		--data->max_x;
		--data->max_y;
	}

	if (device_property_read_u32(&client->dev,
				     "elan,x_traces",
				     &x_traces) ||
	    device_property_read_u32(&client->dev,
				     "elan,y_traces",
				     &y_traces)) {
		error = data->ops->get_num_traces(data->client,
						  &x_traces, &y_traces);
		if (error)
			return error;
	}
	data->width_x = data->max_x / x_traces;
	data->width_y = data->max_y / y_traces;

	if (device_property_read_u32(&client->dev,
				     "touchscreen-x-mm", &x_mm) ||
	    device_property_read_u32(&client->dev,
				     "touchscreen-y-mm", &y_mm)) {
		error = data->ops->get_resolution(data->client,
						  &hw_x_res, &hw_y_res);
		if (error)
			return error;

		data->x_res = elan_convert_resolution(hw_x_res, data->pattern);
		data->y_res = elan_convert_resolution(hw_y_res, data->pattern);
	} else {
		data->x_res = (data->max_x + 1) / x_mm;
		data->y_res = (data->max_y + 1) / y_mm;
	}

	if (device_property_read_bool(&client->dev, "elan,clickpad"))
		data->clickpad = 1;

	if (device_property_read_bool(&client->dev, "elan,middle-button"))
		data->middle_button = true;

	return 0;
}

/*
 **********************************************************
 * IAP firmware updater related routines
 **********************************************************
 */

/* Write one firmware page, retrying up to ETP_RETRY_COUNT times. */
static int elan_write_fw_block(struct elan_tp_data *data, u16 page_size,
			       const u8 *page, u16 checksum, int idx)
{
	int retry = ETP_RETRY_COUNT;
	int error;

	do {
		error = data->ops->write_fw_block(data->client, page_size,
						  page, checksum, idx);
		if (!error)
			return 0;

		dev_dbg(&data->client->dev,
			"IAP retrying page %d (error: %d)\n", idx, error);
	} while (--retry > 0);

	return error;
}

/*
 * Flash all non-bootloader pages of @fw and verify the device-reported
 * checksum against the locally computed one.
 */
static int __elan_update_firmware(struct elan_tp_data *data,
				  const struct firmware *fw)
{
	struct i2c_client *client = data->client;
	struct device *dev = &client->dev;
	int i, j;
	int error;
	u16 iap_start_addr;
	u16 boot_page_count;
	u16 sw_checksum = 0, fw_checksum = 0;

	error = data->ops->prepare_fw_update(client, data->ic_type,
					     data->iap_version,
					     data->fw_page_size);
	if (error)
		return error;

	iap_start_addr = get_unaligned_le16(&fw->data[ETP_IAP_START_ADDR * 2]);

	/* pages before the IAP start address belong to the bootloader */
	boot_page_count = (iap_start_addr * 2) / data->fw_page_size;
	for (i = boot_page_count; i < data->fw_validpage_count; i++) {
		u16 checksum = 0;
		const u8 *page = &fw->data[i * data->fw_page_size];

		for (j = 0; j < data->fw_page_size; j += 2)
			checksum += ((page[j + 1] << 8) | page[j]);

		error = elan_write_fw_block(data, data->fw_page_size,
					    page, checksum, i);
		if (error) {
			dev_err(dev, "write page %d fail: %d\n", i, error);
			return error;
		}

		sw_checksum += checksum;
	}

	/* Wait WDT reset and power on reset */
	msleep(600);

	error = data->ops->finish_fw_update(client, &data->fw_completion);
	if (error)
		return error;

	error = data->ops->get_checksum(client, true, &fw_checksum);
	if (error)
		return error;

	if (sw_checksum != fw_checksum) {
		dev_err(dev, "checksum diff sw=[%04X], fw=[%04X]\n",
			sw_checksum, fw_checksum);
		return -EIO;
	}

	return 0;
}

/*
 * Top-level firmware update: runs with the device IRQ disabled, resets
 * the IAP on failure and re-initializes the touchpad on success.
 * Continues on the next source line.
 */
static int elan_update_firmware(struct elan_tp_data *data,
				const struct firmware *fw)
{
	struct i2c_client *client = data->client;
	int retval;

	dev_dbg(&client->dev, "Starting firmware update....\n");

	guard(disable_irq)(&client->irq);

	data->in_fw_update = true;
	retval = __elan_update_firmware(data, fw);
	if (retval) {
		dev_err(&client->dev, "firmware update failed: %d\n", retval);
		data->ops->iap_reset(client);
	} else {
		/* Reinitialize TP
		 * after fw is updated */
		elan_initialize(data, false);
		elan_query_device_info(data);
	}
	data->in_fw_update = false;

	return retval;
}

/*
 *******************************************************************
 * SYSFS attributes
 *******************************************************************
 */

/* Show the cached firmware checksum. */
static ssize_t elan_sysfs_read_fw_checksum(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	return sysfs_emit(buf, "0x%04x\n", data->fw_checksum);
}

/* Show the cached product id. */
static ssize_t elan_sysfs_read_product_id(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	return sysfs_emit(buf, ETP_PRODUCT_ID_FORMAT_STRING "\n",
			  data->product_id);
}

/* Show the cached firmware version. */
static ssize_t elan_sysfs_read_fw_ver(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	return sysfs_emit(buf, "%d.0\n", data->fw_version);
}

/* Show the cached sample (SM) version. */
static ssize_t elan_sysfs_read_sm_ver(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	return sysfs_emit(buf, "%d.0\n", data->sm_version);
}

/* Show the cached IAP (bootloader) version. */
static ssize_t elan_sysfs_read_iap_ver(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	return sysfs_emit(buf, "%d.0\n", data->iap_version);
}

/*
 * "update_fw" store handler: request the per-product firmware image,
 * validate its signature and flash it under sysfs_mutex.
 */
static ssize_t elan_sysfs_update_fw(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t count)
{
	struct elan_tp_data *data = dev_get_drvdata(dev);
	int error;
	const u8 *fw_signature;
	static const u8 signature[] = {0xAA, 0x55, 0xCC, 0x33, 0xFF, 0xFF};

	/* elan_get_fwinfo() failed earlier: updating is unsupported */
	if (data->fw_validpage_count == 0)
		return -EINVAL;

	/* Look for a firmware with the product id appended. */
	const char *fw_name __free(kfree) =
		kasprintf(GFP_KERNEL, ETP_FW_NAME, data->product_id);
	if (!fw_name) {
		dev_err(dev, "failed to allocate memory for firmware name\n");
		return -ENOMEM;
	}

	dev_info(dev, "requesting fw '%s'\n", fw_name);
	const struct firmware *fw __free(firmware) = NULL;
	error = request_firmware(&fw, fw_name, dev);
	if (error) {
		dev_err(dev, "failed to request firmware: %d\n", error);
		return error;
	}

	/* Firmware file must match signature data */
	fw_signature = &fw->data[data->fw_signature_address];
	if (memcmp(fw_signature, signature, sizeof(signature)) != 0) {
		dev_err(dev, "signature mismatch (expected %*ph, got %*ph)\n",
			(int)sizeof(signature), signature,
			(int)sizeof(signature), fw_signature);
		return -EBADF;
	}

	scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) {
		error = elan_update_firmware(data, fw);
		if (error)
			return error;
	}

	return count;
}

/*
 * Run device self-calibration with the IRQ disabled, polling for
 * completion.  Continues on the next source line.
 */
static int elan_calibrate(struct elan_tp_data *data)
{
	struct i2c_client *client = data->client;
	struct device *dev = &client->dev;
	int tries = 20;
	int retval;
	int error;
	u8 val[ETP_CALIBRATE_MAX_LEN];

	guard(disable_irq)(&client->irq);

	data->mode |= ETP_ENABLE_CALIBRATE;
	retval = data->ops->set_mode(client, data->mode);
	if (retval) {
		data->mode &= ~ETP_ENABLE_CALIBRATE;
		dev_err(dev, "failed to enable calibration mode: %d\n",
			retval);
		return retval;
	}

	retval = data->ops->calibrate(client);
	if (retval) {
		dev_err(dev, "failed to start calibration: %d\n",
			retval);
		goto out_disable_calibrate;
	}

	val[0] = 0xff;
	do {
		/* Wait 250ms before checking if calibration has completed. */
		msleep(250);

		retval = data->ops->calibrate_result(client, val);
		if (retval)
			dev_err(dev,
				"failed to check calibration result: %d\n",
				retval);
		else if (val[0] == 0)
			break; /* calibration done */
	} while (--tries);

	if (tries == 0) {
		dev_err(dev, "failed to calibrate.
 Timeout.\n");
		retval = -ETIMEDOUT;
	}

out_disable_calibrate:
	/* always leave calibration mode, preserving the first error seen */
	data->mode &= ~ETP_ENABLE_CALIBRATE;
	error = data->ops->set_mode(data->client, data->mode);
	if (error) {
		dev_err(dev, "failed to disable calibration mode: %d\n",
			error);
		if (!retval)
			retval = error;
	}
	return retval;
}

/* "calibrate" store handler: run elan_calibrate() under sysfs_mutex. */
static ssize_t calibrate_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);
	int error;

	scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) {
		error = elan_calibrate(data);
		if (error)
			return error;
	}

	return count;
}

/* "mode" show handler: query the current IAP/main-firmware mode. */
static ssize_t elan_sysfs_read_mode(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);
	int error;
	enum tp_mode mode;

	scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) {
		error = data->ops->iap_get_mode(data->client, &mode);
		if (error)
			return error;
	}

	return sysfs_emit(buf, "%d\n", (int)mode);
}

static DEVICE_ATTR(product_id, S_IRUGO, elan_sysfs_read_product_id, NULL);
static DEVICE_ATTR(firmware_version, S_IRUGO, elan_sysfs_read_fw_ver, NULL);
static DEVICE_ATTR(sample_version, S_IRUGO, elan_sysfs_read_sm_ver, NULL);
static DEVICE_ATTR(iap_version, S_IRUGO, elan_sysfs_read_iap_ver, NULL);
static DEVICE_ATTR(fw_checksum, S_IRUGO, elan_sysfs_read_fw_checksum, NULL);
static DEVICE_ATTR(mode, S_IRUGO, elan_sysfs_read_mode, NULL);
static DEVICE_ATTR(update_fw, S_IWUSR, NULL, elan_sysfs_update_fw);
static DEVICE_ATTR_WO(calibrate);

static struct attribute *elan_sysfs_entries[] = {
	&dev_attr_product_id.attr,
	&dev_attr_firmware_version.attr,
	&dev_attr_sample_version.attr,
	&dev_attr_iap_version.attr,
	&dev_attr_fw_checksum.attr,
	&dev_attr_calibrate.attr,
	&dev_attr_mode.attr,
	&dev_attr_update_fw.attr,
	NULL,
};

static const struct attribute_group elan_sysfs_group = {
	.attrs = elan_sysfs_entries,
};

/* elan_acquire_baseline() continues on the next source line */
static int
elan_acquire_baseline(struct elan_tp_data *data)
{
	struct i2c_client *client = data->client;
	struct device *dev = &client->dev;
	int retval;
	int error;

	guard(disable_irq)(&client->irq);

	data->baseline_ready = false;

	/* baseline data is only readable while in calibration mode */
	data->mode |= ETP_ENABLE_CALIBRATE;
	retval = data->ops->set_mode(client, data->mode);
	if (retval) {
		data->mode &= ~ETP_ENABLE_CALIBRATE;
		dev_err(dev,
			"Failed to enable calibration mode to get baseline: %d\n",
			retval);
		return retval;
	}

	msleep(250);

	retval = data->ops->get_baseline_data(client, true,
					      &data->max_baseline);
	if (retval) {
		dev_err(dev, "Failed to read max baseline from device: %d\n",
			retval);
		goto out_disable_calibrate;
	}

	retval = data->ops->get_baseline_data(client, false,
					      &data->min_baseline);
	if (retval) {
		dev_err(dev, "Failed to read min baseline from device: %d\n",
			retval);
		goto out_disable_calibrate;
	}

	data->baseline_ready = true;

out_disable_calibrate:
	/* leave calibration mode, preserving the first error seen */
	data->mode &= ~ETP_ENABLE_CALIBRATE;
	error = data->ops->set_mode(client, data->mode);
	if (error) {
		dev_err(dev,
			"Failed to disable calibration mode after acquiring baseline: %d\n",
			error);
		if (!retval)
			retval = error;
	}
	return retval;
}

/* "baseline/acquire" store: refresh min/max baselines under sysfs_mutex. */
static ssize_t acquire_store(struct device *dev, struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);
	int error;

	scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) {
		error = elan_acquire_baseline(data);
		if (error)
			return error;
	}

	return count;
}

/* "baseline/min" show: last acquired minimum baseline (if any). */
static ssize_t min_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	scoped_guard(mutex_intr, &data->sysfs_mutex) {
		if (!data->baseline_ready)
			return -ENODATA;

		return sysfs_emit(buf, "%d", data->min_baseline);
	}

	return -EINTR;
}

/* "baseline/max" show: last acquired maximum baseline (if any). */
static ssize_t max_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct elan_tp_data *data = i2c_get_clientdata(client);

	scoped_guard(mutex_intr, &data->sysfs_mutex) {
		if (!data->baseline_ready)
			return -ENODATA;

		return sysfs_emit(buf, "%d", data->max_baseline);
	}

	return -EINTR;
}

static DEVICE_ATTR_WO(acquire);
static DEVICE_ATTR_RO(min);
static DEVICE_ATTR_RO(max);

static struct attribute *elan_baseline_sysfs_entries[] = {
	&dev_attr_acquire.attr,
	&dev_attr_min.attr,
	&dev_attr_max.attr,
	NULL,
};

static const struct attribute_group elan_baseline_sysfs_group = {
	.name = "baseline",
	.attrs = elan_baseline_sysfs_entries,
};

static const struct attribute_group *elan_sysfs_groups[] = {
	&elan_sysfs_group,
	&elan_baseline_sysfs_group,
	NULL
};

/*
 ******************************************************************
 * Elan isr functions
 ******************************************************************
 */

/*
 * Report one multitouch contact slot.  Positions come either as 16-bit
 * big-endian values (high_precision) or packed 12-bit values.
 * Continues on the next source line.
 */
static void elan_report_contact(struct elan_tp_data *data, int contact_num,
				bool contact_valid, bool high_precision,
				u8 *packet, u8 *finger_data)
{
	struct input_dev *input = data->input;
	unsigned int pos_x, pos_y;
	unsigned int pressure, scaled_pressure;

	if (contact_valid) {
		if (high_precision) {
			pos_x = get_unaligned_be16(&finger_data[0]);
			pos_y = get_unaligned_be16(&finger_data[2]);
		} else {
			pos_x = ((finger_data[0] & 0xf0) << 4) | finger_data[1];
			pos_y = ((finger_data[0] & 0x0f) << 8) | finger_data[2];
		}

		if (pos_x > data->max_x || pos_y > data->max_y) {
			dev_dbg(input->dev.parent,
				"[%d] x=%d y=%d over max (%d, %d)",
				contact_num, pos_x, pos_y,
				data->max_x, data->max_y);
			return;
		}

		pressure = finger_data[4];
		scaled_pressure = pressure + data->pressure_adjustment;
		if (scaled_pressure > ETP_MAX_PRESSURE)
			scaled_pressure = ETP_MAX_PRESSURE;

		input_mt_slot(input, contact_num);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, true);
		input_report_abs(input, ABS_MT_POSITION_X, pos_x);
		/* hardware Y axis grows downwards; invert for input core */
		input_report_abs(input, ABS_MT_POSITION_Y,
				 data->max_y - pos_y);
		input_report_abs(input, ABS_MT_PRESSURE, scaled_pressure);

		if (data->report_features &
ETP_FEATURE_REPORT_MK) { unsigned int mk_x, mk_y, area_x, area_y; u8 mk_data = high_precision ? packet[ETP_MK_DATA_OFFSET + contact_num] : finger_data[3]; mk_x = mk_data & 0x0f; mk_y = mk_data >> 4; /* * To avoid treating large finger as palm, let's reduce * the width x and y per trace. */ area_x = mk_x * (data->width_x - ETP_FWIDTH_REDUCE); area_y = mk_y * (data->width_y - ETP_FWIDTH_REDUCE); input_report_abs(input, ABS_TOOL_WIDTH, mk_x); input_report_abs(input, ABS_MT_TOUCH_MAJOR, max(area_x, area_y)); input_report_abs(input, ABS_MT_TOUCH_MINOR, min(area_x, area_y)); } } else { input_mt_slot(input, contact_num); input_mt_report_slot_inactive(input); } } static void elan_report_absolute(struct elan_tp_data *data, u8 *packet, bool high_precision) { struct input_dev *input = data->input; u8 *finger_data = &packet[ETP_FINGER_DATA_OFFSET]; int i; u8 tp_info = packet[ETP_TOUCH_INFO_OFFSET]; u8 hover_info = packet[ETP_HOVER_INFO_OFFSET]; bool contact_valid, hover_event; pm_wakeup_event(&data->client->dev, 0); hover_event = hover_info & BIT(6); for (i = 0; i < ETP_MAX_FINGERS; i++) { contact_valid = tp_info & BIT(3 + i); elan_report_contact(data, i, contact_valid, high_precision, packet, finger_data); if (contact_valid) finger_data += ETP_FINGER_DATA_LEN; } input_report_key(input, BTN_LEFT, tp_info & BIT(0)); input_report_key(input, BTN_MIDDLE, tp_info & BIT(2)); input_report_key(input, BTN_RIGHT, tp_info & BIT(1)); input_report_abs(input, ABS_DISTANCE, hover_event != 0); input_mt_report_pointer_emulation(input, true); input_sync(input); } static void elan_report_trackpoint(struct elan_tp_data *data, u8 *report) { struct input_dev *input = data->tp_input; u8 *packet = &report[ETP_REPORT_ID_OFFSET + 1]; int x, y; pm_wakeup_event(&data->client->dev, 0); if (!data->tp_input) { dev_warn_once(&data->client->dev, "received a trackpoint report while no trackpoint device has been created. 
Please report upstream.\n"); return; } input_report_key(input, BTN_LEFT, packet[0] & 0x01); input_report_key(input, BTN_RIGHT, packet[0] & 0x02); input_report_key(input, BTN_MIDDLE, packet[0] & 0x04); if ((packet[3] & 0x0F) == 0x06) { x = packet[4] - (int)((packet[1] ^ 0x80) << 1); y = (int)((packet[2] ^ 0x80) << 1) - packet[5]; input_report_rel(input, REL_X, x); input_report_rel(input, REL_Y, y); } input_sync(input); } static irqreturn_t elan_isr(int irq, void *dev_id) { struct elan_tp_data *data = dev_id; int error; u8 report[ETP_MAX_REPORT_LEN]; /* * When device is connected to i2c bus, when all IAP page writes * complete, the driver will receive interrupt and must read * 0000 to confirm that IAP is finished. */ if (data->in_fw_update) { complete(&data->fw_completion); goto out; } error = data->ops->get_report(data->client, report, data->report_len); if (error) goto out; switch (report[ETP_REPORT_ID_OFFSET]) { case ETP_REPORT_ID: elan_report_absolute(data, report, false); break; case ETP_REPORT_ID2: elan_report_absolute(data, report, true); break; case ETP_TP_REPORT_ID: case ETP_TP_REPORT_ID2: elan_report_trackpoint(data, report); break; default: dev_err(&data->client->dev, "invalid report id data (%x)\n", report[ETP_REPORT_ID_OFFSET]); } out: return IRQ_HANDLED; } /* ****************************************************************** * Elan initialization functions ****************************************************************** */ static int elan_setup_trackpoint_input_device(struct elan_tp_data *data) { struct device *dev = &data->client->dev; struct input_dev *input; input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; input->name = "Elan TrackPoint"; input->id.bustype = BUS_I2C; input->id.vendor = ELAN_VENDOR_ID; input->id.product = data->product_id; input_set_drvdata(input, data); input_set_capability(input, EV_REL, REL_X); input_set_capability(input, EV_REL, REL_Y); input_set_capability(input, EV_KEY, BTN_LEFT); 
input_set_capability(input, EV_KEY, BTN_RIGHT); input_set_capability(input, EV_KEY, BTN_MIDDLE); __set_bit(INPUT_PROP_POINTER, input->propbit); __set_bit(INPUT_PROP_POINTING_STICK, input->propbit); data->tp_input = input; return 0; } static int elan_setup_input_device(struct elan_tp_data *data) { struct device *dev = &data->client->dev; struct input_dev *input; unsigned int max_width = max(data->width_x, data->width_y); unsigned int min_width = min(data->width_x, data->width_y); int error; input = devm_input_allocate_device(dev); if (!input) return -ENOMEM; input->name = "Elan Touchpad"; input->id.bustype = BUS_I2C; input->id.vendor = ELAN_VENDOR_ID; input->id.product = data->product_id; input_set_drvdata(input, data); error = input_mt_init_slots(input, ETP_MAX_FINGERS, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED); if (error) { dev_err(dev, "failed to initialize MT slots: %d\n", error); return error; } __set_bit(EV_ABS, input->evbit); __set_bit(INPUT_PROP_POINTER, input->propbit); if (data->clickpad) { __set_bit(INPUT_PROP_BUTTONPAD, input->propbit); } else { __set_bit(BTN_RIGHT, input->keybit); if (data->middle_button) __set_bit(BTN_MIDDLE, input->keybit); } __set_bit(BTN_LEFT, input->keybit); /* Set up ST parameters */ input_set_abs_params(input, ABS_X, 0, data->max_x, 0, 0); input_set_abs_params(input, ABS_Y, 0, data->max_y, 0, 0); input_abs_set_res(input, ABS_X, data->x_res); input_abs_set_res(input, ABS_Y, data->y_res); input_set_abs_params(input, ABS_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); if (data->report_features & ETP_FEATURE_REPORT_MK) input_set_abs_params(input, ABS_TOOL_WIDTH, 0, ETP_FINGER_WIDTH, 0, 0); input_set_abs_params(input, ABS_DISTANCE, 0, 1, 0, 0); /* And MT parameters */ input_set_abs_params(input, ABS_MT_POSITION_X, 0, data->max_x, 0, 0); input_set_abs_params(input, ABS_MT_POSITION_Y, 0, data->max_y, 0, 0); input_abs_set_res(input, ABS_MT_POSITION_X, data->x_res); input_abs_set_res(input, ABS_MT_POSITION_Y, data->y_res); 
input_set_abs_params(input, ABS_MT_PRESSURE, 0, ETP_MAX_PRESSURE, 0, 0); if (data->report_features & ETP_FEATURE_REPORT_MK) { input_set_abs_params(input, ABS_MT_TOUCH_MAJOR, 0, ETP_FINGER_WIDTH * max_width, 0, 0); input_set_abs_params(input, ABS_MT_TOUCH_MINOR, 0, ETP_FINGER_WIDTH * min_width, 0, 0); } data->input = input; return 0; } static void elan_disable_regulator(void *_data) { struct elan_tp_data *data = _data; regulator_disable(data->vcc); } static int elan_probe(struct i2c_client *client) { const struct elan_transport_ops *transport_ops; struct device *dev = &client->dev; struct elan_tp_data *data; unsigned long irqflags; int error; if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_I2C) && i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { transport_ops = &elan_i2c_ops; } else if (IS_ENABLED(CONFIG_MOUSE_ELAN_I2C_SMBUS) && i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_I2C_BLOCK)) { transport_ops = &elan_smbus_ops; } else { dev_err(dev, "not a supported I2C/SMBus adapter\n"); return -EIO; } data = devm_kzalloc(dev, sizeof(struct elan_tp_data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); data->ops = transport_ops; data->client = client; init_completion(&data->fw_completion); mutex_init(&data->sysfs_mutex); data->vcc = devm_regulator_get(dev, "vcc"); if (IS_ERR(data->vcc)) return dev_err_probe(dev, PTR_ERR(data->vcc), "Failed to get 'vcc' regulator\n"); error = regulator_enable(data->vcc); if (error) { dev_err(dev, "Failed to enable regulator: %d\n", error); return error; } error = devm_add_action_or_reset(dev, elan_disable_regulator, data); if (error) { dev_err(dev, "Failed to add disable regulator action: %d\n", error); return error; } /* Make sure there is something at this address */ error = i2c_smbus_read_byte(client); if (error < 0) { dev_dbg(&client->dev, "nothing at this address: %d\n", error); return -ENXIO; } /* Initialize the touchpad. 
*/ error = elan_initialize(data, false); if (error) return error; error = elan_query_device_info(data); if (error) return error; error = elan_query_device_parameters(data); if (error) return error; dev_info(dev, "Elan Touchpad: Module ID: 0x%04x, Firmware: 0x%04x, Sample: 0x%04x, IAP: 0x%04x\n", data->product_id, data->fw_version, data->sm_version, data->iap_version); dev_dbg(dev, "Elan Touchpad Extra Information:\n" " Max ABS X,Y: %d,%d\n" " Width X,Y: %d,%d\n" " Resolution X,Y: %d,%d (dots/mm)\n" " ic type: 0x%x\n" " info pattern: 0x%x\n", data->max_x, data->max_y, data->width_x, data->width_y, data->x_res, data->y_res, data->ic_type, data->pattern); /* Set up input device properties based on queried parameters. */ error = elan_setup_input_device(data); if (error) return error; if (device_property_read_bool(&client->dev, "elan,trackpoint")) { error = elan_setup_trackpoint_input_device(data); if (error) return error; } /* * Platform code (ACPI, DTS) should normally set up interrupt * for us, but in case it did not let's fall back to using falling * edge to be compatible with older Chromebooks. 
*/ irqflags = irq_get_trigger_type(client->irq); if (!irqflags) irqflags = IRQF_TRIGGER_FALLING; error = devm_request_threaded_irq(dev, client->irq, NULL, elan_isr, irqflags | IRQF_ONESHOT, client->name, data); if (error) { dev_err(dev, "cannot register irq=%d\n", client->irq); return error; } error = input_register_device(data->input); if (error) { dev_err(dev, "failed to register input device: %d\n", error); return error; } if (data->tp_input) { error = input_register_device(data->tp_input); if (error) { dev_err(&client->dev, "failed to register TrackPoint input device: %d\n", error); return error; } } return 0; } static int __elan_suspend(struct elan_tp_data *data) { struct i2c_client *client = data->client; int error; if (device_may_wakeup(&client->dev)) return elan_sleep(data); /* Touchpad is not a wakeup source */ error = elan_set_power(data, false); if (error) return error; error = regulator_disable(data->vcc); if (error) { dev_err(&client->dev, "failed to disable regulator when suspending: %d\n", error); /* Attempt to power the chip back up */ elan_set_power(data, true); return error; } return 0; } static int elan_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct elan_tp_data *data = i2c_get_clientdata(client); int error; /* * We are taking the mutex to make sure sysfs operations are * complete before we attempt to bring the device into low[er] * power mode. 
*/ scoped_cond_guard(mutex_intr, return -EINTR, &data->sysfs_mutex) { disable_irq(client->irq); error = __elan_suspend(data); if (error) { enable_irq(client->irq); return error; } } return 0; } static int elan_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct elan_tp_data *data = i2c_get_clientdata(client); int error; if (!device_may_wakeup(dev)) { error = regulator_enable(data->vcc); if (error) { dev_err(dev, "error %d enabling regulator\n", error); goto err; } } error = elan_set_power(data, true); if (error) { dev_err(dev, "power up when resuming failed: %d\n", error); goto err; } error = elan_initialize(data, data->quirks & ETP_QUIRK_QUICK_WAKEUP); if (error) dev_err(dev, "initialize when resuming failed: %d\n", error); err: enable_irq(data->client->irq); return error; } static DEFINE_SIMPLE_DEV_PM_OPS(elan_pm_ops, elan_suspend, elan_resume); static const struct i2c_device_id elan_id[] = { { DRIVER_NAME }, { } }; MODULE_DEVICE_TABLE(i2c, elan_id); #ifdef CONFIG_ACPI #include <linux/input/elan-i2c-ids.h> MODULE_DEVICE_TABLE(acpi, elan_acpi_id); #endif #ifdef CONFIG_OF static const struct of_device_id elan_of_match[] = { { .compatible = "elan,ekth3000" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, elan_of_match); #endif static struct i2c_driver elan_driver = { .driver = { .name = DRIVER_NAME, .pm = pm_sleep_ptr(&elan_pm_ops), .acpi_match_table = ACPI_PTR(elan_acpi_id), .of_match_table = of_match_ptr(elan_of_match), .probe_type = PROBE_PREFER_ASYNCHRONOUS, .dev_groups = elan_sysfs_groups, }, .probe = elan_probe, .id_table = elan_id, }; module_i2c_driver(elan_driver); MODULE_AUTHOR("Duson Lin <[email protected]>"); MODULE_DESCRIPTION("Elan I2C/SMBus Touchpad driver"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only /* * netfilter module to enforce network quotas * * Sam Johnston <[email protected]> */ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_quota.h> #include <linux/module.h> struct xt_quota_priv { spinlock_t lock; uint64_t quota; }; MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sam Johnston <[email protected]>"); MODULE_DESCRIPTION("Xtables: countdown quota match"); MODULE_ALIAS("ipt_quota"); MODULE_ALIAS("ip6t_quota"); static bool quota_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_info *q = (void *)par->matchinfo; struct xt_quota_priv *priv = q->master; bool ret = q->flags & XT_QUOTA_INVERT; spin_lock_bh(&priv->lock); if (priv->quota >= skb->len) { priv->quota -= skb->len; ret = !ret; } else { /* we do not allow even small packets from now on */ priv->quota = 0; } spin_unlock_bh(&priv->lock); return ret; } static int quota_mt_check(const struct xt_mtchk_param *par) { struct xt_quota_info *q = par->matchinfo; if (q->flags & ~XT_QUOTA_MASK) return -EINVAL; q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); if (q->master == NULL) return -ENOMEM; spin_lock_init(&q->master->lock); q->master->quota = q->quota; return 0; } static void quota_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_quota_info *q = par->matchinfo; kfree(q->master); } static struct xt_match quota_mt_reg __read_mostly = { .name = "quota", .revision = 0, .family = NFPROTO_UNSPEC, .match = quota_mt, .checkentry = quota_mt_check, .destroy = quota_mt_destroy, .matchsize = sizeof(struct xt_quota_info), .usersize = offsetof(struct xt_quota_info, master), .me = THIS_MODULE, }; static int __init quota_mt_init(void) { return xt_register_match(&quota_mt_reg); } static void __exit quota_mt_exit(void) { xt_unregister_match(&quota_mt_reg); } module_init(quota_mt_init); module_exit(quota_mt_exit);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * OMAP3 Voltage Controller (VC) data
 *
 * Copyright (C) 2007, 2010 Texas Instruments, Inc.
 * Rajendra Nayak <[email protected]>
 * Lesly A M <[email protected]>
 * Thara Gopinath <[email protected]>
 *
 * Copyright (C) 2008, 2011 Nokia Corporation
 * Kalle Jokiniemi
 * Paul Walmsley
 */
#include <linux/io.h>
#include <linux/err.h>
#include <linux/init.h>

#include "common.h"

#include "prm-regbits-34xx.h"
#include "voltage.h"

#include "vc.h"

/*
 * VC data common to 34xx/36xx chips
 * XXX This stuff presumably belongs in the vc3xxx.c or vc.c file.
 */
static struct omap_vc_common omap3_vc_common = {
	/* PRM register offsets / bitfield positions shared by all channels */
	.bypass_val_reg	 = OMAP3_PRM_VC_BYPASS_VAL_OFFSET,
	.data_shift	 = OMAP3430_DATA_SHIFT,
	.slaveaddr_shift = OMAP3430_SLAVEADDR_SHIFT,
	.regaddr_shift	 = OMAP3430_REGADDR_SHIFT,
	.valid		 = OMAP3430_VALID_MASK,
	.cmd_on_shift	 = OMAP3430_VC_CMD_ON_SHIFT,
	.cmd_on_mask	 = OMAP3430_VC_CMD_ON_MASK,
	.cmd_onlp_shift	 = OMAP3430_VC_CMD_ONLP_SHIFT,
	.cmd_ret_shift	 = OMAP3430_VC_CMD_RET_SHIFT,
	.cmd_off_shift	 = OMAP3430_VC_CMD_OFF_SHIFT,
	.i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
	.i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
	.i2c_cfg_reg	 = OMAP3_PRM_VC_I2C_CFG_OFFSET,
	.i2c_mcode_mask	 = OMAP3430_MCODE_MASK,
};

/* VC channel for the MPU voltage domain (uses CMD_VAL_0 / the SA0 lane) */
struct omap_vc_channel omap3_vc_mpu = {
	.flags = OMAP_VC_CHANNEL_DEFAULT,
	.common = &omap3_vc_common,
	.smps_sa_reg = OMAP3_PRM_VC_SMPS_SA_OFFSET,
	.smps_volra_reg = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET,
	.smps_cmdra_reg = OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET,
	.cfg_channel_reg = OMAP3_PRM_VC_CH_CONF_OFFSET,
	.cmdval_reg = OMAP3_PRM_VC_CMD_VAL_0_OFFSET,
	.smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA0_MASK,
	.smps_volra_mask = OMAP3430_VOLRA0_MASK,
	.smps_cmdra_mask = OMAP3430_CMDRA0_MASK,
	.cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA0_SHIFT,
};

/* VC channel for the CORE voltage domain (uses CMD_VAL_1 / the SA1 lane) */
struct omap_vc_channel omap3_vc_core = {
	.common = &omap3_vc_common,
	.smps_sa_reg = OMAP3_PRM_VC_SMPS_SA_OFFSET,
	.smps_volra_reg = OMAP3_PRM_VC_SMPS_VOL_RA_OFFSET,
	.smps_cmdra_reg = OMAP3_PRM_VC_SMPS_CMD_RA_OFFSET,
	.cfg_channel_reg = OMAP3_PRM_VC_CH_CONF_OFFSET,
	.cmdval_reg = OMAP3_PRM_VC_CMD_VAL_1_OFFSET,
	.smps_sa_mask = OMAP3430_PRM_VC_SMPS_SA_SA1_MASK,
	.smps_volra_mask = OMAP3430_VOLRA1_MASK,
	.smps_cmdra_mask = OMAP3430_CMDRA1_MASK,
	.cfg_channel_sa_shift = OMAP3430_PRM_VC_SMPS_SA_SA1_SHIFT,
};

/*
 * Voltage levels for different operating modes: on, sleep, retention and off
 */
#define OMAP3_ON_VOLTAGE_UV		1200000
#define OMAP3_ONLP_VOLTAGE_UV		1000000
#define OMAP3_RET_VOLTAGE_UV		975000
#define OMAP3_OFF_VOLTAGE_UV		600000

/* Per-state target voltages (microvolts) for the MPU domain */
struct omap_vc_param omap3_mpu_vc_data = {
	.on = OMAP3_ON_VOLTAGE_UV,
	.onlp = OMAP3_ONLP_VOLTAGE_UV,
	.ret = OMAP3_RET_VOLTAGE_UV,
	.off = OMAP3_OFF_VOLTAGE_UV,
};

/* Per-state target voltages (microvolts) for the CORE domain */
struct omap_vc_param omap3_core_vc_data = {
	.on = OMAP3_ON_VOLTAGE_UV,
	.onlp = OMAP3_ONLP_VOLTAGE_UV,
	.ret = OMAP3_RET_VOLTAGE_UV,
	.off = OMAP3_OFF_VOLTAGE_UV,
};
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Interconnect framework driver for i.MX SoC
 *
 * Copyright (c) 2019, BayLibre
 * Copyright (c) 2019-2020, NXP
 * Author: Alexandre Bailon <[email protected]>
 * Author: Leonard Crestez <[email protected]>
 */
#ifndef __DRIVERS_INTERCONNECT_IMX_H
#define __DRIVERS_INTERCONNECT_IMX_H

#include <linux/args.h>
#include <linux/bits.h>
#include <linux/types.h>

#include <linux/interconnect-provider.h>

struct platform_device;

#define IMX_ICC_MAX_LINKS	4

/*
 * High throughput priority level in Regulator mode
 * Read Priority in Fixed/Limiter mode
 */
#define PRIORITY0_SHIFT	0
/*
 * Low throughput priority level in Regulator mode
 * Write Priority in Fixed/Limiter mode
 */
#define PRIORITY1_SHIFT	8
#define PRIORITY_MASK		0x7

#define PRIORITY_COMP_MARK	BIT(31)	/* Must set */

/* NoC transaction-regulation functional modes (IMX_NOC_MODE_REG values) */
#define IMX_NOC_MODE_FIXED		0
#define IMX_NOC_MODE_LIMITER		1
#define IMX_NOC_MODE_BYPASS		2
#define IMX_NOC_MODE_REGULATOR		3
#define IMX_NOC_MODE_UNCONFIGURED	0xFF

/* Per-port register offsets inside a NoC settings block */
#define IMX_NOC_PRIO_REG	0x8
#define IMX_NOC_MODE_REG	0xC
#define IMX_NOC_BANDWIDTH_REG	0x10
#define IMX_NOC_SATURATION	0x14
#define IMX_NOC_EXT_CTL_REG	0x18

/* Provider wrapper pairing the mapped NoC registers with the icc provider */
struct imx_icc_provider {
	void __iomem *noc_base;
	struct icc_provider provider;
};

/*
 * struct imx_icc_node_adj_desc - Describe a dynamically adjustable node
 * @bw_mul: numerator applied when scaling requested bandwidth to a rate
 * @bw_div: denominator applied when scaling requested bandwidth to a rate
 * @phandle_name: DT property naming the device whose clock is adjusted
 * @main_noc: true if this adjustment targets the main NoC itself
 */
struct imx_icc_node_adj_desc {
	unsigned int bw_mul, bw_div;
	const char *phandle_name;
	bool main_noc;
};

/*
 * struct imx_icc_node_desc - Describe an interconnect node
 * @name: name of the node
 * @id: a unique id to identify the node
 * @links: an array of slaves' node id
 * @num_links: number of id defined in links
 * @adj: optional dynamic-adjustment description, NULL if not adjustable
 */
struct imx_icc_node_desc {
	const char *name;
	u16 id;
	u16 links[IMX_ICC_MAX_LINKS];
	u16 num_links;
	const struct imx_icc_node_adj_desc *adj;
};

/*
 * struct imx_icc_noc_setting - Describe an interconnect node setting
 * @reg: register offset inside the NoC
 * @prio_level: priority level
 * @mode: functional mode
 * @ext_control: external input control
 */
struct imx_icc_noc_setting {
	u32 reg;
	u32 prio_level;
	u32 mode;
	u32 ext_control;
};

/* Build a node descriptor; trailing varargs become the links[] array. */
#define DEFINE_BUS_INTERCONNECT(_name, _id, _adj, ...)			\
	{								\
		.id = _id,						\
		.name = _name,						\
		.adj = _adj,						\
		.num_links = COUNT_ARGS(__VA_ARGS__),			\
		.links = { __VA_ARGS__ },				\
	}

/* A master has exactly one outgoing link and no adjustment hook. */
#define DEFINE_BUS_MASTER(_name, _id, _dest_id)				\
	DEFINE_BUS_INTERCONNECT(_name, _id, NULL, _dest_id)

/* A slave has no outgoing links but may carry an adjustment hook. */
#define DEFINE_BUS_SLAVE(_name, _id, _adj)				\
	DEFINE_BUS_INTERCONNECT(_name, _id, _adj)

int imx_icc_register(struct platform_device *pdev,
		     struct imx_icc_node_desc *nodes,
		     int nodes_count,
		     struct imx_icc_noc_setting *noc_settings);
void imx_icc_unregister(struct platform_device *pdev);

#endif /* __DRIVERS_INTERCONNECT_IMX_H */
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (C) Protonic Holland
 * Author: David Jander <[email protected]>
 */
/dts-v1/;

#include "stm32mp151.dtsi"
#include "stm32mp15xc.dtsi"
#include "stm32mp15-pinctrl.dtsi"
#include "stm32mp15xxaa-pinctrl.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>

/ {
	model = "Protonic MECT1S";
	compatible = "prt,mect1s", "st,stm32mp151";

	chosen {
		stdout-path = "serial0:1500000n8";
	};

	aliases {
		serial0 = &uart4;
		ethernet0 = &ethernet0;
		ethernet1 = &ethernet1;
		ethernet2 = &ethernet2;
		ethernet3 = &ethernet3;
		ethernet4 = &ethernet4;
	};

	/* Fixed board rails */
	v3v3: regulator-v3v3 {
		compatible = "regulator-fixed";
		regulator-name = "v3v3";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
	};

	v5v: regulator-v5v {
		compatible = "regulator-fixed";
		regulator-name = "v5v";
		regulator-min-microvolt = <5000000>;
		regulator-max-microvolt = <5000000>;
		regulator-always-on;
	};

	led {
		compatible = "gpio-leds";

		led-0 {
			color = <LED_COLOR_ID_RED>;
			function = LED_FUNCTION_DEBUG;
			gpios = <&gpioa 13 GPIO_ACTIVE_LOW>;
		};

		led-1 {
			color = <LED_COLOR_ID_GREEN>;
			function = LED_FUNCTION_DEBUG;
			gpios = <&gpioa 14 GPIO_ACTIVE_LOW>;
			linux,default-trigger = "heartbeat";
		};
	};
};

&clk_hse {
	clock-frequency = <24000000>;
};

&clk_lse {
	status = "disabled";
};

/* GMAC, wired straight to the SJA1105 switch CPU port over RMII */
&ethernet0 {
	status = "okay";
	pinctrl-0 = <&ethernet0_rmii_pins_a>;
	pinctrl-1 = <&ethernet0_rmii_sleep_pins_a>;
	pinctrl-names = "default", "sleep";
	phy-mode = "rmii";
	max-speed = <100>;
	st,eth-clk-sel;

	fixed-link {
		speed = <100>;
		full-duplex;
	};

	mdio0: mdio {
		#address-cells = <1>;
		#size-cells = <0>;
		compatible = "snps,dwmac-mdio";
	};
};

/* Board-specific RMII pin assignment overrides */
&{ethernet0_rmii_pins_a/pins1} {
	pinmux = <STM32_PINMUX('B', 12, AF11)>, /* ETH1_RMII_TXD0 */
		 <STM32_PINMUX('B', 13, AF11)>, /* ETH1_RMII_TXD1 */
		 <STM32_PINMUX('B', 11, AF11)>, /* ETH1_RMII_TX_EN */
		 <STM32_PINMUX('A', 2, AF11)>, /* ETH1_MDIO */
		 <STM32_PINMUX('C', 1, AF11)>; /* ETH1_MDC */
};

&{ethernet0_rmii_pins_a/pins2} {
	pinmux = <STM32_PINMUX('C', 4, AF11)>, /* ETH1_RMII_RXD0 */
		 <STM32_PINMUX('C', 5, AF11)>, /* ETH1_RMII_RXD1 */
		 <STM32_PINMUX('A', 1, AF11)>, /* ETH1_RMII_REF_CLK input */
		 <STM32_PINMUX('A', 7, AF11)>; /* ETH1_RMII_CRS_DV */
};

&{ethernet0_rmii_sleep_pins_a/pins1} {
	pinmux = <STM32_PINMUX('B', 12, ANALOG)>, /* ETH1_RMII_TXD0 */
		 <STM32_PINMUX('B', 13, ANALOG)>, /* ETH1_RMII_TXD1 */
		 <STM32_PINMUX('B', 11, ANALOG)>, /* ETH1_RMII_TX_EN */
		 <STM32_PINMUX('C', 4, ANALOG)>, /* ETH1_RMII_RXD0 */
		 <STM32_PINMUX('C', 5, ANALOG)>, /* ETH1_RMII_RXD1 */
		 <STM32_PINMUX('A', 1, ANALOG)>, /* ETH1_RMII_REF_CLK */
		 <STM32_PINMUX('A', 7, ANALOG)>; /* ETH1_RMII_CRS_DV */
};

&mdio0 {
	/*
	 * None of these DP83TG720R PHYs can be probed before switch@0 is
	 * probed, so match them by a compatible string carrying the PHY ID
	 * instead of relying on MDIO bus scanning.
	 */

	/* TI DP83TG720R */
	t1_phy0: ethernet-phy@8 {
		compatible = "ethernet-phy-id2000.a284";
		reg = <8>;
		interrupts-extended = <&gpioi 5 IRQ_TYPE_LEVEL_LOW>;
		reset-gpios = <&gpioh 13 GPIO_ACTIVE_LOW>;
		reset-assert-us = <10>;
		reset-deassert-us = <35>;
	};

	/* TI DP83TG720R */
	t1_phy1: ethernet-phy@c {
		compatible = "ethernet-phy-id2000.a284";
		reg = <12>;
		interrupts-extended = <&gpioj 0 IRQ_TYPE_LEVEL_LOW>;
		reset-gpios = <&gpioh 14 GPIO_ACTIVE_LOW>;
		reset-assert-us = <10>;
		reset-deassert-us = <35>;
	};

	/* TI DP83TG720R */
	t1_phy2: ethernet-phy@4 {
		compatible = "ethernet-phy-id2000.a284";
		reg = <4>;
		interrupts-extended = <&gpioi 7 IRQ_TYPE_LEVEL_LOW>;
		reset-gpios = <&gpioh 15 GPIO_ACTIVE_LOW>;
		reset-assert-us = <10>;
		reset-deassert-us = <35>;
	};

	/* TI DP83TG720R */
	t1_phy3: ethernet-phy@d {
		compatible = "ethernet-phy-id2000.a284";
		reg = <13>;
		interrupts-extended = <&gpioi 15 IRQ_TYPE_LEVEL_LOW>;
		reset-gpios = <&gpioi 13 GPIO_ACTIVE_LOW>;
		reset-assert-us = <10000>;
		reset-deassert-us = <1000>;
	};
};

&qspi {
	pinctrl-names = "default", "sleep";
	pinctrl-0 = <&qspi_clk_pins_a
		     &qspi_bk1_pins_a
		     &qspi_cs1_pins_a>;
	pinctrl-1 = <&qspi_clk_sleep_pins_a
		     &qspi_bk1_sleep_pins_a
		     &qspi_cs1_sleep_pins_a>;
	status = "okay";

	flash@0 {
		compatible = "jedec,spi-nor";
		reg = <0>;
		spi-rx-bus-width = <4>;
		spi-max-frequency = <1000000>;
		#address-cells = <1>;
		#size-cells = <1>;
	};
};

&{qspi_bk1_pins_a/pins} {
	/delete-property/ bias-disable;
	bias-pull-up;
	drive-push-pull;
	slew-rate = <1>;
};

&spi2 {
	pinctrl-0 = <&spi2_pins_b>;
	pinctrl-names = "default";
	cs-gpios = <&gpioj 3 GPIO_ACTIVE_LOW>;
	/delete-property/dmas;
	/delete-property/dma-names;
	status = "okay";

	/* NXP SJA1105Q 5-port automotive Ethernet switch */
	switch@0 {
		compatible = "nxp,sja1105q";
		reg = <0>;
		spi-max-frequency = <1000000>;
		spi-rx-delay-us = <1>;
		spi-tx-delay-us = <1>;
		spi-cpha;

		ports {
			#address-cells = <1>;
			#size-cells = <0>;

			ethernet1: port@0 {
				reg = <0>;
				label = "t10";
				phy-mode = "rgmii-id";
				phy-handle = <&t1_phy0>;
			};

			ethernet2: port@1 {
				reg = <1>;
				label = "t11";
				phy-mode = "rgmii-id";
				phy-handle = <&t1_phy1>;
			};

			ethernet3: port@2 {
				reg = <2>;
				label = "t12";
				phy-mode = "rgmii-id";
				phy-handle = <&t1_phy2>;
			};

			ethernet4: port@3 {
				reg = <3>;
				label = "t13";
				phy-mode = "rgmii-id";
				phy-handle = <&t1_phy3>;
			};

			port@4 {
				reg = <4>;
				label = "cpu";
				ethernet = <&ethernet0>;
				phy-mode = "rmii";
				/* RGMII mode is not working properly, using RMII instead. */
				fixed-link {
					speed = <100>;
					full-duplex;
				};
			};
		};
	};
};

&uart4 {
	pinctrl-names = "default", "sleep", "idle";
	pinctrl-0 = <&uart4_pins_a>;
	pinctrl-1 = <&uart4_sleep_pins_a>;
	pinctrl-2 = <&uart4_idle_pins_a>;
	/delete-property/dmas;
	/delete-property/dma-names;
	status = "okay";
};

&usbh_ehci {
	status = "okay";
};

&usbotg_hs {
	dr_mode = "host";
	pinctrl-0 = <&usbotg_hs_pins_a>;
	pinctrl-names = "default";
	phys = <&usbphyc_port1 0>;
	phy-names = "usb2-phy";
	vbus-supply = <&v5v>;
	status = "okay";
};

&usbphyc {
	status = "okay";
};

&usbphyc_port0 {
	phy-supply = <&v3v3>;
};

&usbphyc_port1 {
	phy-supply = <&v3v3>;
};
// SPDX-License-Identifier: GPL-2.0-only
/*
 * STMicroelectronics uvis25 sensor driver
 *
 * Core (bus-agnostic) part of the driver: probing, one-shot reads via
 * sysfs, and triggered-buffer capture driven by the sensor's data-ready
 * interrupt. Bus-specific front-ends supply the regmap.
 *
 * Copyright 2017 STMicroelectronics Inc.
 *
 * Lorenzo Bianconi <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/iio/sysfs.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/interrupt.h>
#include <linux/irqreturn.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/buffer.h>
#include <linux/regmap.h>

#include "st_uvis25.h"

/* Device identification register and its expected value */
#define ST_UVIS25_REG_WHOAMI_ADDR	0x0f
#define ST_UVIS25_REG_WHOAMI_VAL	0xca
/* CTRL1: ODR enable (device samples at a fixed rate) and block-data-update */
#define ST_UVIS25_REG_CTRL1_ADDR	0x20
#define ST_UVIS25_REG_ODR_MASK		BIT(0)
#define ST_UVIS25_REG_BDU_MASK		BIT(1)
/* CTRL2: reboot memory content */
#define ST_UVIS25_REG_CTRL2_ADDR	0x21
#define ST_UVIS25_REG_BOOT_MASK		BIT(7)
/* CTRL3: interrupt line polarity (active high/low) */
#define ST_UVIS25_REG_CTRL3_ADDR	0x22
#define ST_UVIS25_REG_HL_MASK		BIT(7)
/* STATUS: UV data-available flag */
#define ST_UVIS25_REG_STATUS_ADDR	0x27
#define ST_UVIS25_REG_UV_DA_MASK	BIT(0)
/* Output register: one byte of processed UV-index data */
#define ST_UVIS25_REG_OUT_ADDR		0x28

/* Single UV-index channel (8-bit, already processed by the sensor) plus timestamp */
static const struct iio_chan_spec st_uvis25_channels[] = {
	{
		.type = IIO_UVINDEX,
		.address = ST_UVIS25_REG_OUT_ADDR,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.scan_index = 0,
		.scan_type = {
			.sign = 'u',
			.realbits = 8,
			.storagebits = 8,
		},
	},
	IIO_CHAN_SOFT_TIMESTAMP(1),
};

/*
 * Read and verify the whoami register; returns 0 on match, -ENODEV on
 * mismatch or a negative regmap error.
 */
static int st_uvis25_check_whoami(struct st_uvis25_hw *hw)
{
	int err, data;

	err = regmap_read(hw->regmap, ST_UVIS25_REG_WHOAMI_ADDR, &data);
	if (err < 0) {
		dev_err(regmap_get_device(hw->regmap),
			"failed to read whoami register\n");
		return err;
	}

	if (data != ST_UVIS25_REG_WHOAMI_VAL) {
		dev_err(regmap_get_device(hw->regmap),
			"wrong whoami {%02x vs %02x}\n",
			data, ST_UVIS25_REG_WHOAMI_VAL);
		return -ENODEV;
	}

	return 0;
}

/* Start/stop sampling by toggling the ODR bit; caches the state in hw->enabled. */
static int st_uvis25_set_enable(struct st_uvis25_hw *hw, bool enable)
{
	int err;

	err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
				 ST_UVIS25_REG_ODR_MASK, enable);
	if (err < 0)
		return err;

	hw->enabled = enable;

	return 0;
}

/*
 * One-shot read: enable the sensor, wait for a conversion, disable it
 * again, then read the output register.
 * NOTE(review): the 1.5 s sleep presumably covers one conversion period
 * of the sensor — confirm against the UVIS25 datasheet.
 */
static int st_uvis25_read_oneshot(struct st_uvis25_hw *hw, u8 addr, int *val)
{
	int err;

	err = st_uvis25_set_enable(hw, true);
	if (err < 0)
		return err;

	msleep(1500);

	/*
	 * in order to avoid possible race conditions with interrupt
	 * generation, disable the sensor first and then poll output
	 * register. That sequence guarantees the interrupt will be reset
	 * when irq line is unmasked
	 */
	err = st_uvis25_set_enable(hw, false);
	if (err < 0)
		return err;

	err = regmap_read(hw->regmap, addr, val);

	return err < 0 ? err : IIO_VAL_INT;
}

/* IIO read_raw hook: only IIO_CHAN_INFO_PROCESSED is supported. */
static int st_uvis25_read_raw(struct iio_dev *iio_dev,
			      struct iio_chan_spec const *ch,
			      int *val, int *val2, long mask)
{
	int ret;

	/* reject the read if buffered capture is currently active */
	ret = iio_device_claim_direct_mode(iio_dev);
	if (ret)
		return ret;

	switch (mask) {
	case IIO_CHAN_INFO_PROCESSED: {
		struct st_uvis25_hw *hw = iio_priv(iio_dev);

		/*
		 * mask irq line during oneshot read since the sensor
		 * does not export the capability to disable data-ready line
		 * in the register map and it is enabled by default.
		 * If the line is unmasked during read_raw() it will be set
		 * active and never reset since the trigger is disabled
		 */
		if (hw->irq > 0)
			disable_irq(hw->irq);
		ret = st_uvis25_read_oneshot(hw, ch->address, val);
		if (hw->irq > 0)
			enable_irq(hw->irq);
		break;
	}
	default:
		ret = -EINVAL;
		break;
	}

	iio_device_release_direct_mode(iio_dev);

	return ret;
}

/*
 * Threaded IRQ handler: checks the data-available status bit and, if
 * set, kicks the IIO trigger so the buffer handler runs.
 */
static irqreturn_t st_uvis25_trigger_handler_thread(int irq, void *private)
{
	struct st_uvis25_hw *hw = private;
	int err, status;

	err = regmap_read(hw->regmap, ST_UVIS25_REG_STATUS_ADDR, &status);
	if (err < 0)
		return IRQ_HANDLED;

	if (!(status & ST_UVIS25_REG_UV_DA_MASK))
		return IRQ_NONE;

	iio_trigger_poll_nested(hw->trig);

	return IRQ_HANDLED;
}

/*
 * Configure the interrupt line polarity from the firmware-provided IRQ
 * trigger type, request the threaded IRQ and register an IIO trigger.
 * All resources are devm-managed.
 */
static int st_uvis25_allocate_trigger(struct iio_dev *iio_dev)
{
	struct st_uvis25_hw *hw = iio_priv(iio_dev);
	struct device *dev = regmap_get_device(hw->regmap);
	bool irq_active_low = false;
	unsigned long irq_type;
	int err;

	irq_type = irq_get_trigger_type(hw->irq);

	switch (irq_type) {
	case IRQF_TRIGGER_HIGH:
	case IRQF_TRIGGER_RISING:
		break;
	case IRQF_TRIGGER_LOW:
	case IRQF_TRIGGER_FALLING:
		irq_active_low = true;
		break;
	default:
		dev_info(dev, "mode %lx unsupported\n", irq_type);
		return -EINVAL;
	}

	/* program the sensor's INT pin polarity to match the IRQ line */
	err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL3_ADDR,
				 ST_UVIS25_REG_HL_MASK, irq_active_low);
	if (err < 0)
		return err;

	err = devm_request_threaded_irq(dev, hw->irq, NULL,
					st_uvis25_trigger_handler_thread,
					irq_type | IRQF_ONESHOT,
					iio_dev->name, hw);
	if (err) {
		dev_err(dev, "failed to request trigger irq %d\n",
			hw->irq);
		return err;
	}

	hw->trig = devm_iio_trigger_alloc(dev, "%s-trigger",
					  iio_dev->name);
	if (!hw->trig)
		return -ENOMEM;

	iio_trigger_set_drvdata(hw->trig, iio_dev);

	return devm_iio_trigger_register(dev, hw->trig);
}

/* Buffer pre-enable: start sampling before capture begins. */
static int st_uvis25_buffer_preenable(struct iio_dev *iio_dev)
{
	return st_uvis25_set_enable(iio_priv(iio_dev), true);
}

/* Buffer post-disable: stop sampling once capture ends. */
static int st_uvis25_buffer_postdisable(struct iio_dev *iio_dev)
{
	return st_uvis25_set_enable(iio_priv(iio_dev), false);
}

static const struct iio_buffer_setup_ops st_uvis25_buffer_ops = {
	.preenable = st_uvis25_buffer_preenable,
	.postdisable = st_uvis25_buffer_postdisable,
};

/*
 * Pollfunc bottom half: read one sample into hw->scan and push it,
 * with timestamp, into the IIO buffer.
 */
static irqreturn_t st_uvis25_buffer_handler_thread(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *iio_dev = pf->indio_dev;
	struct st_uvis25_hw *hw = iio_priv(iio_dev);
	unsigned int val;
	int err;

	err = regmap_read(hw->regmap, ST_UVIS25_REG_OUT_ADDR, &val);
	if (err < 0)
		goto out;

	hw->scan.chan = val;

	iio_push_to_buffers_with_timestamp(iio_dev, &hw->scan,
					   iio_get_time_ns(iio_dev));
out:
	iio_trigger_notify_done(hw->trig);

	return IRQ_HANDLED;
}

/* Set up the devm-managed triggered buffer for this device. */
static int st_uvis25_allocate_buffer(struct iio_dev *iio_dev)
{
	struct st_uvis25_hw *hw = iio_priv(iio_dev);

	return devm_iio_triggered_buffer_setup(regmap_get_device(hw->regmap),
					       iio_dev, NULL,
					       st_uvis25_buffer_handler_thread,
					       &st_uvis25_buffer_ops);
}

static const struct iio_info st_uvis25_info = {
	.read_raw = st_uvis25_read_raw,
};

/*
 * Reboot the sensor's register content and enable block-data-update.
 * NOTE(review): the 2 s sleep presumably waits for the boot sequence to
 * complete — confirm against the UVIS25 datasheet.
 */
static int st_uvis25_init_sensor(struct st_uvis25_hw *hw)
{
	int err;

	err = regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL2_ADDR,
				 ST_UVIS25_REG_BOOT_MASK, 1);
	if (err < 0)
		return err;

	msleep(2000);

	return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
				  ST_UVIS25_REG_BDU_MASK, 1);
}

/**
 * st_uvis25_probe - common probe entry called by the I2C/SPI front-ends
 * @dev: parent device
 * @irq: interrupt line (<= 0 disables buffered capture)
 * @regmap: bus-specific register map
 *
 * Verifies the device identity, initializes it, and registers the IIO
 * device (plus buffer/trigger when an IRQ is available). Everything is
 * devm-managed. Returns 0 on success or a negative error code.
 */
int st_uvis25_probe(struct device *dev, int irq, struct regmap *regmap)
{
	struct st_uvis25_hw *hw;
	struct iio_dev *iio_dev;
	int err;

	iio_dev = devm_iio_device_alloc(dev, sizeof(*hw));
	if (!iio_dev)
		return -ENOMEM;

	dev_set_drvdata(dev, iio_dev);

	hw = iio_priv(iio_dev);
	hw->irq = irq;
	hw->regmap = regmap;

	err = st_uvis25_check_whoami(hw);
	if (err < 0)
		return err;

	iio_dev->modes = INDIO_DIRECT_MODE;
	iio_dev->channels = st_uvis25_channels;
	iio_dev->num_channels = ARRAY_SIZE(st_uvis25_channels);
	iio_dev->name = ST_UVIS25_DEV_NAME;
	iio_dev->info = &st_uvis25_info;

	err = st_uvis25_init_sensor(hw);
	if (err < 0)
		return err;

	if (hw->irq > 0) {
		err = st_uvis25_allocate_buffer(iio_dev);
		if (err < 0)
			return err;

		err = st_uvis25_allocate_trigger(iio_dev);
		if (err)
			return err;
	}

	return devm_iio_device_register(dev, iio_dev);
}
EXPORT_SYMBOL_NS(st_uvis25_probe, "IIO_UVIS25");

/* PM suspend: stop sampling; hw->enabled keeps the pre-suspend state. */
static int st_uvis25_suspend(struct device *dev)
{
	struct iio_dev *iio_dev = dev_get_drvdata(dev);
	struct st_uvis25_hw *hw = iio_priv(iio_dev);

	return regmap_clear_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
				 ST_UVIS25_REG_ODR_MASK);
}

/* PM resume: restart sampling only if it was enabled before suspend. */
static int st_uvis25_resume(struct device *dev)
{
	struct iio_dev *iio_dev = dev_get_drvdata(dev);
	struct st_uvis25_hw *hw = iio_priv(iio_dev);

	if (hw->enabled)
		return regmap_update_bits(hw->regmap, ST_UVIS25_REG_CTRL1_ADDR,
					  ST_UVIS25_REG_ODR_MASK, 1);

	return 0;
}

EXPORT_NS_SIMPLE_DEV_PM_OPS(st_uvis25_pm_ops, st_uvis25_suspend,
			    st_uvis25_resume, IIO_UVIS25);

MODULE_AUTHOR("Lorenzo Bianconi <[email protected]>");
MODULE_DESCRIPTION("STMicroelectronics uvis25 sensor driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 */

#ifndef __IA_CSS_DVS_PARAM_H
#define __IA_CSS_DVS_PARAM_H

#include <type_support.h>

#if !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) && !defined(PARAMBIN_GENERATION)
#include "dma.h"
#endif /* !defined(ENABLE_TPROXY) && !defined(ENABLE_CRUN_FOR_TD) */

#include "uds/uds_1.0/ia_css_uds_param.h"

/* DVS reference frame: ISP-side configuration describing the block grid */
struct sh_css_isp_dvs_isp_config {
	u32 num_horizontal_blocks;	/* grid width in blocks */
	u32 num_vertical_blocks;	/* grid height in blocks */
};

#endif /* __IA_CSS_DVS_PARAM_H */
/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

/*
 * SMC (power-management microcontroller) firmware upload and messaging
 * helpers for RV7xx/Evergreen/Cayman GPUs. The per-ASIC interrupt-vector
 * tables below are copied into SMC SRAM alongside the ucode.
 */

#include <linux/firmware.h>

#include "radeon.h"
#include "rv770d.h"
#include "rv770_dpm.h"
#include "rv770_smc.h"
#include "atom.h"
#include "radeon_ucode.h"

/* First SMC SRAM address that maps onto the SMC_ISR_FFD8_FFDB registers */
#define FIRST_SMC_INT_VECT_REG 0xFFD8
#define FIRST_INT_VECT_S19     0xFFC0

/* Per-ASIC SMC interrupt-vector blobs (big-endian byte pairs, 64 bytes each) */
static const u8 rv770_smc_int_vectors[] = {
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x0C, 0xD7, 0x08, 0x2B, 0x08, 0x10,
	0x03, 0x51, 0x03, 0x51, 0x03, 0x51, 0x03, 0x51
};

static const u8 rv730_smc_int_vectors[] = {
	0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15,
	0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15,
	0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15,
	0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15,
	0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15,
	0x08, 0x15, 0x08, 0x15, 0x08, 0x15, 0x08, 0x15,
	0x08, 0x15, 0x0C, 0xBB, 0x08, 0x30, 0x08, 0x15,
	0x03, 0x56, 0x03, 0x56, 0x03, 0x56, 0x03, 0x56
};

static const u8 rv710_smc_int_vectors[] = {
	0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04,
	0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04,
	0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04,
	0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04,
	0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04,
	0x08, 0x04, 0x08, 0x04, 0x08, 0x04, 0x08, 0x04,
	0x08, 0x04, 0x0C, 0xCB, 0x08, 0x1F, 0x08, 0x04,
	0x03, 0x51, 0x03, 0x51, 0x03, 0x51, 0x03, 0x51
};

static const u8 rv740_smc_int_vectors[] = {
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x08, 0x10, 0x08, 0x10, 0x08, 0x10,
	0x08, 0x10, 0x0C, 0xD7, 0x08, 0x2B, 0x08, 0x10,
	0x03, 0x51, 0x03, 0x51, 0x03, 0x51, 0x03, 0x51
};

static const u8 cedar_smc_int_vectors[] = {
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x11, 0x8B, 0x0B, 0x20, 0x0B, 0x05,
	0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6
};

static const u8 redwood_smc_int_vectors[] = {
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x11, 0x8B, 0x0B, 0x20, 0x0B, 0x05,
	0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6
};

static const u8 juniper_smc_int_vectors[] = {
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x11, 0x8B, 0x0B, 0x20, 0x0B, 0x05,
	0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6
};

static const u8 cypress_smc_int_vectors[] = {
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05, 0x0B, 0x05,
	0x0B, 0x05, 0x11, 0x8B, 0x0B, 0x20, 0x0B, 0x05,
	0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6, 0x04, 0xF6
};

static const u8 barts_smc_int_vectors[] = {
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x12, 0xAA, 0x0C, 0x2F, 0x15, 0xF6,
	0x15, 0xF6, 0x05, 0x0A, 0x05, 0x0A, 0x05, 0x0A
};

static const u8 turks_smc_int_vectors[] = {
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x12, 0xAA, 0x0C, 0x2F, 0x15, 0xF6,
	0x15, 0xF6, 0x05, 0x0A, 0x05, 0x0A, 0x05, 0x0A
};

static const u8 caicos_smc_int_vectors[] = {
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14, 0x0C, 0x14,
	0x0C, 0x14, 0x12, 0xAA, 0x0C, 0x2F, 0x15, 0xF6,
	0x15, 0xF6, 0x05, 0x0A, 0x05, 0x0A, 0x05, 0x0A
};

static const u8 cayman_smc_int_vectors[] = {
	0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05,
	0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05,
	0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05,
	0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05,
	0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05,
	0x12, 0x05, 0x12, 0x05, 0x12, 0x05, 0x12, 0x05,
	0x12, 0x05, 0x18, 0xEA, 0x12, 0x20, 0x1C, 0x34,
	0x1C, 0x34, 0x08, 0x72, 0x08, 0x72, 0x08, 0x72
};

/*
 * Latch an SMC SRAM address into the index register (auto-increment
 * disabled). The address must be dword-aligned and within @limit.
 * Caller must hold rdev->smc_idx_lock.
 */
static int rv770_set_smc_sram_address(struct radeon_device *rdev,
				      u16 smc_address, u16 limit)
{
	u32 addr;

	if (smc_address & 3)
		return -EINVAL;
	if ((smc_address + 3) > limit)
		return -EINVAL;

	addr = smc_address;
	addr |= SMC_SRAM_AUTO_INC_DIS;

	WREG32(SMC_SRAM_ADDR, addr);

	return 0;
}

/*
 * Copy @byte_count bytes from @src into SMC SRAM at @smc_start_address
 * (dword-aligned). A trailing partial dword is merged into the existing
 * SRAM contents via read-modify-write. Takes rdev->smc_idx_lock itself.
 */
int rv770_copy_bytes_to_smc(struct radeon_device *rdev,
			    u16 smc_start_address, const u8 *src,
			    u16 byte_count, u16 limit)
{
	unsigned long flags;
	u32 data, original_data, extra_shift;
	u16 addr;
	int ret = 0;

	if (smc_start_address & 3)
		return -EINVAL;
	if ((smc_start_address + byte_count) > limit)
		return -EINVAL;

	addr = smc_start_address;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	while (byte_count >= 4) {
		/* SMC address space is BE */
		data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3];

		ret = rv770_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_SRAM_DATA, data);

		src += 4;
		byte_count -= 4;
		addr += 4;
	}

	/* RMW for final bytes */
	if (byte_count > 0) {
		data = 0;

		ret = rv770_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			goto done;

		original_data = RREG32(SMC_SRAM_DATA);

		extra_shift = 8 * (4 - byte_count);

		while (byte_count > 0) {
			/* SMC address space is BE */
			data = (data << 8) + *src++;
			byte_count--;
		}

		data <<= extra_shift;

		/* keep the low bytes that were already in SRAM */
		data |= (original_data & ~((~0UL) << extra_shift));

		ret = rv770_set_smc_sram_address(rdev, addr, limit);
		if (ret)
			goto done;

		WREG32(SMC_SRAM_DATA, data);
	}

done:
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

	return ret;
}

/*
 * Write the interrupt-vector table into the SMC_ISR_FFD8_FFDB register
 * window. Vectors below FIRST_SMC_INT_VECT_REG are skipped (that part
 * of the table is not register-backed). @byte_count must be a multiple
 * of 4.
 */
static int rv770_program_interrupt_vectors(struct radeon_device *rdev,
					   u32 smc_first_vector, const u8 *src,
					   u32 byte_count)
{
	u32 tmp, i;

	if (byte_count % 4)
		return -EINVAL;

	if (smc_first_vector < FIRST_SMC_INT_VECT_REG) {
		tmp = FIRST_SMC_INT_VECT_REG - smc_first_vector;

		if (tmp > byte_count)
			return 0;

		byte_count -= tmp;
		src += tmp;
		smc_first_vector = FIRST_SMC_INT_VECT_REG;
	}

	for (i = 0; i < byte_count; i += 4) {
		/* SMC address space is BE */
		tmp = (src[i] << 24) | (src[i + 1] << 16) | (src[i + 2] << 8) | src[i + 3];

		WREG32(SMC_ISR_FFD8_FFDB + i, tmp);
	}

	return 0;
}

/* Release the SMC from reset. */
void rv770_start_smc(struct radeon_device *rdev)
{
	WREG32_P(SMC_IO, SMC_RST_N, ~SMC_RST_N);
}

/* Hold the SMC in reset. */
void rv770_reset_smc(struct radeon_device *rdev)
{
	WREG32_P(SMC_IO, 0, ~SMC_RST_N);
}

/* Gate the SMC clock. */
void rv770_stop_smc_clock(struct radeon_device *rdev)
{
	WREG32_P(SMC_IO, 0, ~SMC_CLK_EN);
}

/* Ungate the SMC clock. */
void rv770_start_smc_clock(struct radeon_device *rdev)
{
	WREG32_P(SMC_IO, SMC_CLK_EN, ~SMC_CLK_EN);
}

/* The SMC is running iff it is out of reset and clocked. */
bool rv770_is_smc_running(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(SMC_IO);

	if ((tmp & SMC_RST_N) && (tmp & SMC_CLK_EN))
		return true;
	else
		return false;
}

/*
 * Post @msg to the SMC mailbox and poll (up to rdev->usec_timeout us)
 * for a non-zero response code, which is returned as a PPSMC_Result.
 */
PPSMC_Result rv770_send_msg_to_smc(struct radeon_device *rdev, PPSMC_Msg msg)
{
	u32 tmp;
	int i;
	PPSMC_Result result;

	if (!rv770_is_smc_running(rdev))
		return PPSMC_Result_Failed;

	WREG32_P(SMC_MSG, HOST_SMC_MSG(msg), ~HOST_SMC_MSG_MASK);

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
		tmp >>= HOST_SMC_RESP_SHIFT;
		if (tmp != 0)
			break;
		udelay(1);
	}

	tmp = RREG32(SMC_MSG) & HOST_SMC_RESP_MASK;
	tmp >>= HOST_SMC_RESP_SHIFT;

	result = (PPSMC_Result)tmp;
	return result;
}

/*
 * Best-effort wait for the SMC to enter stop mode; always reports
 * PPSMC_Result_OK (a timeout is not treated as an error here).
 */
PPSMC_Result rv770_wait_for_smc_inactive(struct radeon_device *rdev)
{
	int i;
	PPSMC_Result result = PPSMC_Result_OK;

	if (!rv770_is_smc_running(rdev))
		return result;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(SMC_IO) & SMC_STOP_MODE)
			break;
		udelay(1);
	}

	return result;
}

/* Zero-fill SMC SRAM up to @limit, dword at a time. */
static void rv770_clear_smc_sram(struct radeon_device *rdev, u16 limit)
{
	unsigned long flags;
	u16 i;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	for (i = 0; i < limit; i += 4) {
		rv770_set_smc_sram_address(rdev, i, limit);
		WREG32(SMC_SRAM_DATA, 0);
	}
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);
}

/*
 * Upload the SMC ucode (from rdev->smc_fw) and the matching per-ASIC
 * interrupt-vector table into SMC SRAM. Returns 0 on success or a
 * negative error code; BUG()s on an unknown ASIC family.
 */
int rv770_load_smc_ucode(struct radeon_device *rdev,
			 u16 limit)
{
	int ret;
	const u8 *int_vect;
	u16 int_vect_start_address;
	u16 int_vect_size;
	const u8 *ucode_data;
	u16 ucode_start_address;
	u16 ucode_size;

	if (!rdev->smc_fw)
		return -EINVAL;

	rv770_clear_smc_sram(rdev, limit);

	switch (rdev->family) {
	case CHIP_RV770:
		ucode_start_address = RV770_SMC_UCODE_START;
		ucode_size = RV770_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&rv770_smc_int_vectors;
		int_vect_start_address = RV770_SMC_INT_VECTOR_START;
		int_vect_size = RV770_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_RV730:
		ucode_start_address = RV730_SMC_UCODE_START;
		ucode_size = RV730_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&rv730_smc_int_vectors;
		int_vect_start_address = RV730_SMC_INT_VECTOR_START;
		int_vect_size = RV730_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_RV710:
		ucode_start_address = RV710_SMC_UCODE_START;
		ucode_size = RV710_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&rv710_smc_int_vectors;
		int_vect_start_address = RV710_SMC_INT_VECTOR_START;
		int_vect_size = RV710_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_RV740:
		ucode_start_address = RV740_SMC_UCODE_START;
		ucode_size = RV740_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&rv740_smc_int_vectors;
		int_vect_start_address = RV740_SMC_INT_VECTOR_START;
		int_vect_size = RV740_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_CEDAR:
		ucode_start_address = CEDAR_SMC_UCODE_START;
		ucode_size = CEDAR_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&cedar_smc_int_vectors;
		int_vect_start_address = CEDAR_SMC_INT_VECTOR_START;
		int_vect_size = CEDAR_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_REDWOOD:
		ucode_start_address = REDWOOD_SMC_UCODE_START;
		ucode_size = REDWOOD_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&redwood_smc_int_vectors;
		int_vect_start_address = REDWOOD_SMC_INT_VECTOR_START;
		int_vect_size = REDWOOD_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_JUNIPER:
		ucode_start_address = JUNIPER_SMC_UCODE_START;
		ucode_size = JUNIPER_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&juniper_smc_int_vectors;
		int_vect_start_address = JUNIPER_SMC_INT_VECTOR_START;
		int_vect_size = JUNIPER_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_CYPRESS:
	case CHIP_HEMLOCK:
		ucode_start_address = CYPRESS_SMC_UCODE_START;
		ucode_size = CYPRESS_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&cypress_smc_int_vectors;
		int_vect_start_address = CYPRESS_SMC_INT_VECTOR_START;
		int_vect_size = CYPRESS_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_BARTS:
		ucode_start_address = BARTS_SMC_UCODE_START;
		ucode_size = BARTS_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&barts_smc_int_vectors;
		int_vect_start_address = BARTS_SMC_INT_VECTOR_START;
		int_vect_size = BARTS_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_TURKS:
		ucode_start_address = TURKS_SMC_UCODE_START;
		ucode_size = TURKS_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&turks_smc_int_vectors;
		int_vect_start_address = TURKS_SMC_INT_VECTOR_START;
		int_vect_size = TURKS_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_CAICOS:
		ucode_start_address = CAICOS_SMC_UCODE_START;
		ucode_size = CAICOS_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&caicos_smc_int_vectors;
		int_vect_start_address = CAICOS_SMC_INT_VECTOR_START;
		int_vect_size = CAICOS_SMC_INT_VECTOR_SIZE;
		break;
	case CHIP_CAYMAN:
		ucode_start_address = CAYMAN_SMC_UCODE_START;
		ucode_size = CAYMAN_SMC_UCODE_SIZE;
		int_vect = (const u8 *)&cayman_smc_int_vectors;
		int_vect_start_address = CAYMAN_SMC_INT_VECTOR_START;
		int_vect_size = CAYMAN_SMC_INT_VECTOR_SIZE;
		break;
	default:
		DRM_ERROR("unknown asic in smc ucode loader\n");
		BUG();
	}

	/* load the ucode */
	ucode_data = (const u8 *)rdev->smc_fw->data;
	ret = rv770_copy_bytes_to_smc(rdev, ucode_start_address,
				      ucode_data, ucode_size, limit);
	if (ret)
		return ret;

	/* set up the int vectors */
	ret = rv770_program_interrupt_vectors(rdev, int_vect_start_address,
					      int_vect, int_vect_size);
	if (ret)
		return ret;

	return 0;
}

/* Read one dword from SMC SRAM at @smc_address into @value. */
int rv770_read_smc_sram_dword(struct radeon_device *rdev,
			      u16 smc_address, u32 *value, u16 limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
	if (ret == 0)
		*value = RREG32(SMC_SRAM_DATA);
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

	return ret;
}

/* Write one dword to SMC SRAM at @smc_address. */
int rv770_write_smc_sram_dword(struct radeon_device *rdev,
			       u16 smc_address, u32 value, u16 limit)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&rdev->smc_idx_lock, flags);
	ret = rv770_set_smc_sram_address(rdev, smc_address, limit);
	if (ret == 0)
		WREG32(SMC_SRAM_DATA, value);
	spin_unlock_irqrestore(&rdev->smc_idx_lock, flags);

	return ret;
}
/*
 * Copyright 2020 Mauro Rossi <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef DAL_DC_DCE_DCE60_CLK_MGR_H_
#define DAL_DC_DCE_DCE60_CLK_MGR_H_

#include "dc.h"

/*
 * dce60_clk_mgr_construct() - initialize a clk_mgr_internal instance for
 * DCE 6.0 hardware.
 * @ctx: display core context
 * @clk_mgr_dce: clock manager instance to construct (caller-allocated)
 */
void dce60_clk_mgr_construct(
		struct dc_context *ctx,
		struct clk_mgr_internal *clk_mgr_dce);

#endif /* DAL_DC_DCE_DCE60_CLK_MGR_H_ */
// SPDX-License-Identifier: GPL-2.0
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 */

/*
 * DVS (Digital Video Stabilization) 6-axis coordinate table handling:
 * allocation, default/copy initialization, and release of the per-frame
 * Y and UV warp-coordinate grids.
 */

#include "sh_css_param_dvs.h"
#include <assert_support.h>
#include <type_support.h>
#include <ia_css_err.h>
#include <ia_css_types.h>
#include "ia_css_debug.h"

/*
 * Allocate a 6-axis config plus its four coordinate buffers. Dimensions
 * come either from @dvs_config_src (copy case) or are derived from
 * @frame_res (default case). On any allocation failure everything is
 * released and NULL is returned.
 */
static struct ia_css_dvs_6axis_config *
alloc_dvs_6axis_table(const struct ia_css_resolution *frame_res,
		      struct ia_css_dvs_6axis_config *dvs_config_src)
{
	unsigned int width_y = 0;
	unsigned int height_y = 0;
	unsigned int width_uv = 0;
	unsigned int height_uv = 0;
	int err = 0;
	struct ia_css_dvs_6axis_config *dvs_config = NULL;

	dvs_config = kvmalloc(sizeof(struct ia_css_dvs_6axis_config),
			      GFP_KERNEL);
	if (!dvs_config) {
		IA_CSS_ERROR("out of memory");
		err = -ENOMEM;
	} else {
		/* Initialize new struct with latest config settings */
		if (dvs_config_src) {
			dvs_config->width_y = width_y = dvs_config_src->width_y;
			dvs_config->height_y = height_y = dvs_config_src->height_y;
			dvs_config->width_uv = width_uv = dvs_config_src->width_uv;
			dvs_config->height_uv = height_uv = dvs_config_src->height_uv;
			IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d",
				   width_y, height_y);
		} else if (frame_res) {
			dvs_config->width_y = width_y =
			    DVS_TABLE_IN_BLOCKDIM_X_LUMA(frame_res->width);
			dvs_config->height_y = height_y =
			    DVS_TABLE_IN_BLOCKDIM_Y_LUMA(frame_res->height);
			/* UV = Y/2, depends on colour format YUV 4:2:0 */
			dvs_config->width_uv = width_uv =
			    DVS_TABLE_IN_BLOCKDIM_X_CHROMA(frame_res->width / 2);
			/* UV = Y/2, depends on colour format YUV 4:2:0 */
			dvs_config->height_uv = height_uv =
			    DVS_TABLE_IN_BLOCKDIM_Y_CHROMA(frame_res->height / 2);
			IA_CSS_LOG("alloc_dvs_6axis_table Y: W %d H %d",
				   width_y, height_y);
		}

		/* Generate Y buffers */
		dvs_config->xcoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
						 GFP_KERNEL);
		if (!dvs_config->xcoords_y) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			goto exit;
		}

		dvs_config->ycoords_y = kvmalloc(width_y * height_y * sizeof(uint32_t),
						 GFP_KERNEL);
		if (!dvs_config->ycoords_y) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			goto exit;
		}

		/* Generate UV buffers */
		IA_CSS_LOG("UV W %d H %d", width_uv, height_uv);

		dvs_config->xcoords_uv = kvmalloc(width_uv * height_uv * sizeof(uint32_t),
						  GFP_KERNEL);
		if (!dvs_config->xcoords_uv) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			goto exit;
		}

		dvs_config->ycoords_uv = kvmalloc(width_uv * height_uv * sizeof(uint32_t),
						  GFP_KERNEL);
		if (!dvs_config->ycoords_uv) {
			IA_CSS_ERROR("out of memory");
			err = -ENOMEM;
			/* falls through to exit */
		}

exit:
		if (err) {
			free_dvs_6axis_table(
			    &dvs_config); /* we might have allocated some memory, release this */
			dvs_config = NULL;
		}
	}

	IA_CSS_LEAVE("dvs_config=%p", dvs_config);
	return dvs_config;
}

/*
 * Fill the coordinate grids with an identity mapping shifted by the DVS
 * envelope offset; values are fixed-point with DVS_COORD_FRAC_BITS
 * fractional bits. UV offsets are half the Y offsets (YUV 4:2:0).
 */
static void
init_dvs_6axis_table_from_default(struct ia_css_dvs_6axis_config *dvs_config,
				  const struct ia_css_resolution *dvs_offset)
{
	unsigned int x, y;
	unsigned int width_y = dvs_config->width_y;
	unsigned int height_y = dvs_config->height_y;
	unsigned int width_uv = dvs_config->width_uv;
	unsigned int height_uv = dvs_config->height_uv;

	IA_CSS_LOG("Env_X=%d, Env_Y=%d, width_y=%d, height_y=%d",
		   dvs_offset->width, dvs_offset->height, width_y, height_y);
	for (y = 0; y < height_y; y++) {
		for (x = 0; x < width_y; x++) {
			dvs_config->xcoords_y[y * width_y + x] =
			    (dvs_offset->width + x * DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
		}
	}

	for (y = 0; y < height_y; y++) {
		for (x = 0; x < width_y; x++) {
			dvs_config->ycoords_y[y * width_y + x] =
			    (dvs_offset->height + y * DVS_BLOCKDIM_Y_LUMA) << DVS_COORD_FRAC_BITS;
		}
	}

	for (y = 0; y < height_uv; y++) {
		for (x = 0; x < width_uv; x++) {
			/* Envelope dimensions set in Ypixels hence offset UV = offset Y/2 */
			dvs_config->xcoords_uv[y * width_uv + x] =
			    ((dvs_offset->width / 2) + x * DVS_BLOCKDIM_X) << DVS_COORD_FRAC_BITS;
		}
	}

	for (y = 0; y < height_uv; y++) {
		for (x = 0; x < width_uv; x++) {
			/* Envelope dimensions set in Ypixels hence offset UV = offset Y/2 */
			dvs_config->ycoords_uv[y * width_uv + x] =
			    ((dvs_offset->height / 2) + y * DVS_BLOCKDIM_Y_CHROMA) << DVS_COORD_FRAC_BITS;
		}
	}
}

/*
 * Copy all four coordinate grids from @dvs_config_src into @dvs_config;
 * both tables must already have identical dimensions.
 */
static void
init_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config,
				 struct ia_css_dvs_6axis_config *dvs_config_src)
{
	unsigned int width_y = dvs_config->width_y;
	unsigned int height_y = dvs_config->height_y;
	unsigned int width_uv = dvs_config->width_uv;
	unsigned int height_uv = dvs_config->height_uv;

	memcpy(dvs_config->xcoords_y, dvs_config_src->xcoords_y,
	       (width_y * height_y * sizeof(uint32_t)));
	memcpy(dvs_config->ycoords_y, dvs_config_src->ycoords_y,
	       (width_y * height_y * sizeof(uint32_t)));
	memcpy(dvs_config->xcoords_uv, dvs_config_src->xcoords_uv,
	       (width_uv * height_uv * sizeof(uint32_t)));
	memcpy(dvs_config->ycoords_uv, dvs_config_src->ycoords_uv,
	       (width_uv * height_uv * sizeof(uint32_t)));
}

/*
 * Allocate a table sized from @frame_res and initialize it with the
 * default (identity + envelope offset) mapping. Returns NULL on failure.
 */
struct ia_css_dvs_6axis_config *
generate_dvs_6axis_table(const struct ia_css_resolution *frame_res,
			 const struct ia_css_resolution *dvs_offset)
{
	struct ia_css_dvs_6axis_config *dvs_6axis_table;

	assert(frame_res);
	assert(dvs_offset);

	dvs_6axis_table = alloc_dvs_6axis_table(frame_res, NULL);
	if (dvs_6axis_table) {
		init_dvs_6axis_table_from_default(dvs_6axis_table, dvs_offset);
		return dvs_6axis_table;
	}
	return NULL;
}

/*
 * Allocate a table with the same dimensions as @dvs_config_src and copy
 * its coordinate grids. Returns NULL on failure.
 */
struct ia_css_dvs_6axis_config *
generate_dvs_6axis_table_from_config(struct ia_css_dvs_6axis_config *dvs_config_src)
{
	struct ia_css_dvs_6axis_config *dvs_6axis_table;

	assert(dvs_config_src);

	dvs_6axis_table = alloc_dvs_6axis_table(NULL, dvs_config_src);
	if (dvs_6axis_table) {
		init_dvs_6axis_table_from_config(dvs_6axis_table, dvs_config_src);
		return dvs_6axis_table;
	}
	return NULL;
}

/*
 * Release a 6-axis table and its coordinate buffers; *dvs_6axis_config
 * is set to NULL afterwards. Safe to call with NULL/empty pointers.
 */
void
free_dvs_6axis_table(struct ia_css_dvs_6axis_config **dvs_6axis_config)
{
	if ((dvs_6axis_config) && (*dvs_6axis_config)) {
		IA_CSS_ENTER_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));

		/* Free up Y buffers */
		if ((*dvs_6axis_config)->xcoords_y) {
			kvfree((*dvs_6axis_config)->xcoords_y);
			(*dvs_6axis_config)->xcoords_y = NULL;
		}

		if ((*dvs_6axis_config)->ycoords_y) {
			kvfree((*dvs_6axis_config)->ycoords_y);
			(*dvs_6axis_config)->ycoords_y = NULL;
		}

		/* Free up UV buffers */
		if ((*dvs_6axis_config)->xcoords_uv) {
			kvfree((*dvs_6axis_config)->xcoords_uv);
			(*dvs_6axis_config)->xcoords_uv = NULL;
		}

		if ((*dvs_6axis_config)->ycoords_uv) {
			kvfree((*dvs_6axis_config)->ycoords_uv);
			(*dvs_6axis_config)->ycoords_uv = NULL;
		}

		IA_CSS_LEAVE_PRIVATE("dvs_6axis_config %p", (*dvs_6axis_config));
		kvfree(*dvs_6axis_config);
		*dvs_6axis_config = NULL;
	}
}

/*
 * Copy all coordinate grids from @dvs_config_src to @dvs_config_dst;
 * both tables must be fully allocated with matching dimensions.
 */
void
copy_dvs_6axis_table(struct ia_css_dvs_6axis_config *dvs_config_dst,
		     const struct ia_css_dvs_6axis_config *dvs_config_src)
{
	unsigned int width_y;
	unsigned int height_y;
	unsigned int width_uv;
	unsigned int height_uv;

	assert(dvs_config_src);
	assert(dvs_config_dst);
	assert(dvs_config_src->xcoords_y);
	assert(dvs_config_src->xcoords_uv);
	assert(dvs_config_src->ycoords_y);
	assert(dvs_config_src->ycoords_uv);
	assert(dvs_config_src->width_y == dvs_config_dst->width_y);
	assert(dvs_config_src->width_uv == dvs_config_dst->width_uv);
	assert(dvs_config_src->height_y == dvs_config_dst->height_y);
	assert(dvs_config_src->height_uv == dvs_config_dst->height_uv);

	width_y = dvs_config_src->width_y;
	height_y = dvs_config_src->height_y;
	width_uv = dvs_config_src->width_uv;	/* = Y/2, depends on colour format YUV 4:2:0 */
	height_uv = dvs_config_src->height_uv;

	memcpy(dvs_config_dst->xcoords_y, dvs_config_src->xcoords_y,
	       (width_y * height_y * sizeof(uint32_t)));
	memcpy(dvs_config_dst->ycoords_y, dvs_config_src->ycoords_y,
	       (width_y * height_y * sizeof(uint32_t)));

	memcpy(dvs_config_dst->xcoords_uv, dvs_config_src->xcoords_uv,
	       (width_uv * height_uv * sizeof(uint32_t)));
	memcpy(dvs_config_dst->ycoords_uv, dvs_config_src->ycoords_uv,
	       (width_uv * height_uv * sizeof(uint32_t)));
}

/*
 * Dispatch DVS statistics conversion (ISP layout -> host layout) to the
 * DVS1 or DVS2 converter depending on @type; other types are ignored.
 */
void
ia_css_dvs_statistics_get(enum dvs_statistics_type type,
			  union ia_css_dvs_statistics_host *host_stats,
			  const union ia_css_dvs_statistics_isp *isp_stats)
{
	if (type == DVS_STATISTICS) {
		ia_css_get_dvs_statistics(host_stats->p_dvs_statistics_host,
					  isp_stats->p_dvs_statistics_isp);
	} else if (type == DVS2_STATISTICS) {
		ia_css_get_dvs2_statistics(host_stats->p_dvs2_statistics_host,
					   isp_stats->p_dvs_statistics_isp);
	}
	return;
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 * Copyright (c) Nokia Corporation, 2006, 2007
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * This file includes volume table manipulation code. The volume table is an
 * on-flash table containing volume meta-data like name, number of reserved
 * physical eraseblocks, type, etc. The volume table is stored in the so-called
 * "layout volume".
 *
 * The layout volume is an internal volume which is organized as follows. It
 * consists of two logical eraseblocks - LEB 0 and LEB 1. Each logical
 * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
 * other. This redundancy guarantees robustness to unclean reboots. The volume
 * table is basically an array of volume table records. Each record contains
 * full information about the volume and is protected by a CRC checksum. Note,
 * nowadays we use the atomic LEB change operation when updating the volume
 * table, so we do not really need 2 LEBs anymore, but we preserve the older
 * design for the backward compatibility reasons.
 *
 * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
 * erased, and the updated volume table is written back to LEB 0. Then same for
 * LEB 1. This scheme guarantees recoverability from unclean reboots.
 *
 * In this UBI implementation the on-flash volume table does not contain any
 * information about how much data static volumes contain.
 *
 * But it would still be beneficial to store this information in the volume
 * table. For example, suppose we have a static volume X, and all its physical
 * eraseblocks became bad for some reasons. Suppose we are attaching the
 * corresponding MTD device, for some reason we find no logical eraseblocks
 * corresponding to the volume X. According to the volume table volume X does
 * exist. So we don't know whether it is just empty or all its physical
 * eraseblocks went bad. So we cannot alarm the user properly.
 *
 * The volume table also stores so-called "update marker", which is used for
 * volume updates. Before updating the volume, the update marker is set, and
 * after the update operation is finished, the update marker is cleared. So if
 * the update operation was interrupted (e.g. by an unclean reboot) - the
 * update marker is still there and we know that the volume's contents is
 * damaged.
 */

#include <linux/crc32.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <asm/div64.h>
#include "ubi.h"

static void self_vtbl_check(const struct ubi_device *ubi);

/* Empty volume table record */
static struct ubi_vtbl_record empty_vtbl_record;

/**
 * ubi_update_layout_vol - helper for updating layout volumes on flash.
 * @ubi: UBI device description object
 *
 * Atomically re-writes the in-RAM volume table (@ubi->vtbl) to every LEB of
 * the layout volume. Returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ubi_update_layout_vol(struct ubi_device *ubi)
{
	struct ubi_volume *layout_vol;
	int i, err;

	layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
		err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
						ubi->vtbl_size);
		if (err)
			return err;
	}

	return 0;
}

/**
 * ubi_change_vtbl_record - change volume table record.
 * @ubi: UBI device description object
 * @idx: table index to change
 * @vtbl_rec: new volume table record
 *
 * This function changes volume table record @idx. If @vtbl_rec is %NULL, empty
 * volume table record is written. The caller does not have to calculate CRC of
 * the record as it is done by this function. Returns zero in case of success
 * and a negative error code in case of failure.
 */
int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
			   struct ubi_vtbl_record *vtbl_rec)
{
	int err;
	uint32_t crc;

	ubi_assert(idx >= 0 && idx < ubi->vtbl_slots);

	if (!vtbl_rec)
		vtbl_rec = &empty_vtbl_record;
	else {
		/* CRC covers everything except the trailing crc field itself */
		crc = crc32(UBI_CRC32_INIT, vtbl_rec, UBI_VTBL_RECORD_SIZE_CRC);
		vtbl_rec->crc = cpu_to_be32(crc);
	}

	memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
	err = ubi_update_layout_vol(ubi);

	self_vtbl_check(ubi);
	return err ? err : 0;
}

/**
 * ubi_vtbl_rename_volumes - rename UBI volumes in the volume table.
 * @ubi: UBI device description object
 * @rename_list: list of &struct ubi_rename_entry objects
 *
 * This function re-names multiple volumes specified in @req in the volume
 * table. Returns zero in case of success and a negative error code in case of
 * failure.
 */
int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
			    struct list_head *rename_list)
{
	struct ubi_rename_entry *re;

	list_for_each_entry(re, rename_list, list) {
		uint32_t crc;
		struct ubi_volume *vol = re->desc->vol;
		struct ubi_vtbl_record *vtbl_rec = &ubi->vtbl[vol->vol_id];

		if (re->remove) {
			/* Removal: overwrite with the all-zero empty record */
			memcpy(vtbl_rec, &empty_vtbl_record,
			       sizeof(struct ubi_vtbl_record));
			continue;
		}

		vtbl_rec->name_len = cpu_to_be16(re->new_name_len);
		memcpy(vtbl_rec->name, re->new_name, re->new_name_len);
		/* Zero-pad the rest of the fixed-size name field */
		memset(vtbl_rec->name + re->new_name_len, 0,
		       UBI_VOL_NAME_MAX + 1 - re->new_name_len);
		crc = crc32(UBI_CRC32_INIT, vtbl_rec,
			    UBI_VTBL_RECORD_SIZE_CRC);
		vtbl_rec->crc = cpu_to_be32(crc);
	}

	return ubi_update_layout_vol(ubi);
}

/**
 * vtbl_check - check if volume table is not corrupted and sensible.
 * @ubi: UBI device description object
 * @vtbl: volume table
 *
 * This function returns zero if @vtbl is all right, %1 if CRC is incorrect,
 * and %-EINVAL if it contains inconsistent data.
 */
static int vtbl_check(const struct ubi_device *ubi,
		      const struct ubi_vtbl_record *vtbl)
{
	int i, n, reserved_pebs, alignment, data_pad, vol_type, name_len;
	int upd_marker, err;
	uint32_t crc;
	const char *name;

	for (i = 0; i < ubi->vtbl_slots; i++) {
		cond_resched();

		reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
		alignment = be32_to_cpu(vtbl[i].alignment);
		data_pad = be32_to_cpu(vtbl[i].data_pad);
		upd_marker = vtbl[i].upd_marker;
		vol_type = vtbl[i].vol_type;
		name_len = be16_to_cpu(vtbl[i].name_len);
		name = &vtbl[i].name[0];

		crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
		if (be32_to_cpu(vtbl[i].crc) != crc) {
			ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
				i, crc, be32_to_cpu(vtbl[i].crc));
			ubi_dump_vtbl_record(&vtbl[i], i);
			return 1;
		}

		if (reserved_pebs == 0) {
			/* Unused slot: must be byte-for-byte the empty record */
			if (memcmp(&vtbl[i], &empty_vtbl_record,
				   UBI_VTBL_RECORD_SIZE)) {
				err = 2;
				goto bad;
			}
			continue;
		}

		if (reserved_pebs < 0 || alignment < 0 || data_pad < 0 ||
		    name_len < 0) {
			err = 3;
			goto bad;
		}

		if (alignment > ubi->leb_size || alignment == 0) {
			err = 4;
			goto bad;
		}

		n = alignment & (ubi->min_io_size - 1);
		if (alignment != 1 && n) {
			err = 5;
			goto bad;
		}

		n = ubi->leb_size % alignment;
		if (data_pad != n) {
			ubi_err(ubi, "bad data_pad, has to be %d", n);
			err = 6;
			goto bad;
		}

		if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
			err = 7;
			goto bad;
		}

		if (upd_marker != 0 && upd_marker != 1) {
			err = 8;
			goto bad;
		}

		if (reserved_pebs > ubi->good_peb_count) {
			ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
				reserved_pebs, ubi->good_peb_count);
			err = 9;
			goto bad;
		}

		if (name_len > UBI_VOL_NAME_MAX) {
			err = 10;
			goto bad;
		}

		if (name[0] == '\0') {
			err = 11;
			goto bad;
		}

		/* name must be NUL-terminated exactly at name_len */
		if (name_len != strnlen(name, name_len + 1)) {
			err = 12;
			goto bad;
		}
	}

	/* Checks that all names are unique */
	for (i = 0; i < ubi->vtbl_slots - 1; i++) {
		for (n = i + 1; n < ubi->vtbl_slots; n++) {
			int len1 = be16_to_cpu(vtbl[i].name_len);
			int len2 = be16_to_cpu(vtbl[n].name_len);

			if (len1 > 0 && len1 == len2 &&
			    !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
				ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
					i, n, vtbl[i].name);
				ubi_dump_vtbl_record(&vtbl[i], i);
				ubi_dump_vtbl_record(&vtbl[n], n);
				return -EINVAL;
			}
		}
	}

	return 0;

bad:
	ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
	ubi_dump_vtbl_record(&vtbl[i], i);
	return -EINVAL;
}

/**
 * create_vtbl - create a copy of volume table.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @copy: number of the volume table copy
 * @vtbl: contents of the volume table
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int create_vtbl(struct ubi_device *ubi, struct ubi_attach_info *ai,
		       int copy, void *vtbl)
{
	int err, tries = 0;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	struct ubi_ainf_peb *new_aeb;

	dbg_gen("create volume table (copy #%d)", copy + 1);

	vidb = ubi_alloc_vid_buf(ubi, GFP_KERNEL);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

retry:
	new_aeb = ubi_early_get_peb(ubi, ai);
	if (IS_ERR(new_aeb)) {
		err = PTR_ERR(new_aeb);
		goto out_free;
	}

	vid_hdr->vol_type = UBI_LAYOUT_VOLUME_TYPE;
	vid_hdr->vol_id = cpu_to_be32(UBI_LAYOUT_VOLUME_ID);
	vid_hdr->compat = UBI_LAYOUT_VOLUME_COMPAT;
	vid_hdr->data_size = vid_hdr->used_ebs =
			     vid_hdr->data_pad = cpu_to_be32(0);
	vid_hdr->lnum = cpu_to_be32(copy);
	vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);

	/* The EC header is already there, write the VID header */
	err = ubi_io_write_vid_hdr(ubi, new_aeb->pnum, vidb);
	if (err)
		goto write_error;

	/* Write the layout volume contents */
	err = ubi_io_write_data(ubi, vtbl, new_aeb->pnum, 0, ubi->vtbl_size);
	if (err)
		goto write_error;

	/*
	 * And add it to the attaching information. Don't delete the old version
	 * of this LEB as it will be deleted and freed in 'ubi_add_to_av()'.
	 */
	err = ubi_add_to_av(ubi, ai, new_aeb->pnum, new_aeb->ec, vid_hdr, 0);
	ubi_free_aeb(ai, new_aeb);
	ubi_free_vid_buf(vidb);
	return err;

write_error:
	if (err == -EIO && ++tries <= 5) {
		/*
		 * Probably this physical eraseblock went bad, try to pick
		 * another one.
		 */
		list_add(&new_aeb->u.list, &ai->erase);
		goto retry;
	}
	ubi_free_aeb(ai, new_aeb);
out_free:
	ubi_free_vid_buf(vidb);
	return err;
}

/**
 * process_lvol - process the layout volume.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @av: layout volume attaching information
 *
 * This function is responsible for reading the layout volume, ensuring it is
 * not corrupted, and recovering from corruptions if needed. Returns volume
 * table in case of success and a negative error code in case of failure.
 */
static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
					    struct ubi_attach_info *ai,
					    struct ubi_ainf_volume *av)
{
	int err;
	struct rb_node *rb;
	struct ubi_ainf_peb *aeb;
	struct ubi_vtbl_record *leb[UBI_LAYOUT_VOLUME_EBS] = { NULL, NULL };
	int leb_corrupted[UBI_LAYOUT_VOLUME_EBS] = {1, 1};

	/*
	 * UBI goes through the following steps when it changes the layout
	 * volume:
	 * a. erase LEB 0;
	 * b. write new data to LEB 0;
	 * c. erase LEB 1;
	 * d. write new data to LEB 1.
	 *
	 * Before the change, both LEBs contain the same data.
	 *
	 * Due to unclean reboots, the contents of LEB 0 may be lost, but there
	 * should be LEB 1. So it is OK if LEB 0 is corrupted while LEB 1 is
	 * not. Similarly, LEB 1 may be lost, but there should be LEB 0. And
	 * finally, unclean reboots may result in a situation when neither LEB
	 * 0 nor LEB 1 are corrupted, but they are different. In this case, LEB
	 * 0 contains more recent information.
	 *
	 * So the plan is to first check LEB 0. Then
	 * a. if LEB 0 is OK, it must be containing the most recent data; then
	 *    we compare it with LEB 1, and if they are different, we copy LEB
	 *    0 to LEB 1;
	 * b. if LEB 0 is corrupted, but LEB 1 has to be OK, and we copy LEB 1
	 *    to LEB 0.
	 */
	dbg_gen("check layout volume");

	/* Read both LEB 0 and LEB 1 into memory */
	ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
		leb[aeb->lnum] = vzalloc(ubi->vtbl_size);
		if (!leb[aeb->lnum]) {
			err = -ENOMEM;
			goto out_free;
		}

		err = ubi_io_read_data(ubi, leb[aeb->lnum], aeb->pnum, 0,
				       ubi->vtbl_size);
		if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err))
			/*
			 * Scrub the PEB later. Note, -EBADMSG indicates an
			 * uncorrectable ECC error, but we have our own CRC and
			 * the data will be checked later. If the data is OK,
			 * the PEB will be scrubbed (because we set
			 * aeb->scrub). If the data is not OK, the contents of
			 * the PEB will be recovered from the second copy, and
			 * aeb->scrub will be cleared in
			 * 'ubi_add_to_av()'.
			 */
			aeb->scrub = 1;
		else if (err)
			goto out_free;
	}

	err = -EINVAL;
	if (leb[0]) {
		leb_corrupted[0] = vtbl_check(ubi, leb[0]);
		if (leb_corrupted[0] < 0)
			goto out_free;
	}

	if (!leb_corrupted[0]) {
		/* LEB 0 is OK */
		if (leb[1])
			leb_corrupted[1] = memcmp(leb[0], leb[1],
						  ubi->vtbl_size);
		if (leb_corrupted[1]) {
			ubi_warn(ubi, "volume table copy #2 is corrupted");
			err = create_vtbl(ubi, ai, 1, leb[0]);
			if (err)
				goto out_free;
			ubi_msg(ubi, "volume table was restored");
		}

		/* Both LEB 0 and LEB 1 are OK and consistent */
		vfree(leb[1]);
		return leb[0];
	} else {
		/* LEB 0 is corrupted or does not exist */
		if (leb[1]) {
			leb_corrupted[1] = vtbl_check(ubi, leb[1]);
			if (leb_corrupted[1] < 0)
				goto out_free;
		}
		if (leb_corrupted[1]) {
			/* Both LEB 0 and LEB 1 are corrupted */
			ubi_err(ubi, "both volume tables are corrupted");
			goto out_free;
		}

		ubi_warn(ubi, "volume table copy #1 is corrupted");
		err = create_vtbl(ubi, ai, 0, leb[1]);
		if (err)
			goto out_free;
		ubi_msg(ubi, "volume table was restored");

		vfree(leb[0]);
		return leb[1];
	}

out_free:
	vfree(leb[0]);
	vfree(leb[1]);
	return ERR_PTR(err);
}

/**
 * create_empty_lvol - create empty layout volume.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns volume table contents in case of success and a
 * negative error code in case of failure.
 */
static struct ubi_vtbl_record *create_empty_lvol(struct ubi_device *ubi,
						 struct ubi_attach_info *ai)
{
	int i;
	struct ubi_vtbl_record *vtbl;

	vtbl = vzalloc(ubi->vtbl_size);
	if (!vtbl)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < ubi->vtbl_slots; i++)
		memcpy(&vtbl[i], &empty_vtbl_record, UBI_VTBL_RECORD_SIZE);

	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
		int err;

		err = create_vtbl(ubi, ai, i, vtbl);
		if (err) {
			vfree(vtbl);
			return ERR_PTR(err);
		}
	}

	return vtbl;
}

/**
 * init_volumes - initialize volume information for existing volumes.
 * @ubi: UBI device description object
 * @ai: scanning information
 * @vtbl: volume table
 *
 * This function allocates volume description objects for existing volumes.
 * Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int init_volumes(struct ubi_device *ubi,
			const struct ubi_attach_info *ai,
			const struct ubi_vtbl_record *vtbl)
{
	int i, err, reserved_pebs = 0;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;

	for (i = 0; i < ubi->vtbl_slots; i++) {
		cond_resched();

		if (be32_to_cpu(vtbl[i].reserved_pebs) == 0)
			continue; /* Empty record */

		vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
		if (!vol)
			return -ENOMEM;

		vol->reserved_pebs = be32_to_cpu(vtbl[i].reserved_pebs);
		vol->alignment = be32_to_cpu(vtbl[i].alignment);
		vol->data_pad = be32_to_cpu(vtbl[i].data_pad);
		vol->upd_marker = vtbl[i].upd_marker;
		vol->vol_type = vtbl[i].vol_type == UBI_VID_DYNAMIC ?
					UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
		vol->name_len = be16_to_cpu(vtbl[i].name_len);
		vol->usable_leb_size = ubi->leb_size - vol->data_pad;
		memcpy(vol->name, vtbl[i].name, vol->name_len);
		vol->name[vol->name_len] = '\0';
		vol->vol_id = i;

		if (vtbl[i].flags & UBI_VTBL_SKIP_CRC_CHECK_FLG)
			vol->skip_check = 1;

		if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
			/* Auto re-size flag may be set only for one volume */
			if (ubi->autoresize_vol_id != -1) {
				ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
					ubi->autoresize_vol_id, i);
				kfree(vol);
				return -EINVAL;
			}

			ubi->autoresize_vol_id = i;
		}

		ubi_assert(!ubi->volumes[i]);
		ubi->volumes[i] = vol;
		ubi->vol_count += 1;
		vol->ubi = ubi;
		reserved_pebs += vol->reserved_pebs;

		/*
		 * We use ubi->peb_count and not vol->reserved_pebs because
		 * we want to keep the code simple. Otherwise we'd have to
		 * resize/check the bitmap upon volume resize too.
		 * Allocating a few bytes more does not hurt.
		 */
		err = ubi_fastmap_init_checkmap(vol, ubi->peb_count);
		if (err)
			return err;

		/*
		 * In case of dynamic volume UBI knows nothing about how many
		 * data is stored there. So assume the whole volume is used.
		 */
		if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
			vol->used_ebs = vol->reserved_pebs;
			vol->last_eb_bytes = vol->usable_leb_size;
			vol->used_bytes =
				(long long)vol->used_ebs * vol->usable_leb_size;
			continue;
		}

		/* Static volumes only */
		av = ubi_find_av(ai, i);
		if (!av || !av->leb_count) {
			/*
			 * No eraseblocks belonging to this volume found. We
			 * don't actually know whether this static volume is
			 * completely corrupted or just contains no data. And
			 * we cannot know this as long as data size is not
			 * stored on flash. So we just assume the volume is
			 * empty. FIXME: this should be handled.
			 */
			continue;
		}

		if (av->leb_count != av->used_ebs) {
			/*
			 * We found a static volume which misses several
			 * eraseblocks. Treat it as corrupted.
			 */
			ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
				 av->vol_id, av->used_ebs - av->leb_count);
			vol->corrupted = 1;
			continue;
		}

		vol->used_ebs = av->used_ebs;
		vol->used_bytes =
			(long long)(vol->used_ebs - 1) * vol->usable_leb_size;
		vol->used_bytes += av->last_data_size;
		vol->last_eb_bytes = av->last_data_size;
	}

	/* And add the layout volume */
	vol = kzalloc(sizeof(struct ubi_volume), GFP_KERNEL);
	if (!vol)
		return -ENOMEM;

	vol->reserved_pebs = UBI_LAYOUT_VOLUME_EBS;
	vol->alignment = UBI_LAYOUT_VOLUME_ALIGN;
	vol->vol_type = UBI_DYNAMIC_VOLUME;
	vol->name_len = sizeof(UBI_LAYOUT_VOLUME_NAME) - 1;
	memcpy(vol->name, UBI_LAYOUT_VOLUME_NAME, vol->name_len + 1);
	vol->usable_leb_size = ubi->leb_size;
	vol->used_ebs = vol->reserved_pebs;
	/* NOTE(review): last_eb_bytes = reserved_pebs looks odd but matches
	 * the used_bytes computation below - confirm before changing */
	vol->last_eb_bytes = vol->reserved_pebs;
	vol->used_bytes =
		(long long)vol->used_ebs * (ubi->leb_size - vol->data_pad);
	vol->vol_id = UBI_LAYOUT_VOLUME_ID;
	vol->ref_count = 1;

	ubi_assert(!ubi->volumes[i]);
	ubi->volumes[vol_id2idx(ubi, vol->vol_id)] = vol;
	reserved_pebs += vol->reserved_pebs;
	ubi->vol_count += 1;
	vol->ubi = ubi;
	err = ubi_fastmap_init_checkmap(vol, UBI_LAYOUT_VOLUME_EBS);
	if (err)
		return err;

	if (reserved_pebs > ubi->avail_pebs) {
		ubi_err(ubi, "not enough PEBs, required %d, available %d",
			reserved_pebs, ubi->avail_pebs);
		if (ubi->corr_peb_count)
			ubi_err(ubi, "%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		return -ENOSPC;
	}
	ubi->rsvd_pebs += reserved_pebs;
	ubi->avail_pebs -= reserved_pebs;

	return 0;
}

/**
 * check_av - check volume attaching information.
 * @vol: UBI volume description object
 * @av: volume attaching information
 *
 * This function returns zero if the volume attaching information is consistent
 * to the data read from the volume table, and %-EINVAL if not.
 */
static int check_av(const struct ubi_volume *vol,
		    const struct ubi_ainf_volume *av)
{
	int err;

	if (av->highest_lnum >= vol->reserved_pebs) {
		err = 1;
		goto bad;
	}
	if (av->leb_count > vol->reserved_pebs) {
		err = 2;
		goto bad;
	}
	if (av->vol_type != vol->vol_type) {
		err = 3;
		goto bad;
	}
	if (av->used_ebs > vol->reserved_pebs) {
		err = 4;
		goto bad;
	}
	if (av->data_pad != vol->data_pad) {
		err = 5;
		goto bad;
	}
	return 0;

bad:
	ubi_err(vol->ubi, "bad attaching information, error %d", err);
	ubi_dump_av(av);
	ubi_dump_vol_info(vol);
	return -EINVAL;
}

/**
 * check_attaching_info - check that the attaching information is sane.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * Even though we protect on-flash data by CRC checksums, we still don't trust
 * the media. This function ensures that attaching information is consistent to
 * the information read from the volume table. Returns zero if the attaching
 * information is OK and %-EINVAL if it is not.
 */
static int check_attaching_info(const struct ubi_device *ubi,
				struct ubi_attach_info *ai)
{
	int err, i;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;

	if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
		ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
			ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
		return -EINVAL;
	}

	if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
	    ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
		ubi_err(ubi, "too large volume ID %d found",
			ai->highest_vol_id);
		return -EINVAL;
	}

	for (i = 0; i < ubi->vtbl_slots + UBI_INT_VOL_COUNT; i++) {
		cond_resched();

		av = ubi_find_av(ai, i);
		vol = ubi->volumes[i];
		if (!vol) {
			if (av)
				ubi_remove_av(ai, av);
			continue;
		}

		if (vol->reserved_pebs == 0) {
			ubi_assert(i < ubi->vtbl_slots);

			if (!av)
				continue;

			/*
			 * During attaching we found a volume which does not
			 * exist according to the information in the volume
			 * table. This must have happened due to an unclean
			 * reboot while the volume was being removed. Discard
			 * these eraseblocks.
			 */
			ubi_msg(ubi, "finish volume %d removal", av->vol_id);
			ubi_remove_av(ai, av);
		} else if (av) {
			err = check_av(vol, av);
			if (err)
				return err;
		}
	}

	return 0;
}

/**
 * ubi_read_volume_table - read the volume table.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function reads volume table, checks it, recover from errors if needed,
 * or creates it if needed. Returns zero in case of success and a negative
 * error code in case of failure.
 */
int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int err;
	struct ubi_ainf_volume *av;

	/* Pre-computed CRC32 of an all-zero (empty) volume table record */
	empty_vtbl_record.crc = cpu_to_be32(0xf116c36b);

	/*
	 * The number of supported volumes is limited by the eraseblock size
	 * and by the UBI_MAX_VOLUMES constant.
	 */

	if (ubi->leb_size < UBI_VTBL_RECORD_SIZE) {
		ubi_err(ubi, "LEB size too small for a volume record");
		return -EINVAL;
	}

	ubi->vtbl_slots = ubi->leb_size / UBI_VTBL_RECORD_SIZE;
	if (ubi->vtbl_slots > UBI_MAX_VOLUMES)
		ubi->vtbl_slots = UBI_MAX_VOLUMES;

	ubi->vtbl_size = ubi->vtbl_slots * UBI_VTBL_RECORD_SIZE;
	ubi->vtbl_size = ALIGN(ubi->vtbl_size, ubi->min_io_size);

	av = ubi_find_av(ai, UBI_LAYOUT_VOLUME_ID);
	if (!av) {
		/*
		 * No logical eraseblocks belonging to the layout volume were
		 * found. This could mean that the flash is just empty. In
		 * this case we create empty layout volume.
		 *
		 * But if flash is not empty this must be a corruption or the
		 * MTD device just contains garbage.
		 */
		if (ai->is_empty) {
			ubi->vtbl = create_empty_lvol(ubi, ai);
			if (IS_ERR(ubi->vtbl))
				return PTR_ERR(ubi->vtbl);
		} else {
			ubi_err(ubi, "the layout volume was not found");
			return -EINVAL;
		}
	} else {
		if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
			/* This must not happen with proper UBI images */
			ubi_err(ubi, "too many LEBs (%d) in layout volume",
				av->leb_count);
			return -EINVAL;
		}

		ubi->vtbl = process_lvol(ubi, ai, av);
		if (IS_ERR(ubi->vtbl))
			return PTR_ERR(ubi->vtbl);
	}

	ubi->avail_pebs = ubi->good_peb_count - ubi->corr_peb_count;

	/*
	 * The layout volume is OK, initialize the corresponding in-RAM data
	 * structures.
	 */
	err = init_volumes(ubi, ai, ubi->vtbl);
	if (err)
		goto out_free;

	/*
	 * Make sure that the attaching information is consistent to the
	 * information stored in the volume table.
	 */
	err = check_attaching_info(ubi, ai);
	if (err)
		goto out_free;

	return 0;

out_free:
	vfree(ubi->vtbl);
	ubi_free_all_volumes(ubi);
	return err;
}

/**
 * self_vtbl_check - check volume table.
 * @ubi: UBI device description object
 *
 * Debug-only self check: re-validates the in-RAM volume table and BUG()s on
 * failure. A no-op unless general self-checks are enabled.
 */
static void self_vtbl_check(const struct ubi_device *ubi)
{
	if (!ubi_dbg_chk_gen(ubi))
		return;

	if (vtbl_check(ubi, ubi->vtbl)) {
		ubi_err(ubi, "self-check failed");
		BUG();
	}
}
/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __GMC_V12_0_H__
#define __GMC_V12_0_H__

/* IP-level function table for the GMC v12.0 block, defined in gmc_v12_0.c */
extern const struct amd_ip_funcs gmc_v12_0_ip_funcs;
/* IP block version entry used when registering GMC v12.0 with the driver */
extern const struct amdgpu_ip_block_version gmc_v12_0_ip_block;

#endif
// SPDX-License-Identifier: GPL-2.0 /* * TI j721e Cadence DSI wrapper * * Copyright (C) 2022 Texas Instruments Incorporated - http://www.ti.com/ * Author: Rahul T R <[email protected]> */ #include <linux/io.h> #include <linux/platform_device.h> #include "cdns-dsi-j721e.h" #define DSI_WRAP_REVISION 0x0 #define DSI_WRAP_DPI_CONTROL 0x4 #define DSI_WRAP_DSC_CONTROL 0x8 #define DSI_WRAP_DPI_SECURE 0xc #define DSI_WRAP_DSI_0_ASF_STATUS 0x10 #define DSI_WRAP_DPI_0_EN BIT(0) #define DSI_WRAP_DSI2_MUX_SEL BIT(4) static int cdns_dsi_j721e_init(struct cdns_dsi *dsi) { struct platform_device *pdev = to_platform_device(dsi->base.dev); dsi->j721e_regs = devm_platform_ioremap_resource(pdev, 1); return PTR_ERR_OR_ZERO(dsi->j721e_regs); } static void cdns_dsi_j721e_enable(struct cdns_dsi *dsi) { /* * Enable DPI0 as its input. DSS0 DPI2 is connected * to DSI DPI0. This is the only supported configuration on * J721E. */ writel(DSI_WRAP_DPI_0_EN, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL); } static void cdns_dsi_j721e_disable(struct cdns_dsi *dsi) { /* Put everything to defaults */ writel(0, dsi->j721e_regs + DSI_WRAP_DPI_CONTROL); } const struct cdns_dsi_platform_ops dsi_ti_j721e_ops = { .init = cdns_dsi_j721e_init, .enable = cdns_dsi_j721e_enable, .disable = cdns_dsi_j721e_disable, };
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp.h
 * Interface for NFP device access and query functions.
 */

#ifndef __NFP_H__
#define __NFP_H__

#include <linux/device.h>
#include <linux/types.h>

#include "nfp_cpp.h"

/* Implemented in nfp_hwinfo.c */

struct nfp_hwinfo;
struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp);
const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup);
char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo);
u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo);

/* Implemented in nfp_nsp.c, low level functions */

struct nfp_nsp;

struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state);
bool nfp_nsp_config_modified(struct nfp_nsp *state);
void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified);
void *nfp_nsp_config_entries(struct nfp_nsp *state);
unsigned int nfp_nsp_config_idx(struct nfp_nsp *state);
void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries,
			      unsigned int idx);
void nfp_nsp_config_clear_state(struct nfp_nsp *state);

int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
			    const void *buf, unsigned int size);
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
			 void *buf, unsigned int size);

/* Implemented in nfp_resource.c */

/* All keys are CRC32-POSIX of the 8-byte identification string */

/* ARM/PCI vNIC Interfaces 0..3 */
#define NFP_RESOURCE_VNIC_PCI_0		"vnic.p0"
#define NFP_RESOURCE_VNIC_PCI_1		"vnic.p1"
#define NFP_RESOURCE_VNIC_PCI_2		"vnic.p2"
#define NFP_RESOURCE_VNIC_PCI_3		"vnic.p3"

/* NFP Hardware Info Database */
#define NFP_RESOURCE_NFP_HWINFO		"nfp.info"

/* Service Processor */
#define NFP_RESOURCE_NSP		"nfp.sp"
#define NFP_RESOURCE_NSP_DIAG		"arm.diag"

/* Netronome Flow Firmware Table */
#define NFP_RESOURCE_NFP_NFFW		"nfp.nffw"

/* MAC Statistics Accumulator */
#define NFP_RESOURCE_MAC_STATISTICS	"mac.stat"

int nfp_resource_table_init(struct nfp_cpp *cpp);

/* Opaque resource handle; acquire/release bracket exclusive access */
struct nfp_resource *
nfp_resource_acquire(struct nfp_cpp *cpp, const char *name);

void nfp_resource_release(struct nfp_resource *res);

int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs);

/* Accessors for an acquired resource's location and identity */
u32 nfp_resource_cpp_id(struct nfp_resource *res);
const char *nfp_resource_name(struct nfp_resource *res);
u64 nfp_resource_address(struct nfp_resource *res);
u64 nfp_resource_size(struct nfp_resource *res);

#endif /* !__NFP_H__ */
// SPDX-License-Identifier: GPL-2.0-only #include <linux/bug.h> #include <linux/export.h> #include <linux/irqflags.h> noinstr void warn_bogus_irq_restore(void) { instrumentation_begin(); WARN_ONCE(1, "raw_local_irq_restore() called with IRQs enabled\n"); instrumentation_end(); } EXPORT_SYMBOL(warn_bogus_irq_restore);
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <linux/workqueue.h>

#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_encoder.h"

/* Delayed-work callback: forward to the encoder's registered link_check hook */
static void intel_encoder_link_check_work_fn(struct work_struct *work)
{
	struct intel_encoder *encoder =
		container_of(work, typeof(*encoder), link_check_work.work);

	encoder->link_check(encoder);
}

/*
 * Initialize the encoder's delayed link-check work and record @callback as
 * the function to run when the work fires.
 */
void intel_encoder_link_check_init(struct intel_encoder *encoder,
				   void (*callback)(struct intel_encoder *encoder))
{
	INIT_DELAYED_WORK(&encoder->link_check_work, intel_encoder_link_check_work_fn);
	encoder->link_check = callback;
}

/* Cancel any pending link check and wait for a running one to finish */
void intel_encoder_link_check_flush_work(struct intel_encoder *encoder)
{
	cancel_delayed_work_sync(&encoder->link_check_work);
}

/* (Re)schedule a link check @delay_ms from now on the unordered workqueue */
void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int delay_ms)
{
	struct drm_i915_private *i915 = to_i915(encoder->base.dev);

	mod_delayed_work(i915->unordered_wq,
			 &encoder->link_check_work, msecs_to_jiffies(delay_ms));
}

/*
 * Run every encoder's suspend hook (under the modeset locks), then every
 * suspend_complete hook (lockless). No-op when the device has no display.
 */
void intel_encoder_suspend_all(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * TODO: check and remove holding the modeset locks if none of
	 * the encoders depends on this.
	 */
	drm_modeset_lock_all(display->drm);
	for_each_intel_encoder(display->drm, encoder)
		if (encoder->suspend)
			encoder->suspend(encoder);
	drm_modeset_unlock_all(display->drm);

	for_each_intel_encoder(display->drm, encoder)
		if (encoder->suspend_complete)
			encoder->suspend_complete(encoder);
}

/*
 * Run every encoder's shutdown hook (under the modeset locks), then every
 * shutdown_complete hook (lockless). Mirrors intel_encoder_suspend_all().
 */
void intel_encoder_shutdown_all(struct intel_display *display)
{
	struct intel_encoder *encoder;

	if (!HAS_DISPLAY(display))
		return;

	/*
	 * TODO: check and remove holding the modeset locks if none of
	 * the encoders depends on this.
	 */
	drm_modeset_lock_all(display->drm);
	for_each_intel_encoder(display->drm, encoder)
		if (encoder->shutdown)
			encoder->shutdown(encoder);
	drm_modeset_unlock_all(display->drm);

	for_each_intel_encoder(display->drm, encoder)
		if (encoder->shutdown_complete)
			encoder->shutdown_complete(encoder);
}
// SPDX-License-Identifier: MIT /* Copyright © 2022-2024 Advanced Micro Devices, Inc. All rights reserved. */ #define SMU13_DRIVER_IF_VERSION 0x18 //Only Clks that have DPM descriptors are listed here typedef enum { PPCLK_GFXCLK = 0, PPCLK_SOCCLK, PPCLK_UCLK, PPCLK_FCLK, PPCLK_DCLK_0, PPCLK_VCLK_0, PPCLK_DCLK_1, PPCLK_VCLK_1, PPCLK_DISPCLK, PPCLK_DPPCLK, PPCLK_DPREFCLK, PPCLK_DCFCLK, PPCLK_DTBCLK, PPCLK_COUNT, } PPCLK_e; typedef struct { uint8_t WmSetting; uint8_t Flags; uint8_t Padding[2]; } WatermarkRowGeneric_t; #define NUM_WM_RANGES 4 typedef enum { WATERMARKS_CLOCK_RANGE = 0, WATERMARKS_DUMMY_PSTATE, WATERMARKS_MALL, WATERMARKS_COUNT, } WATERMARKS_FLAGS_e; typedef struct { // Watermarks WatermarkRowGeneric_t WatermarkRow[NUM_WM_RANGES]; } Watermarks_t; typedef struct { Watermarks_t Watermarks; uint32_t Spare[16]; uint32_t MmHubPadding[8]; // SMU internal use } WatermarksExternal_t; // Table types #define TABLE_PMFW_PPTABLE 0 #define TABLE_COMBO_PPTABLE 1 #define TABLE_WATERMARKS 2 #define TABLE_AVFS_PSM_DEBUG 3 #define TABLE_PMSTATUSLOG 4 #define TABLE_SMU_METRICS 5 #define TABLE_DRIVER_SMU_CONFIG 6 #define TABLE_ACTIVITY_MONITOR_COEFF 7 #define TABLE_OVERDRIVE 8 #define TABLE_I2C_COMMANDS 9 #define TABLE_DRIVER_INFO 10 #define TABLE_COUNT 11
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2024 Meta Platforms, Inc. and affiliates. */ #include <vmlinux.h> #include <bpf/bpf_helpers.h> #include "../bpf_testmod/bpf_testmod.h" char _license[] SEC("license") = "GPL"; /* * This subprogram validates that libbpf handles the situation in which BPF * object has subprograms in .text section, but has no entry BPF programs. * At some point that was causing issues due to legacy logic of treating such * subprogram as entry program (with unknown program type, which would fail). */ int dangling_subprog(void) { /* do nothing, just be here */ return 0; } SEC(".struct_ops.link") struct bpf_testmod_ops testmod_do_detach;
/* SPDX-License-Identifier: GPL-2.0-only
 *
 * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#ifndef MHICONTROLLERQAIC_H_
#define MHICONTROLLERQAIC_H_

/*
 * Allocate and register an MHI controller for a QAIC PCI device.
 * @mhi_bar: mapped BAR holding the MHI register space.
 * @mhi_irq: IRQ to service MHI events.
 * @shared_msi: true when the MSI is shared with other users.
 * Returns the controller on success or an ERR_PTR on failure.
 */
struct mhi_controller *qaic_mhi_register_controller(struct pci_dev *pci_dev, void __iomem *mhi_bar,
						    int mhi_irq, bool shared_msi);
/*
 * Unregister and free a controller obtained from
 * qaic_mhi_register_controller(). @link_up tells the teardown path whether
 * the PCI link is still usable.
 */
void qaic_mhi_free_controller(struct mhi_controller *mhi_cntrl, bool link_up);
/* Begin a device reset cycle; pair with qaic_mhi_reset_done(). */
void qaic_mhi_start_reset(struct mhi_controller *mhi_cntrl);
/* Complete the reset started by qaic_mhi_start_reset(). */
void qaic_mhi_reset_done(struct mhi_controller *mhi_cntrl);

#endif /* MHICONTROLLERQAIC_H_ */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Applied Micro X-Gene SoC Ethernet Driver
 *
 * Copyright (c) 2014, Applied Micro Circuits Corporation
 * Authors: Iyappan Subramanian <[email protected]>
 *	    Ravi Patel <[email protected]>
 *	    Keyur Chudgar <[email protected]>
 */

#include "xgene_enet_main.h"
#include "xgene_enet_hw.h"

/*
 * Fill the cached ring-state words (ring->state[]) with base address,
 * size and threshold fields; written to hardware later by
 * xgene_enet_write_ring_state().
 */
static void xgene_enet_ring_init(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	u64 addr = ring->dma;
	enum xgene_enet_ring_cfgsize cfgsize = ring->cfgsize;

	ring_cfg[4] |= (1 << SELTHRSH_POS) &
			CREATE_MASK(SELTHRSH_POS, SELTHRSH_LEN);
	ring_cfg[3] |= ACCEPTLERR;
	ring_cfg[2] |= QCOHERENT;

	/* DMA address is recorded shifted right by 8, split across words */
	addr >>= 8;
	ring_cfg[2] |= (addr << RINGADDRL_POS) &
			CREATE_MASK_ULL(RINGADDRL_POS, RINGADDRL_LEN);
	addr >>= RINGADDRL_LEN;
	ring_cfg[3] |= addr & CREATE_MASK_ULL(RINGADDRH_POS, RINGADDRH_LEN);
	ring_cfg[3] |= ((u32)cfgsize << RINGSIZE_POS) &
			CREATE_MASK(RINGSIZE_POS, RINGSIZE_LEN);
}

/* Mark the ring as a buffer pool or a regular work queue in the state. */
static void xgene_enet_ring_set_type(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;
	bool is_bufpool;
	u32 val;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	val = (is_bufpool) ? RING_BUFPOOL : RING_REGULAR;
	ring_cfg[4] |= (val << RINGTYPE_POS) &
			CREATE_MASK(RINGTYPE_POS, RINGTYPE_LEN);

	if (is_bufpool) {
		ring_cfg[3] |= (BUFPOOL_MODE << RINGMODE_POS) &
				CREATE_MASK(RINGMODE_POS, RINGMODE_LEN);
	}
}

/* Enable recombination buffering with fixed timeout values (0xf / 0x7). */
static void xgene_enet_ring_set_recombbuf(struct xgene_enet_desc_ring *ring)
{
	u32 *ring_cfg = ring->state;

	ring_cfg[3] |= RECOMBBUF;
	ring_cfg[3] |= (0xf << RECOMTIMEOUTL_POS) &
			CREATE_MASK(RECOMTIMEOUTL_POS, RECOMTIMEOUTL_LEN);
	ring_cfg[4] |= 0x7 & CREATE_MASK(RECOMTIMEOUTH_POS, RECOMTIMEOUTH_LEN);
}

/* MMIO write into the ring CSR block. */
static void xgene_enet_ring_wr32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	iowrite32(data, pdata->ring_csr_addr + offset);
}

/* MMIO read from the ring CSR block. */
static void xgene_enet_ring_rd32(struct xgene_enet_desc_ring *ring,
				 u32 offset, u32 *data)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);

	*data = ioread32(pdata->ring_csr_addr + offset);
}

/*
 * Push the cached ring->state[] words to hardware: select the ring via
 * CSR_RING_CONFIG, then write each config word in order.
 */
static void xgene_enet_write_ring_state(struct xgene_enet_desc_ring *ring)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ring->ndev);
	int i;

	xgene_enet_ring_wr32(ring, CSR_RING_CONFIG, ring->num);
	for (i = 0; i < pdata->ring_ops->num_ring_config; i++) {
		xgene_enet_ring_wr32(ring, CSR_RING_WR_BASE + (i * 4),
				     ring->state[i]);
	}
}

/* Zero the cached state and write the all-zero state to hardware. */
static void xgene_enet_clr_ring_state(struct xgene_enet_desc_ring *ring)
{
	memset(ring->state, 0, sizeof(ring->state));
	xgene_enet_write_ring_state(ring);
}

/*
 * Build the full ring state (type, optional recombination buffer for the
 * ethernet-owned rings, base config) and commit it to hardware.
 */
static void xgene_enet_set_ring_state(struct xgene_enet_desc_ring *ring)
{
	xgene_enet_ring_set_type(ring);

	if (xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH0 ||
	    xgene_enet_ring_owner(ring->id) == RING_OWNER_ETH1)
		xgene_enet_ring_set_recombbuf(ring);

	xgene_enet_ring_init(ring);
	xgene_enet_write_ring_state(ring);
}

/* Program the ring id and ring-id-buffer registers for this ring. */
static void xgene_enet_set_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id_val, ring_id_buf;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);

	ring_id_val = ring->id & GENMASK(9, 0);
	ring_id_val |= OVERWRITE;

	ring_id_buf = (ring->num << 9) & GENMASK(18, 9);
	ring_id_buf |= PREFETCH_BUF_EN;

	if (is_bufpool)
		ring_id_buf |= IS_BUFFER_POOL;

	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id_val);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, ring_id_buf);
}

/* Clear the ring id mapping set up by xgene_enet_set_ring_id(). */
static void xgene_enet_clr_desc_ring_id(struct xgene_enet_desc_ring *ring)
{
	u32 ring_id;

	ring_id = ring->id | OVERWRITE;
	xgene_enet_ring_wr32(ring, CSR_RING_ID, ring_id);
	xgene_enet_ring_wr32(ring, CSR_RING_ID_BUF, 0);
}

/*
 * Bring a descriptor ring fully up: clear then program state and id,
 * compute the slot count, and for CPU-owned (non-bufpool) rings mark all
 * descriptors empty and enable the ring's "not empty" interrupt bit.
 * Returns the ring it was given.
 */
static struct xgene_enet_desc_ring *xgene_enet_setup_ring(
				    struct xgene_enet_desc_ring *ring)
{
	u32 size = ring->size;
	u32 i, data;
	bool is_bufpool;

	xgene_enet_clr_ring_state(ring);
	xgene_enet_set_ring_state(ring);
	xgene_enet_set_ring_id(ring);

	ring->slots = xgene_enet_get_numslots(ring->id, size);

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		return ring;

	for (i = 0; i < ring->slots; i++)
		xgene_enet_mark_desc_slot_empty(&ring->raw_desc[i]);

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data |= BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

	return ring;
}

/* Tear down a ring: disable its interrupt bit (CPU rings) and clear state. */
static void xgene_enet_clear_ring(struct xgene_enet_desc_ring *ring)
{
	u32 data;
	bool is_bufpool;

	is_bufpool = xgene_enet_is_bufpool(ring->id);
	if (is_bufpool || xgene_enet_ring_owner(ring->id) != RING_OWNER_CPU)
		goto out;

	xgene_enet_ring_rd32(ring, CSR_RING_NE_INT_MODE, &data);
	data &= ~BIT(31 - xgene_enet_ring_bufnum(ring->id));
	xgene_enet_ring_wr32(ring, CSR_RING_NE_INT_MODE, data);

out:
	xgene_enet_clr_desc_ring_id(ring);
	xgene_enet_clr_ring_state(ring);
}

/* Post @count new descriptors to the ring's command register. */
static void xgene_enet_wr_cmd(struct xgene_enet_desc_ring *ring, int count)
{
	iowrite32(count, ring->cmd);
}

/* Number of messages currently queued in the ring (from hardware state). */
static u32 xgene_enet_ring_len(struct xgene_enet_desc_ring *ring)
{
	u32 __iomem *cmd_base = ring->cmd_base;
	u32 ring_state, num_msgs;

	ring_state = ioread32(&cmd_base[1]);
	num_msgs = GET_VAL(NUMMSGSINQ, ring_state);

	return num_msgs;
}

/* Map an ingress hardware error code onto the ring's rx error counters. */
void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
			    enum xgene_enet_err_code status)
{
	switch (status) {
	case INGRESS_CRC:
		ring->rx_crc_errors++;
		break;
	case INGRESS_CHECKSUM:
	case INGRESS_CHECKSUM_COMPUTE:
		ring->rx_errors++;
		break;
	case INGRESS_TRUNC_FRAME:
		ring->rx_frame_errors++;
		break;
	case INGRESS_PKT_LEN:
		ring->rx_length_errors++;
		break;
	case INGRESS_PKT_UNDER:
		ring->rx_frame_errors++;
		break;
	case INGRESS_FIFO_OVERRUN:
		ring->rx_fifo_errors++;
		break;
	default:
		break;
	}
}

/* MMIO write helpers for the various ethernet CSR regions. */
static void xgene_enet_wr_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_ring_if(struct xgene_enet_pdata *pdata,
				  u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_ring_if_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	iowrite32(val, addr);
}

static void xgene_enet_wr_mcx_csr(struct xgene_enet_pdata *pdata, u32 offset,
				  u32 val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	iowrite32(val, addr);
}

/*
 * Indirect MAC register write: set address, data, then pulse the command
 * register and poll (up to ~10us) for completion under mac_lock. When an
 * external MDIO driver manages an RGMII PHY, delegate to it instead.
 */
void xgene_enet_wr_mac(struct xgene_enet_pdata *pdata, u32 wr_addr, u32 wr_data)
{
	void __iomem *addr, *wr, *cmd, *cmd_done;
	struct net_device *ndev = pdata->ndev;
	u8 wait = 10;
	u32 done;

	if (pdata->mdio_driver && ndev->phydev &&
	    phy_interface_mode_is_rgmii(pdata->phy_mode)) {
		struct mii_bus *bus = ndev->phydev->mdio.bus;

		return xgene_mdio_wr_mac(bus->priv, wr_addr, wr_data);
	}

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	wr = pdata->mcx_mac_addr + MAC_WRITE_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(wr_addr, addr);
	iowrite32(wr_data, wr);
	iowrite32(XGENE_ENET_WR_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(ndev, "mac write failed, addr: %04x data: %08x\n",
			   wr_addr, wr_data);

	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);
}

/* MMIO read helpers for the various ethernet CSR regions. */
static void xgene_enet_rd_csr(struct xgene_enet_pdata *pdata,
			      u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_diag_csr(struct xgene_enet_pdata *pdata,
				   u32 offset, u32 *val)
{
	void __iomem *addr = pdata->eth_diag_csr_addr + offset;

	*val = ioread32(addr);
}

static void xgene_enet_rd_mcx_csr(struct xgene_enet_pdata *pdata, u32 offset,
				  u32 *val)
{
	void __iomem *addr = pdata->mcx_mac_csr_addr + offset;

	*val = ioread32(addr);
}

/*
 * Indirect MAC register read, mirror of xgene_enet_wr_mac(). On command
 * timeout the stale data register value is still returned after logging.
 */
u32 xgene_enet_rd_mac(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	struct net_device *ndev = pdata->ndev;
	u32 done, rd_data;
	u8 wait = 10;

	if (pdata->mdio_driver && ndev->phydev &&
	    phy_interface_mode_is_rgmii(pdata->phy_mode)) {
		struct mii_bus *bus = ndev->phydev->mdio.bus;

		return xgene_mdio_rd_mac(bus->priv, rd_addr);
	}

	addr = pdata->mcx_mac_addr + MAC_ADDR_REG_OFFSET;
	rd = pdata->mcx_mac_addr + MAC_READ_REG_OFFSET;
	cmd = pdata->mcx_mac_addr + MAC_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_mac_addr + MAC_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->mac_lock);
	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(ndev, "mac read failed, addr: %04x\n", rd_addr);

	rd_data = ioread32(rd);
	iowrite32(0, cmd);
	spin_unlock(&pdata->mac_lock);

	return rd_data;
}

/* Indirect statistics-block read, same handshake but under stats_lock. */
u32 xgene_enet_rd_stat(struct xgene_enet_pdata *pdata, u32 rd_addr)
{
	void __iomem *addr, *rd, *cmd, *cmd_done;
	u32 done, rd_data;
	u8 wait = 10;

	addr = pdata->mcx_stats_addr + STAT_ADDR_REG_OFFSET;
	rd = pdata->mcx_stats_addr + STAT_READ_REG_OFFSET;
	cmd = pdata->mcx_stats_addr + STAT_COMMAND_REG_OFFSET;
	cmd_done = pdata->mcx_stats_addr + STAT_COMMAND_DONE_REG_OFFSET;

	spin_lock(&pdata->stats_lock);
	iowrite32(rd_addr, addr);
	iowrite32(XGENE_ENET_RD_CMD, cmd);

	while (!(done = ioread32(cmd_done)) && wait--)
		udelay(1);

	if (!done)
		netdev_err(pdata->ndev, "mac stats read failed, addr: %04x\n",
			   rd_addr);

	rd_data = ioread32(rd);
	iowrite32(0, cmd);
	spin_unlock(&pdata->stats_lock);

	return rd_data;
}

/* Program the station MAC address (little-endian packed into two words). */
static void xgene_gmac_set_mac_addr(struct xgene_enet_pdata *pdata)
{
	const u8 *dev_addr = pdata->ndev->dev_addr;
	u32 addr0, addr1;

	addr0 = (dev_addr[3] << 24) | (dev_addr[2] << 16) |
		(dev_addr[1] << 8) | dev_addr[0];
	addr1 = (dev_addr[5] << 24) | (dev_addr[4] << 16);

	xgene_enet_wr_mac(pdata, STATION_ADDR0_ADDR, addr0);
	xgene_enet_wr_mac(pdata, STATION_ADDR1_ADDR, addr1);
}

/*
 * Release the ethernet block's RAM from shutdown and poll until the
 * memory-ready register reads all-ones (~1ms worst case).
 */
static int xgene_enet_ecc_init(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	u32 data;
	u8 wait = 10;

	xgene_enet_wr_diag_csr(pdata, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0x0);
	do {
		usleep_range(100, 110);
		xgene_enet_rd_diag_csr(pdata, ENET_BLOCK_MEM_RDY_ADDR, &data);
	} while ((data != 0xffffffff) && wait--);

	if (data != 0xffffffff) {
		netdev_err(ndev, "Failed to release memory from shutdown\n");
		return -ENODEV;
	}

	return 0;
}

/* Soft-reset the MAC by toggling the reset bits in MAC_CONFIG_1. */
static void xgene_gmac_reset(struct xgene_enet_pdata *pdata)
{
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, SOFT_RESET1);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, 0);
}

/*
 * Set the reference clock for the current PHY speed: via the parent clock
 * on DT systems, via ACPI methods (S10/S100/S1G) otherwise.
 */
static void xgene_enet_configure_clock(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		struct clk *parent = clk_get_parent(pdata->clk);

		switch (pdata->phy_speed) {
		case SPEED_10:
			clk_set_rate(parent, 2500000);
			break;
		case SPEED_100:
			clk_set_rate(parent, 25000000);
			break;
		default:
			clk_set_rate(parent, 125000000);
			break;
		}
	}
#ifdef CONFIG_ACPI
	else {
		switch (pdata->phy_speed) {
		case SPEED_10:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S10", NULL, NULL);
			break;
		case SPEED_100:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S100", NULL, NULL);
			break;
		default:
			acpi_evaluate_object(ACPI_HANDLE(dev),
					     "S1G", NULL, NULL);
			break;
		}
	}
#endif
}

/*
 * Reconfigure MAC interface mode, ICM wait states, RGMII delays and the
 * reference clock for pdata->phy_speed (10/100/default-1G).
 */
static void xgene_gmac_set_speed(struct xgene_enet_pdata *pdata)
{
	u32 icm0, icm2, mc2;
	u32 intf_ctl, rgmii, value;

	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, &icm0);
	xgene_enet_rd_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, &icm2);
	mc2 = xgene_enet_rd_mac(pdata, MAC_CONFIG_2_ADDR);
	intf_ctl = xgene_enet_rd_mac(pdata, INTERFACE_CONTROL_ADDR);
	xgene_enet_rd_csr(pdata, RGMII_REG_0_ADDR, &rgmii);

	switch (pdata->phy_speed) {
	case SPEED_10:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~(ENET_LHD_MODE | ENET_GHD_MODE);
		CFG_MACMODE_SET(&icm0, 0);
		CFG_WAITASYNCRD_SET(&icm2, 500);
		rgmii &= ~CFG_SPEED_1250;
		break;
	case SPEED_100:
		ENET_INTERFACE_MODE2_SET(&mc2, 1);
		intf_ctl &= ~ENET_GHD_MODE;
		intf_ctl |= ENET_LHD_MODE;
		CFG_MACMODE_SET(&icm0, 1);
		CFG_WAITASYNCRD_SET(&icm2, 80);
		rgmii &= ~CFG_SPEED_1250;
		break;
	default:
		ENET_INTERFACE_MODE2_SET(&mc2, 2);
		intf_ctl &= ~ENET_LHD_MODE;
		intf_ctl |= ENET_GHD_MODE;
		CFG_MACMODE_SET(&icm0, 2);
		CFG_WAITASYNCRD_SET(&icm2, 0);
		CFG_TXCLK_MUXSEL0_SET(&rgmii, pdata->tx_delay);
		CFG_RXCLK_MUXSEL0_SET(&rgmii, pdata->rx_delay);
		rgmii |= CFG_SPEED_1250;

		xgene_enet_rd_csr(pdata, DEBUG_REG_ADDR, &value);
		value |= CFG_BYPASS_UNISEC_TX | CFG_BYPASS_UNISEC_RX;
		xgene_enet_wr_csr(pdata, DEBUG_REG_ADDR, value);
		break;
	}

	mc2 |= FULL_DUPLEX2 | PAD_CRC | LENGTH_CHK;
	xgene_enet_wr_mac(pdata, MAC_CONFIG_2_ADDR, mc2);
	xgene_enet_wr_mac(pdata, INTERFACE_CONTROL_ADDR, intf_ctl);
	xgene_enet_wr_csr(pdata, RGMII_REG_0_ADDR, rgmii);
	xgene_enet_configure_clock(pdata);

	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG0_REG_0_ADDR, icm0);
	xgene_enet_wr_mcx_csr(pdata, ICM_CONFIG2_REG_0_ADDR, icm2);
}

/* Program the maximum accepted frame length. */
static void xgene_enet_set_frame_size(struct xgene_enet_pdata *pdata, int size)
{
	xgene_enet_wr_mac(pdata, MAX_FRAME_LEN_ADDR, size);
}

/* Enable/disable hardware generation of tx pause frames (ECM config). */
static void xgene_gmac_enable_tx_pause(struct xgene_enet_pdata *pdata,
				       bool enable)
{
	u32 data;

	xgene_enet_rd_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, &data);

	if (enable)
		data |= MULTI_DPF_AUTOCTRL | PAUSE_XON_EN;
	else
		data &= ~(MULTI_DPF_AUTOCTRL | PAUSE_XON_EN);

	xgene_enet_wr_mcx_csr(pdata, CSR_ECM_CFG_0_ADDR, data);
}

/* Toggle tx flow control in MAC_CONFIG_1 and propagate to the ECM. */
static void xgene_gmac_flowctl_tx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);

	if (enable)
		data |= TX_FLOW_EN;
	else
		data &= ~TX_FLOW_EN;

	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);

	pdata->mac_ops->enable_tx_pause(pdata, enable);
}

/* Toggle rx flow control (honouring received pause frames). */
static void xgene_gmac_flowctl_rx(struct xgene_enet_pdata *pdata, bool enable)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);

	if (enable)
		data |= RX_FLOW_EN;
	else
		data &= ~RX_FLOW_EN;

	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data);
}

/*
 * One-time GMAC bring-up: optional reset, speed/MAC-address programming,
 * MDC clock, drop-on-no-bufpool, pause-frame setup and rx/tx gating.
 */
static void xgene_gmac_init(struct xgene_enet_pdata *pdata)
{
	u32 value;

	if (!pdata->mdio_driver)
		xgene_gmac_reset(pdata);

	xgene_gmac_set_speed(pdata);
	xgene_gmac_set_mac_addr(pdata);

	/* Adjust MDC clock frequency */
	value = xgene_enet_rd_mac(pdata, MII_MGMT_CONFIG_ADDR);
	MGMT_CLOCK_SEL_SET(&value, 7);
	xgene_enet_wr_mac(pdata, MII_MGMT_CONFIG_ADDR, value);

	/* Enable drop if bufpool not available */
	xgene_enet_rd_csr(pdata, RSIF_CONFIG_REG_ADDR, &value);
	value |= CFG_RSIF_FPBUFF_TIMEOUT_EN;
	xgene_enet_wr_csr(pdata, RSIF_CONFIG_REG_ADDR, value);

	/* Rtype should be copied from FP */
	xgene_enet_wr_csr(pdata, RSIF_RAM_DBG_REG0_ADDR, 0);

	/* Configure HW pause frame generation */
	xgene_enet_rd_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, &value);
	value = (DEF_QUANTA << 16) | (value & 0xFFFF);
	xgene_enet_wr_mcx_csr(pdata, CSR_MULTI_DPF0_ADDR, value);

	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_THRESH, DEF_PAUSE_THRES);
	xgene_enet_wr_csr(pdata, RXBUF_PAUSE_OFF_THRESH, DEF_PAUSE_OFF_THRES);

	xgene_gmac_flowctl_tx(pdata, pdata->tx_pause);
	xgene_gmac_flowctl_rx(pdata, pdata->rx_pause);

	/* Rx-Tx traffic resume */
	xgene_enet_wr_csr(pdata, CFG_LINK_AGGR_RESUME_0_ADDR, TX_PORT0);

	xgene_enet_rd_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, &value);
	value &= ~TX_DV_GATE_EN0;
	value &= ~RX_DV_GATE_EN0;
	value |= RESUME_RX0;
	xgene_enet_wr_mcx_csr(pdata, RX_DV_GATE_REG_0_ADDR, value);

	xgene_enet_wr_csr(pdata, CFG_BYPASS_ADDR, RESUME_TX);
}

/* Read the combined ICM/ECM drop counter and split it into rx/tx counts. */
static void xgene_gmac_get_drop_cnt(struct xgene_enet_pdata *pdata,
				    u32 *rx, u32 *tx)
{
	u32 count;

	xgene_enet_rd_mcx_csr(pdata, ICM_ECM_DROP_COUNT_REG0_ADDR, &count);
	*rx = ICM_DROP_COUNT(count);
	*tx = ECM_DROP_COUNT(count);
	/* Errata: 10GE_4 - Fix ICM_ECM_DROP_COUNT not clear-on-read */
	xgene_enet_rd_mcx_csr(pdata, ECM_CONFIG0_REG_0_ADDR, &count);
}

/* Associate all work/free-pool queues with the QM-lite interfaces. */
static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *pdata)
{
	u32 val = 0xffffffff;

	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIFPQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEWQASSOC_ADDR, val);
	xgene_enet_wr_ring_if(pdata, ENET_CFGSSQMIQMLITEFPQASSOC_ADDR, val);
}

/*
 * Bypass the classifier: steer all rx traffic directly to @dst_ring_num
 * using the given (next) buffer pools.
 */
static void xgene_enet_cle_bypass(struct xgene_enet_pdata *pdata,
				  u32 dst_ring_num, u16 bufpool_id,
				  u16 nxtbufpool_id)
{
	u32 cb;
	u32 fpsel, nxtfpsel;

	fpsel = xgene_enet_get_fpsel(bufpool_id);
	nxtfpsel = xgene_enet_get_fpsel(nxtbufpool_id);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG0_0_ADDR, &cb);
	cb |= CFG_CLE_BYPASS_EN0;
	CFG_CLE_IP_PROTOCOL0_SET(&cb, 3);
	CFG_CLE_IP_HDR_LEN_SET(&cb, 0);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG0_0_ADDR, cb);

	xgene_enet_rd_csr(pdata, CLE_BYPASS_REG1_0_ADDR, &cb);
	CFG_CLE_DSTQID0_SET(&cb, dst_ring_num);
	CFG_CLE_FPSEL0_SET(&cb, fpsel);
	CFG_CLE_NXTFPSEL0_SET(&cb, nxtfpsel);
	xgene_enet_wr_csr(pdata, CLE_BYPASS_REG1_0_ADDR, cb);
}

/* Read-modify-write of RX_EN / TX_EN in MAC_CONFIG_1. */
static void xgene_gmac_rx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | RX_EN);
}

static void xgene_gmac_tx_enable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data | TX_EN);
}

static void xgene_gmac_rx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~RX_EN);
}

static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
{
	u32 data;

	data = xgene_enet_rd_mac(pdata, MAC_CONFIG_1_ADDR);
	xgene_enet_wr_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
}

/* Ring manager is usable only when clocked and out of reset. */
bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
{
	if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
		return false;

	if (ioread32(p->ring_csr_addr + SRST_ADDR))
		return false;

	return true;
}

/*
 * Block-level reset: clock-cycle the IP (DT) or invoke ACPI _RST/_INI,
 * bring memory out of shutdown, then set up ring-interface associations.
 * With an external MDIO driver only the association step is needed.
 */
static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (!xgene_ring_mgr_init(pdata))
		return -ENODEV;

	if (pdata->mdio_driver) {
		xgene_enet_config_ring_if_assoc(pdata);
		return 0;
	}

	if (dev->of_node) {
		clk_prepare_enable(pdata->clk);
		udelay(5);
		clk_disable_unprepare(pdata->clk);
		udelay(5);
		clk_prepare_enable(pdata->clk);
		udelay(5);
	} else {
#ifdef CONFIG_ACPI
		acpi_status status;

		status = acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					      "_RST", NULL, NULL);
		if (ACPI_FAILURE(status)) {
			acpi_evaluate_object(ACPI_HANDLE(&pdata->pdev->dev),
					     "_INI", NULL, NULL);
		}
#endif
	}

	xgene_enet_ecc_init(pdata);
	xgene_enet_config_ring_if_assoc(pdata);

	return 0;
}

/* Reset one work queue or free pool in the QMI interface. */
static void xgene_enet_clear(struct xgene_enet_pdata *pdata,
			     struct xgene_enet_desc_ring *ring)
{
	u32 addr, data;

	if (xgene_enet_is_bufpool(ring->id)) {
		addr = ENET_CFGSSQMIFPRESET_ADDR;
		data = BIT(xgene_enet_get_fpsel(ring->id));
	} else {
		addr = ENET_CFGSSQMIWQRESET_ADDR;
		data = BIT(xgene_enet_ring_bufnum(ring->id));
	}

	xgene_enet_wr_ring_if(pdata, addr, data);
}

/* Gate the port clock on shutdown (DT systems only). */
static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
{
	struct device *dev = &pdata->pdev->dev;

	if (dev->of_node) {
		if (!IS_ERR(pdata->clk))
			clk_disable_unprepare(pdata->clk);
	}
}

/*
 * Resolve autonegotiated pause settings against local advertisement and
 * apply any change through the mac_ops flow-control hooks.
 * Always returns 0.
 */
static u32 xgene_enet_flowctrl_cfg(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;
	u16 lcladv, rmtadv = 0;
	u32 rx_pause, tx_pause;
	u8 flowctl = 0;

	if (!phydev->duplex || !pdata->pause_autoneg)
		return 0;

	if (pdata->tx_pause)
		flowctl |= FLOW_CTRL_TX;

	if (pdata->rx_pause)
		flowctl |= FLOW_CTRL_RX;

	lcladv = mii_advertise_flowctrl(flowctl);

	if (phydev->pause)
		rmtadv = LPA_PAUSE_CAP;

	if (phydev->asym_pause)
		rmtadv |= LPA_PAUSE_ASYM;

	flowctl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
	tx_pause = !!(flowctl & FLOW_CTRL_TX);
	rx_pause = !!(flowctl & FLOW_CTRL_RX);

	if (tx_pause != pdata->tx_pause) {
		pdata->tx_pause = tx_pause;
		pdata->mac_ops->flowctl_tx(pdata, pdata->tx_pause);
	}

	if (rx_pause != pdata->rx_pause) {
		pdata->rx_pause = rx_pause;
		pdata->mac_ops->flowctl_rx(pdata, pdata->rx_pause);
	}

	return 0;
}

/*
 * phylib link-change callback: on link-up reprogram speed and re-enable
 * rx/tx if the speed changed, then refresh flow control; on link-down
 * disable rx/tx and forget the cached speed.
 */
static void xgene_enet_adjust_link(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	const struct xgene_mac_ops *mac_ops = pdata->mac_ops;
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link) {
		if (pdata->phy_speed != phydev->speed) {
			pdata->phy_speed = phydev->speed;
			mac_ops->set_speed(pdata);
			mac_ops->rx_enable(pdata);
			mac_ops->tx_enable(pdata);
			phy_print_status(phydev);
		}

		xgene_enet_flowctrl_cfg(ndev);
	} else {
		mac_ops->rx_disable(pdata);
		mac_ops->tx_disable(pdata);
		pdata->phy_speed = SPEED_UNKNOWN;
		phy_print_status(phydev);
	}
}

#ifdef CONFIG_ACPI
/* Look up the PHY's ACPI device through the "phy-handle" reference. */
static struct acpi_device *acpi_phy_find_device(struct device *dev)
{
	struct fwnode_reference_args args;
	struct fwnode_handle *fw_node;
	int status;

	fw_node = acpi_fwnode_handle(ACPI_COMPANION(dev));
	status = acpi_node_get_property_reference(fw_node, "phy-handle", 0,
						  &args);
	if (ACPI_FAILURE(status) || !is_acpi_device_node(args.fwnode)) {
		dev_dbg(dev, "No matching phy in ACPI table\n");
		return NULL;
	}

	return to_acpi_device_node(args.fwnode);
}
#endif

/*
 * Connect to the PHY via DT (trying up to two "phy-handle" entries) or
 * ACPI, restrict it to full-duplex modes, and enable asym pause support.
 * Returns 0 on success, -ENODEV when no PHY could be connected.
 */
int xgene_enet_phy_connect(struct net_device *ndev)
{
	struct xgene_enet_pdata *pdata = netdev_priv(ndev);
	struct device_node *np;
	struct phy_device *phy_dev;
	struct device *dev = &pdata->pdev->dev;
	int i;

	if (dev->of_node) {
		for (i = 0 ; i < 2; i++) {
			np = of_parse_phandle(dev->of_node, "phy-handle", i);
			phy_dev = of_phy_connect(ndev, np,
						 &xgene_enet_adjust_link,
						 0, pdata->phy_mode);
			of_node_put(np);
			if (phy_dev)
				break;
		}

		if (!phy_dev) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
	} else {
#ifdef CONFIG_ACPI
		struct acpi_device *adev = acpi_phy_find_device(dev);

		if (adev)
			phy_dev = adev->driver_data;
		else
			phy_dev = NULL;

		if (!phy_dev ||
		    phy_connect_direct(ndev, phy_dev, &xgene_enet_adjust_link,
				       pdata->phy_mode)) {
			netdev_err(ndev, "Could not connect to PHY\n");
			return -ENODEV;
		}
#else
		return -ENODEV;
#endif
	}

	pdata->phy_speed = SPEED_UNKNOWN;
	phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phy_dev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
	phy_support_asym_pause(phy_dev);

	return 0;
}

/*
 * Register the MDIO bus: via the "apm,xgene-mdio" DT child node when
 * present, otherwise register an empty bus and attach the single PHY
 * named by the "phy-channel"/"phy-addr" property.
 */
static int xgene_mdiobus_register(struct xgene_enet_pdata *pdata,
				  struct mii_bus *mdio)
{
	struct device *dev = &pdata->pdev->dev;
	struct net_device *ndev = pdata->ndev;
	struct phy_device *phy;
	struct device_node *child_np;
	struct device_node *mdio_np = NULL;
	u32 phy_addr;
	int ret;

	if (dev->of_node) {
		for_each_child_of_node(dev->of_node, child_np) {
			if (of_device_is_compatible(child_np,
						    "apm,xgene-mdio")) {
				mdio_np = child_np;
				break;
			}
		}

		if (!mdio_np) {
			netdev_dbg(ndev, "No mdio node in the dts\n");
			return -ENXIO;
		}

		return of_mdiobus_register(mdio, mdio_np);
	}

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = ~0;

	/* Register the MDIO bus */
	ret = mdiobus_register(mdio);
	if (ret)
		return ret;

	ret = device_property_read_u32(dev, "phy-channel", &phy_addr);
	if (ret)
		ret = device_property_read_u32(dev, "phy-addr", &phy_addr);
	if (ret)
		return -EINVAL;

	phy = xgene_enet_phy_register(mdio, phy_addr);
	if (!phy)
		return -EIO;

	return ret;
}

/*
 * Allocate, configure and register the RGMII MDIO bus, then connect the
 * PHY; on PHY-connect failure the MDIO registration is rolled back.
 */
int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;
	struct mii_bus *mdio_bus;
	int ret;

	mdio_bus = mdiobus_alloc();
	if (!mdio_bus)
		return -ENOMEM;

	mdio_bus->name = "APM X-Gene MDIO bus";
	mdio_bus->read = xgene_mdio_rgmii_read;
	mdio_bus->write = xgene_mdio_rgmii_write;
	snprintf(mdio_bus->id, MII_BUS_ID_SIZE, "%s-%s", "xgene-mii",
		 ndev->name);

	mdio_bus->priv = (void __force *)pdata->mcx_mac_addr;
	mdio_bus->parent = &pdata->pdev->dev;

	ret = xgene_mdiobus_register(pdata, mdio_bus);
	if (ret) {
		netdev_err(ndev, "Failed to register MDIO bus\n");
		mdiobus_free(mdio_bus);
		return ret;
	}
	pdata->mdio_bus = mdio_bus;

	ret = xgene_enet_phy_connect(ndev);
	if (ret)
		xgene_enet_mdio_remove(pdata);

	return ret;
}

/* Disconnect the PHY if one is attached. */
void xgene_enet_phy_disconnect(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);
}

/* Full MDIO teardown: disconnect PHY, unregister and free the bus. */
void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata)
{
	struct net_device *ndev = pdata->ndev;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	mdiobus_unregister(pdata->mdio_bus);
	mdiobus_free(pdata->mdio_bus);
	pdata->mdio_bus = NULL;
}

/* Ops vectors exported to the core driver for the RGMII/GMAC flavour. */
const struct xgene_mac_ops xgene_gmac_ops = {
	.init = xgene_gmac_init,
	.reset = xgene_gmac_reset,
	.rx_enable = xgene_gmac_rx_enable,
	.tx_enable = xgene_gmac_tx_enable,
	.rx_disable = xgene_gmac_rx_disable,
	.tx_disable = xgene_gmac_tx_disable,
	.get_drop_cnt = xgene_gmac_get_drop_cnt,
	.set_speed = xgene_gmac_set_speed,
	.set_mac_addr = xgene_gmac_set_mac_addr,
	.set_framesize = xgene_enet_set_frame_size,
	.enable_tx_pause = xgene_gmac_enable_tx_pause,
	.flowctl_tx = xgene_gmac_flowctl_tx,
	.flowctl_rx = xgene_gmac_flowctl_rx,
};

const struct xgene_port_ops xgene_gport_ops = {
	.reset = xgene_enet_reset,
	.clear = xgene_enet_clear,
	.cle_bypass = xgene_enet_cle_bypass,
	.shutdown = xgene_gport_shutdown,
};

struct xgene_ring_ops xgene_ring1_ops = {
	.num_ring_config = NUM_RING_CONFIG,
	.num_ring_id_shift = 6,
	.setup = xgene_enet_setup_ring,
	.clear = xgene_enet_clear_ring,
	.wr_cmd = xgene_enet_wr_cmd,
	.len = xgene_enet_ring_len,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * sama5d31.dtsi - Device Tree Include file for SAMA5D31 SoC
 *
 * Copyright (C) 2013 Boris BREZILLON <[email protected]>
 */
/*
 * The SAMA5D31 is the base SAMA5D3 plus the LCD controller, EMAC,
 * a second MCI and the extra UART; each is pulled in from its own
 * include so the other SAMA5D3x variants can mix and match.
 */
#include "sama5d3.dtsi"
#include "sama5d3_lcd.dtsi"
#include "sama5d3_emac.dtsi"
#include "sama5d3_mci2.dtsi"
#include "sama5d3_uart.dtsi"

/ {
	compatible = "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
};
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2016-2018 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef ASIC_REG_DMA0_QM_REGS_H_ #define ASIC_REG_DMA0_QM_REGS_H_ /* ***************************************** * DMA0_QM (Prototype: QMAN) ***************************************** */ #define mmDMA0_QM_GLBL_CFG0 0x508000 #define mmDMA0_QM_GLBL_CFG1 0x508004 #define mmDMA0_QM_GLBL_PROT 0x508008 #define mmDMA0_QM_GLBL_ERR_CFG 0x50800C #define mmDMA0_QM_GLBL_SECURE_PROPS_0 0x508010 #define mmDMA0_QM_GLBL_SECURE_PROPS_1 0x508014 #define mmDMA0_QM_GLBL_SECURE_PROPS_2 0x508018 #define mmDMA0_QM_GLBL_SECURE_PROPS_3 0x50801C #define mmDMA0_QM_GLBL_SECURE_PROPS_4 0x508020 #define mmDMA0_QM_GLBL_NON_SECURE_PROPS_0 0x508024 #define mmDMA0_QM_GLBL_NON_SECURE_PROPS_1 0x508028 #define mmDMA0_QM_GLBL_NON_SECURE_PROPS_2 0x50802C #define mmDMA0_QM_GLBL_NON_SECURE_PROPS_3 0x508030 #define mmDMA0_QM_GLBL_NON_SECURE_PROPS_4 0x508034 #define mmDMA0_QM_GLBL_STS0 0x508038 #define mmDMA0_QM_GLBL_STS1_0 0x508040 #define mmDMA0_QM_GLBL_STS1_1 0x508044 #define mmDMA0_QM_GLBL_STS1_2 0x508048 #define mmDMA0_QM_GLBL_STS1_3 0x50804C #define mmDMA0_QM_GLBL_STS1_4 0x508050 #define mmDMA0_QM_GLBL_MSG_EN_0 0x508054 #define mmDMA0_QM_GLBL_MSG_EN_1 0x508058 #define mmDMA0_QM_GLBL_MSG_EN_2 0x50805C #define mmDMA0_QM_GLBL_MSG_EN_3 0x508060 #define mmDMA0_QM_GLBL_MSG_EN_4 0x508068 #define mmDMA0_QM_PQ_BASE_LO_0 0x508070 #define mmDMA0_QM_PQ_BASE_LO_1 0x508074 #define mmDMA0_QM_PQ_BASE_LO_2 0x508078 #define mmDMA0_QM_PQ_BASE_LO_3 0x50807C #define mmDMA0_QM_PQ_BASE_HI_0 0x508080 #define mmDMA0_QM_PQ_BASE_HI_1 0x508084 #define mmDMA0_QM_PQ_BASE_HI_2 0x508088 #define mmDMA0_QM_PQ_BASE_HI_3 0x50808C #define mmDMA0_QM_PQ_SIZE_0 0x508090 #define mmDMA0_QM_PQ_SIZE_1 0x508094 #define mmDMA0_QM_PQ_SIZE_2 0x508098 #define mmDMA0_QM_PQ_SIZE_3 0x50809C #define mmDMA0_QM_PQ_PI_0 
0x5080A0 #define mmDMA0_QM_PQ_PI_1 0x5080A4 #define mmDMA0_QM_PQ_PI_2 0x5080A8 #define mmDMA0_QM_PQ_PI_3 0x5080AC #define mmDMA0_QM_PQ_CI_0 0x5080B0 #define mmDMA0_QM_PQ_CI_1 0x5080B4 #define mmDMA0_QM_PQ_CI_2 0x5080B8 #define mmDMA0_QM_PQ_CI_3 0x5080BC #define mmDMA0_QM_PQ_CFG0_0 0x5080C0 #define mmDMA0_QM_PQ_CFG0_1 0x5080C4 #define mmDMA0_QM_PQ_CFG0_2 0x5080C8 #define mmDMA0_QM_PQ_CFG0_3 0x5080CC #define mmDMA0_QM_PQ_CFG1_0 0x5080D0 #define mmDMA0_QM_PQ_CFG1_1 0x5080D4 #define mmDMA0_QM_PQ_CFG1_2 0x5080D8 #define mmDMA0_QM_PQ_CFG1_3 0x5080DC #define mmDMA0_QM_PQ_ARUSER_31_11_0 0x5080E0 #define mmDMA0_QM_PQ_ARUSER_31_11_1 0x5080E4 #define mmDMA0_QM_PQ_ARUSER_31_11_2 0x5080E8 #define mmDMA0_QM_PQ_ARUSER_31_11_3 0x5080EC #define mmDMA0_QM_PQ_STS0_0 0x5080F0 #define mmDMA0_QM_PQ_STS0_1 0x5080F4 #define mmDMA0_QM_PQ_STS0_2 0x5080F8 #define mmDMA0_QM_PQ_STS0_3 0x5080FC #define mmDMA0_QM_PQ_STS1_0 0x508100 #define mmDMA0_QM_PQ_STS1_1 0x508104 #define mmDMA0_QM_PQ_STS1_2 0x508108 #define mmDMA0_QM_PQ_STS1_3 0x50810C #define mmDMA0_QM_CQ_CFG0_0 0x508110 #define mmDMA0_QM_CQ_CFG0_1 0x508114 #define mmDMA0_QM_CQ_CFG0_2 0x508118 #define mmDMA0_QM_CQ_CFG0_3 0x50811C #define mmDMA0_QM_CQ_CFG0_4 0x508120 #define mmDMA0_QM_CQ_CFG1_0 0x508124 #define mmDMA0_QM_CQ_CFG1_1 0x508128 #define mmDMA0_QM_CQ_CFG1_2 0x50812C #define mmDMA0_QM_CQ_CFG1_3 0x508130 #define mmDMA0_QM_CQ_CFG1_4 0x508134 #define mmDMA0_QM_CQ_ARUSER_31_11_0 0x508138 #define mmDMA0_QM_CQ_ARUSER_31_11_1 0x50813C #define mmDMA0_QM_CQ_ARUSER_31_11_2 0x508140 #define mmDMA0_QM_CQ_ARUSER_31_11_3 0x508144 #define mmDMA0_QM_CQ_ARUSER_31_11_4 0x508148 #define mmDMA0_QM_CQ_STS0_0 0x50814C #define mmDMA0_QM_CQ_STS0_1 0x508150 #define mmDMA0_QM_CQ_STS0_2 0x508154 #define mmDMA0_QM_CQ_STS0_3 0x508158 #define mmDMA0_QM_CQ_STS0_4 0x50815C #define mmDMA0_QM_CQ_STS1_0 0x508160 #define mmDMA0_QM_CQ_STS1_1 0x508164 #define mmDMA0_QM_CQ_STS1_2 0x508168 #define mmDMA0_QM_CQ_STS1_3 0x50816C #define mmDMA0_QM_CQ_STS1_4 0x508170 #define 
mmDMA0_QM_CQ_PTR_LO_0 0x508174 #define mmDMA0_QM_CQ_PTR_HI_0 0x508178 #define mmDMA0_QM_CQ_TSIZE_0 0x50817C #define mmDMA0_QM_CQ_CTL_0 0x508180 #define mmDMA0_QM_CQ_PTR_LO_1 0x508184 #define mmDMA0_QM_CQ_PTR_HI_1 0x508188 #define mmDMA0_QM_CQ_TSIZE_1 0x50818C #define mmDMA0_QM_CQ_CTL_1 0x508190 #define mmDMA0_QM_CQ_PTR_LO_2 0x508194 #define mmDMA0_QM_CQ_PTR_HI_2 0x508198 #define mmDMA0_QM_CQ_TSIZE_2 0x50819C #define mmDMA0_QM_CQ_CTL_2 0x5081A0 #define mmDMA0_QM_CQ_PTR_LO_3 0x5081A4 #define mmDMA0_QM_CQ_PTR_HI_3 0x5081A8 #define mmDMA0_QM_CQ_TSIZE_3 0x5081AC #define mmDMA0_QM_CQ_CTL_3 0x5081B0 #define mmDMA0_QM_CQ_PTR_LO_4 0x5081B4 #define mmDMA0_QM_CQ_PTR_HI_4 0x5081B8 #define mmDMA0_QM_CQ_TSIZE_4 0x5081BC #define mmDMA0_QM_CQ_CTL_4 0x5081C0 #define mmDMA0_QM_CQ_PTR_LO_STS_0 0x5081C4 #define mmDMA0_QM_CQ_PTR_LO_STS_1 0x5081C8 #define mmDMA0_QM_CQ_PTR_LO_STS_2 0x5081CC #define mmDMA0_QM_CQ_PTR_LO_STS_3 0x5081D0 #define mmDMA0_QM_CQ_PTR_LO_STS_4 0x5081D4 #define mmDMA0_QM_CQ_PTR_HI_STS_0 0x5081D8 #define mmDMA0_QM_CQ_PTR_HI_STS_1 0x5081DC #define mmDMA0_QM_CQ_PTR_HI_STS_2 0x5081E0 #define mmDMA0_QM_CQ_PTR_HI_STS_3 0x5081E4 #define mmDMA0_QM_CQ_PTR_HI_STS_4 0x5081E8 #define mmDMA0_QM_CQ_TSIZE_STS_0 0x5081EC #define mmDMA0_QM_CQ_TSIZE_STS_1 0x5081F0 #define mmDMA0_QM_CQ_TSIZE_STS_2 0x5081F4 #define mmDMA0_QM_CQ_TSIZE_STS_3 0x5081F8 #define mmDMA0_QM_CQ_TSIZE_STS_4 0x5081FC #define mmDMA0_QM_CQ_CTL_STS_0 0x508200 #define mmDMA0_QM_CQ_CTL_STS_1 0x508204 #define mmDMA0_QM_CQ_CTL_STS_2 0x508208 #define mmDMA0_QM_CQ_CTL_STS_3 0x50820C #define mmDMA0_QM_CQ_CTL_STS_4 0x508210 #define mmDMA0_QM_CQ_IFIFO_CNT_0 0x508214 #define mmDMA0_QM_CQ_IFIFO_CNT_1 0x508218 #define mmDMA0_QM_CQ_IFIFO_CNT_2 0x50821C #define mmDMA0_QM_CQ_IFIFO_CNT_3 0x508220 #define mmDMA0_QM_CQ_IFIFO_CNT_4 0x508224 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_0 0x508228 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_1 0x50822C #define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_2 0x508230 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_3 
0x508234 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_LO_4 0x508238 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_0 0x50823C #define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_1 0x508240 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_2 0x508244 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_3 0x508248 #define mmDMA0_QM_CP_MSG_BASE0_ADDR_HI_4 0x50824C #define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_0 0x508250 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_1 0x508254 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_2 0x508258 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_3 0x50825C #define mmDMA0_QM_CP_MSG_BASE1_ADDR_LO_4 0x508260 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_0 0x508264 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_1 0x508268 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_2 0x50826C #define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_3 0x508270 #define mmDMA0_QM_CP_MSG_BASE1_ADDR_HI_4 0x508274 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_0 0x508278 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_1 0x50827C #define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_2 0x508280 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_3 0x508284 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_LO_4 0x508288 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_0 0x50828C #define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_1 0x508290 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_2 0x508294 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_3 0x508298 #define mmDMA0_QM_CP_MSG_BASE2_ADDR_HI_4 0x50829C #define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_0 0x5082A0 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_1 0x5082A4 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_2 0x5082A8 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_3 0x5082AC #define mmDMA0_QM_CP_MSG_BASE3_ADDR_LO_4 0x5082B0 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_0 0x5082B4 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_1 0x5082B8 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_2 0x5082BC #define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_3 0x5082C0 #define mmDMA0_QM_CP_MSG_BASE3_ADDR_HI_4 0x5082C4 #define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_0 0x5082C8 #define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_1 0x5082CC #define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_2 0x5082D0 #define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_3 
0x5082D4 #define mmDMA0_QM_CP_LDMA_TSIZE_OFFSET_4 0x5082D8 #define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_0 0x5082E0 #define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_1 0x5082E4 #define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_2 0x5082E8 #define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_3 0x5082EC #define mmDMA0_QM_CP_LDMA_SRC_BASE_LO_OFFSET_4 0x5082F0 #define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_0 0x5082F4 #define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_1 0x5082F8 #define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_2 0x5082FC #define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_3 0x508300 #define mmDMA0_QM_CP_LDMA_DST_BASE_LO_OFFSET_4 0x508304 #define mmDMA0_QM_CP_FENCE0_RDATA_0 0x508308 #define mmDMA0_QM_CP_FENCE0_RDATA_1 0x50830C #define mmDMA0_QM_CP_FENCE0_RDATA_2 0x508310 #define mmDMA0_QM_CP_FENCE0_RDATA_3 0x508314 #define mmDMA0_QM_CP_FENCE0_RDATA_4 0x508318 #define mmDMA0_QM_CP_FENCE1_RDATA_0 0x50831C #define mmDMA0_QM_CP_FENCE1_RDATA_1 0x508320 #define mmDMA0_QM_CP_FENCE1_RDATA_2 0x508324 #define mmDMA0_QM_CP_FENCE1_RDATA_3 0x508328 #define mmDMA0_QM_CP_FENCE1_RDATA_4 0x50832C #define mmDMA0_QM_CP_FENCE2_RDATA_0 0x508330 #define mmDMA0_QM_CP_FENCE2_RDATA_1 0x508334 #define mmDMA0_QM_CP_FENCE2_RDATA_2 0x508338 #define mmDMA0_QM_CP_FENCE2_RDATA_3 0x50833C #define mmDMA0_QM_CP_FENCE2_RDATA_4 0x508340 #define mmDMA0_QM_CP_FENCE3_RDATA_0 0x508344 #define mmDMA0_QM_CP_FENCE3_RDATA_1 0x508348 #define mmDMA0_QM_CP_FENCE3_RDATA_2 0x50834C #define mmDMA0_QM_CP_FENCE3_RDATA_3 0x508350 #define mmDMA0_QM_CP_FENCE3_RDATA_4 0x508354 #define mmDMA0_QM_CP_FENCE0_CNT_0 0x508358 #define mmDMA0_QM_CP_FENCE0_CNT_1 0x50835C #define mmDMA0_QM_CP_FENCE0_CNT_2 0x508360 #define mmDMA0_QM_CP_FENCE0_CNT_3 0x508364 #define mmDMA0_QM_CP_FENCE0_CNT_4 0x508368 #define mmDMA0_QM_CP_FENCE1_CNT_0 0x50836C #define mmDMA0_QM_CP_FENCE1_CNT_1 0x508370 #define mmDMA0_QM_CP_FENCE1_CNT_2 0x508374 #define mmDMA0_QM_CP_FENCE1_CNT_3 0x508378 #define mmDMA0_QM_CP_FENCE1_CNT_4 0x50837C #define mmDMA0_QM_CP_FENCE2_CNT_0 0x508380 #define 
mmDMA0_QM_CP_FENCE2_CNT_1 0x508384 #define mmDMA0_QM_CP_FENCE2_CNT_2 0x508388 #define mmDMA0_QM_CP_FENCE2_CNT_3 0x50838C #define mmDMA0_QM_CP_FENCE2_CNT_4 0x508390 #define mmDMA0_QM_CP_FENCE3_CNT_0 0x508394 #define mmDMA0_QM_CP_FENCE3_CNT_1 0x508398 #define mmDMA0_QM_CP_FENCE3_CNT_2 0x50839C #define mmDMA0_QM_CP_FENCE3_CNT_3 0x5083A0 #define mmDMA0_QM_CP_FENCE3_CNT_4 0x5083A4 #define mmDMA0_QM_CP_STS_0 0x5083A8 #define mmDMA0_QM_CP_STS_1 0x5083AC #define mmDMA0_QM_CP_STS_2 0x5083B0 #define mmDMA0_QM_CP_STS_3 0x5083B4 #define mmDMA0_QM_CP_STS_4 0x5083B8 #define mmDMA0_QM_CP_CURRENT_INST_LO_0 0x5083BC #define mmDMA0_QM_CP_CURRENT_INST_LO_1 0x5083C0 #define mmDMA0_QM_CP_CURRENT_INST_LO_2 0x5083C4 #define mmDMA0_QM_CP_CURRENT_INST_LO_3 0x5083C8 #define mmDMA0_QM_CP_CURRENT_INST_LO_4 0x5083CC #define mmDMA0_QM_CP_CURRENT_INST_HI_0 0x5083D0 #define mmDMA0_QM_CP_CURRENT_INST_HI_1 0x5083D4 #define mmDMA0_QM_CP_CURRENT_INST_HI_2 0x5083D8 #define mmDMA0_QM_CP_CURRENT_INST_HI_3 0x5083DC #define mmDMA0_QM_CP_CURRENT_INST_HI_4 0x5083E0 #define mmDMA0_QM_CP_BARRIER_CFG_0 0x5083F4 #define mmDMA0_QM_CP_BARRIER_CFG_1 0x5083F8 #define mmDMA0_QM_CP_BARRIER_CFG_2 0x5083FC #define mmDMA0_QM_CP_BARRIER_CFG_3 0x508400 #define mmDMA0_QM_CP_BARRIER_CFG_4 0x508404 #define mmDMA0_QM_CP_DBG_0_0 0x508408 #define mmDMA0_QM_CP_DBG_0_1 0x50840C #define mmDMA0_QM_CP_DBG_0_2 0x508410 #define mmDMA0_QM_CP_DBG_0_3 0x508414 #define mmDMA0_QM_CP_DBG_0_4 0x508418 #define mmDMA0_QM_CP_ARUSER_31_11_0 0x50841C #define mmDMA0_QM_CP_ARUSER_31_11_1 0x508420 #define mmDMA0_QM_CP_ARUSER_31_11_2 0x508424 #define mmDMA0_QM_CP_ARUSER_31_11_3 0x508428 #define mmDMA0_QM_CP_ARUSER_31_11_4 0x50842C #define mmDMA0_QM_CP_AWUSER_31_11_0 0x508430 #define mmDMA0_QM_CP_AWUSER_31_11_1 0x508434 #define mmDMA0_QM_CP_AWUSER_31_11_2 0x508438 #define mmDMA0_QM_CP_AWUSER_31_11_3 0x50843C #define mmDMA0_QM_CP_AWUSER_31_11_4 0x508440 #define mmDMA0_QM_ARB_CFG_0 0x508A00 #define mmDMA0_QM_ARB_CHOISE_Q_PUSH 0x508A04 #define 
mmDMA0_QM_ARB_WRR_WEIGHT_0 0x508A08 #define mmDMA0_QM_ARB_WRR_WEIGHT_1 0x508A0C #define mmDMA0_QM_ARB_WRR_WEIGHT_2 0x508A10 #define mmDMA0_QM_ARB_WRR_WEIGHT_3 0x508A14 #define mmDMA0_QM_ARB_CFG_1 0x508A18 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_0 0x508A20 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_1 0x508A24 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_2 0x508A28 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_3 0x508A2C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_4 0x508A30 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_5 0x508A34 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_6 0x508A38 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_7 0x508A3C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_8 0x508A40 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_9 0x508A44 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_10 0x508A48 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_11 0x508A4C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_12 0x508A50 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_13 0x508A54 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_14 0x508A58 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_15 0x508A5C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_16 0x508A60 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_17 0x508A64 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_18 0x508A68 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_19 0x508A6C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_20 0x508A70 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_21 0x508A74 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_22 0x508A78 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_23 0x508A7C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_24 0x508A80 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_25 0x508A84 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_26 0x508A88 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_27 0x508A8C #define mmDMA0_QM_ARB_MST_AVAIL_CRED_28 0x508A90 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_29 0x508A94 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_30 0x508A98 #define mmDMA0_QM_ARB_MST_AVAIL_CRED_31 0x508A9C #define mmDMA0_QM_ARB_MST_CRED_INC 0x508AA0 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_0 0x508AA4 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_1 0x508AA8 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_2 0x508AAC #define 
mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_3 0x508AB0 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_4 0x508AB4 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_5 0x508AB8 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_6 0x508ABC #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_7 0x508AC0 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_8 0x508AC4 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_9 0x508AC8 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_10 0x508ACC #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_11 0x508AD0 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_12 0x508AD4 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_13 0x508AD8 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_14 0x508ADC #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_15 0x508AE0 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_16 0x508AE4 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_17 0x508AE8 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_18 0x508AEC #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_19 0x508AF0 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_20 0x508AF4 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_21 0x508AF8 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_22 0x508AFC #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_23 0x508B00 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_24 0x508B04 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_25 0x508B08 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_26 0x508B0C #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_27 0x508B10 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_28 0x508B14 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_29 0x508B18 #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_30 0x508B1C #define mmDMA0_QM_ARB_MST_CHOISE_PUSH_OFST_31 0x508B20 #define mmDMA0_QM_ARB_SLV_MASTER_INC_CRED_OFST 0x508B28 #define mmDMA0_QM_ARB_MST_SLAVE_EN 0x508B2C #define mmDMA0_QM_ARB_MST_QUIET_PER 0x508B34 #define mmDMA0_QM_ARB_SLV_CHOISE_WDT 0x508B38 #define mmDMA0_QM_ARB_SLV_ID 0x508B3C #define mmDMA0_QM_ARB_MSG_MAX_INFLIGHT 0x508B44 #define mmDMA0_QM_ARB_MSG_AWUSER_31_11 0x508B48 #define mmDMA0_QM_ARB_MSG_AWUSER_SEC_PROP 0x508B4C #define 
mmDMA0_QM_ARB_MSG_AWUSER_NON_SEC_PROP 0x508B50 #define mmDMA0_QM_ARB_BASE_LO 0x508B54 #define mmDMA0_QM_ARB_BASE_HI 0x508B58 #define mmDMA0_QM_ARB_STATE_STS 0x508B80 #define mmDMA0_QM_ARB_CHOISE_FULLNESS_STS 0x508B84 #define mmDMA0_QM_ARB_MSG_STS 0x508B88 #define mmDMA0_QM_ARB_SLV_CHOISE_Q_HEAD 0x508B8C #define mmDMA0_QM_ARB_ERR_CAUSE 0x508B9C #define mmDMA0_QM_ARB_ERR_MSG_EN 0x508BA0 #define mmDMA0_QM_ARB_ERR_STS_DRP 0x508BA8 #define mmDMA0_QM_ARB_MST_CRED_STS_0 0x508BB0 #define mmDMA0_QM_ARB_MST_CRED_STS_1 0x508BB4 #define mmDMA0_QM_ARB_MST_CRED_STS_2 0x508BB8 #define mmDMA0_QM_ARB_MST_CRED_STS_3 0x508BBC #define mmDMA0_QM_ARB_MST_CRED_STS_4 0x508BC0 #define mmDMA0_QM_ARB_MST_CRED_STS_5 0x508BC4 #define mmDMA0_QM_ARB_MST_CRED_STS_6 0x508BC8 #define mmDMA0_QM_ARB_MST_CRED_STS_7 0x508BCC #define mmDMA0_QM_ARB_MST_CRED_STS_8 0x508BD0 #define mmDMA0_QM_ARB_MST_CRED_STS_9 0x508BD4 #define mmDMA0_QM_ARB_MST_CRED_STS_10 0x508BD8 #define mmDMA0_QM_ARB_MST_CRED_STS_11 0x508BDC #define mmDMA0_QM_ARB_MST_CRED_STS_12 0x508BE0 #define mmDMA0_QM_ARB_MST_CRED_STS_13 0x508BE4 #define mmDMA0_QM_ARB_MST_CRED_STS_14 0x508BE8 #define mmDMA0_QM_ARB_MST_CRED_STS_15 0x508BEC #define mmDMA0_QM_ARB_MST_CRED_STS_16 0x508BF0 #define mmDMA0_QM_ARB_MST_CRED_STS_17 0x508BF4 #define mmDMA0_QM_ARB_MST_CRED_STS_18 0x508BF8 #define mmDMA0_QM_ARB_MST_CRED_STS_19 0x508BFC #define mmDMA0_QM_ARB_MST_CRED_STS_20 0x508C00 #define mmDMA0_QM_ARB_MST_CRED_STS_21 0x508C04 #define mmDMA0_QM_ARB_MST_CRED_STS_22 0x508C08 #define mmDMA0_QM_ARB_MST_CRED_STS_23 0x508C0C #define mmDMA0_QM_ARB_MST_CRED_STS_24 0x508C10 #define mmDMA0_QM_ARB_MST_CRED_STS_25 0x508C14 #define mmDMA0_QM_ARB_MST_CRED_STS_26 0x508C18 #define mmDMA0_QM_ARB_MST_CRED_STS_27 0x508C1C #define mmDMA0_QM_ARB_MST_CRED_STS_28 0x508C20 #define mmDMA0_QM_ARB_MST_CRED_STS_29 0x508C24 #define mmDMA0_QM_ARB_MST_CRED_STS_30 0x508C28 #define mmDMA0_QM_ARB_MST_CRED_STS_31 0x508C2C #define mmDMA0_QM_CGM_CFG 0x508C70 #define mmDMA0_QM_CGM_STS 0x508C74 
#define mmDMA0_QM_CGM_CFG1 0x508C78 #define mmDMA0_QM_LOCAL_RANGE_BASE 0x508C80 #define mmDMA0_QM_LOCAL_RANGE_SIZE 0x508C84 #define mmDMA0_QM_CSMR_STRICT_PRIO_CFG 0x508C90 #define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_1 0x508C94 #define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_0 0x508C98 #define mmDMA0_QM_LBW_WR_RATE_LIM_CFG_1 0x508C9C #define mmDMA0_QM_HBW_RD_RATE_LIM_CFG_0 0x508CA0 #define mmDMA0_QM_GLBL_AXCACHE 0x508CA4 #define mmDMA0_QM_IND_GW_APB_CFG 0x508CB0 #define mmDMA0_QM_IND_GW_APB_WDATA 0x508CB4 #define mmDMA0_QM_IND_GW_APB_RDATA 0x508CB8 #define mmDMA0_QM_IND_GW_APB_STATUS 0x508CBC #define mmDMA0_QM_GLBL_ERR_ADDR_LO 0x508CD0 #define mmDMA0_QM_GLBL_ERR_ADDR_HI 0x508CD4 #define mmDMA0_QM_GLBL_ERR_WDATA 0x508CD8 #define mmDMA0_QM_GLBL_MEM_INIT_BUSY 0x508D00 #endif /* ASIC_REG_DMA0_QM_REGS_H_ */
// SPDX-License-Identifier: GPL-2.0 #include <linux/io.h> #include <linux/processor.h> #include <asm/sn/ioc3.h> #include <asm/setup.h> static inline struct ioc3_uartregs *console_uart(void) { struct ioc3 *ioc3; ioc3 = (struct ioc3 *)((void *)(0x900000001f600000)); return &ioc3->sregs.uarta; } void prom_putchar(char c) { struct ioc3_uartregs *uart = console_uart(); while ((readb(&uart->iu_lsr) & 0x20) == 0) cpu_relax(); writeb(c, &uart->iu_thr); }
// SPDX-License-Identifier: GPL-2.0
/*
 * Device Tree support for Allwinner A1X SoCs
 *
 * Copyright (C) 2012 Maxime Ripard
 *
 * Maxime Ripard <[email protected]>
 *
 */

#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/of_clk.h>
#include <linux/platform_device.h>
#include <linux/reset/sunxi.h>

#include <asm/mach/arch.h>
#include <asm/secure_cntvoff.h>

/* Boards handled by the generic sun4i/sun5i machine (no special hooks). */
static const char * const sunxi_board_dt_compat[] = {
	"allwinner,sun4i-a10",
	"allwinner,sun5i-a10s",
	"allwinner,sun5i-a13",
	"allwinner,sun5i-r8",
	"nextthing,gr8",
	NULL,
};

DT_MACHINE_START(SUNXI_DT, "Allwinner sun4i/sun5i Families")
	.dt_compat	= sunxi_board_dt_compat,
MACHINE_END

static const char * const sun6i_board_dt_compat[] = {
	"allwinner,sun6i-a31",
	"allwinner,sun6i-a31s",
	NULL,
};

/*
 * Timer init for sun6i-class SoCs: clocks and the reset controller are
 * brought up before timer_probe() — presumably the timer block depends on
 * the reset controller being deasserted first (TODO confirm).
 */
static void __init sun6i_timer_init(void)
{
	of_clk_init(NULL);
	if (IS_ENABLED(CONFIG_RESET_CONTROLLER))
		sun6i_reset_init();
	timer_probe();
}

DT_MACHINE_START(SUN6I_DT, "Allwinner sun6i (A31) Family")
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun6i_board_dt_compat,
MACHINE_END

static const char * const sun7i_board_dt_compat[] = {
	"allwinner,sun7i-a20",
	NULL,
};

DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
	.dt_compat	= sun7i_board_dt_compat,
MACHINE_END

/* sun8i family shares the sun6i timer init sequence. */
static const char * const sun8i_board_dt_compat[] = {
	"allwinner,sun8i-a23",
	"allwinner,sun8i-a33",
	"allwinner,sun8i-h2-plus",
	"allwinner,sun8i-h3",
	"allwinner,sun8i-r40",
	"allwinner,sun8i-v3",
	"allwinner,sun8i-v3s",
	NULL,
};

DT_MACHINE_START(SUN8I_DT, "Allwinner sun8i Family")
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun8i_board_dt_compat,
MACHINE_END

/*
 * A83t needs the per-CPU virtual counter offset (CNTVOFF) initialized from
 * secure world on SMP kernels; hence its own machine entry with init_early.
 */
static void __init sun8i_a83t_cntvoff_init(void)
{
#ifdef CONFIG_SMP
	secure_cntvoff_init();
#endif
}

static const char * const sun8i_a83t_cntvoff_board_dt_compat[] = {
	"allwinner,sun8i-a83t",
	NULL,
};

DT_MACHINE_START(SUN8I_A83T_CNTVOFF_DT, "Allwinner A83t board")
	.init_early	= sun8i_a83t_cntvoff_init,
	.init_time	= sun6i_timer_init,
	.dt_compat	= sun8i_a83t_cntvoff_board_dt_compat,
MACHINE_END

static const char * const sun9i_board_dt_compat[] = {
	"allwinner,sun9i-a80",
	NULL,
};

DT_MACHINE_START(SUN9I_DT, "Allwinner sun9i Family")
	.dt_compat	= sun9i_board_dt_compat,
MACHINE_END

static const char * const suniv_board_dt_compat[] = {
	"allwinner,suniv-f1c100s",
	NULL,
};

DT_MACHINE_START(SUNIV_DT, "Allwinner suniv Family")
	.dt_compat	= suniv_board_dt_compat,
MACHINE_END
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2019 MediaTek Inc. * Author: Ran Bi <[email protected]> */ #include <linux/delay.h> #include <linux/init.h> #include <linux/io.h> #include <linux/irqdomain.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/platform_device.h> #include <linux/rtc.h> #define MT2712_BBPU 0x0000 #define MT2712_BBPU_CLRPKY BIT(4) #define MT2712_BBPU_RELOAD BIT(5) #define MT2712_BBPU_CBUSY BIT(6) #define MT2712_BBPU_KEY (0x43 << 8) #define MT2712_IRQ_STA 0x0004 #define MT2712_IRQ_STA_AL BIT(0) #define MT2712_IRQ_STA_TC BIT(1) #define MT2712_IRQ_EN 0x0008 #define MT2712_IRQ_EN_AL BIT(0) #define MT2712_IRQ_EN_TC BIT(1) #define MT2712_IRQ_EN_ONESHOT BIT(2) #define MT2712_CII_EN 0x000c #define MT2712_AL_MASK 0x0010 #define MT2712_AL_MASK_DOW BIT(4) #define MT2712_TC_SEC 0x0014 #define MT2712_TC_MIN 0x0018 #define MT2712_TC_HOU 0x001c #define MT2712_TC_DOM 0x0020 #define MT2712_TC_DOW 0x0024 #define MT2712_TC_MTH 0x0028 #define MT2712_TC_YEA 0x002c #define MT2712_AL_SEC 0x0030 #define MT2712_AL_MIN 0x0034 #define MT2712_AL_HOU 0x0038 #define MT2712_AL_DOM 0x003c #define MT2712_AL_DOW 0x0040 #define MT2712_AL_MTH 0x0044 #define MT2712_AL_YEA 0x0048 #define MT2712_SEC_MASK 0x003f #define MT2712_MIN_MASK 0x003f #define MT2712_HOU_MASK 0x001f #define MT2712_DOM_MASK 0x001f #define MT2712_DOW_MASK 0x0007 #define MT2712_MTH_MASK 0x000f #define MT2712_YEA_MASK 0x007f #define MT2712_POWERKEY1 0x004c #define MT2712_POWERKEY2 0x0050 #define MT2712_POWERKEY1_KEY 0xa357 #define MT2712_POWERKEY2_KEY 0x67d2 #define MT2712_CON0 0x005c #define MT2712_CON1 0x0060 #define MT2712_PROT 0x0070 #define MT2712_PROT_UNLOCK1 0x9136 #define MT2712_PROT_UNLOCK2 0x586a #define MT2712_WRTGR 0x0078 #define MT2712_RTC_TIMESTAMP_END_2127 4985971199LL struct mt2712_rtc { struct rtc_device *rtc; void __iomem *base; int irq; u8 irq_wake_enabled; u8 powerlost; }; static inline u32 mt2712_readl(struct mt2712_rtc *mt2712_rtc, 
u32 reg) { return readl(mt2712_rtc->base + reg); } static inline void mt2712_writel(struct mt2712_rtc *mt2712_rtc, u32 reg, u32 val) { writel(val, mt2712_rtc->base + reg); } static void mt2712_rtc_write_trigger(struct mt2712_rtc *mt2712_rtc) { unsigned long timeout = jiffies + HZ / 10; mt2712_writel(mt2712_rtc, MT2712_WRTGR, 1); while (1) { if (!(mt2712_readl(mt2712_rtc, MT2712_BBPU) & MT2712_BBPU_CBUSY)) break; if (time_after(jiffies, timeout)) { dev_err(&mt2712_rtc->rtc->dev, "%s time out!\n", __func__); break; } cpu_relax(); } } static void mt2712_rtc_writeif_unlock(struct mt2712_rtc *mt2712_rtc) { mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK1); mt2712_rtc_write_trigger(mt2712_rtc); mt2712_writel(mt2712_rtc, MT2712_PROT, MT2712_PROT_UNLOCK2); mt2712_rtc_write_trigger(mt2712_rtc); } static irqreturn_t rtc_irq_handler_thread(int irq, void *data) { struct mt2712_rtc *mt2712_rtc = data; u16 irqsta; /* Clear interrupt */ irqsta = mt2712_readl(mt2712_rtc, MT2712_IRQ_STA); if (irqsta & MT2712_IRQ_STA_AL) { rtc_update_irq(mt2712_rtc->rtc, 1, RTC_IRQF | RTC_AF); return IRQ_HANDLED; } return IRQ_NONE; } static void __mt2712_rtc_read_time(struct mt2712_rtc *mt2712_rtc, struct rtc_time *tm, int *sec) { tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC) & MT2712_SEC_MASK; tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_TC_MIN) & MT2712_MIN_MASK; tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_TC_HOU) & MT2712_HOU_MASK; tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_TC_DOM) & MT2712_DOM_MASK; tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_TC_MTH) - 1) & MT2712_MTH_MASK; tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_TC_YEA) + 100) & MT2712_YEA_MASK; *sec = mt2712_readl(mt2712_rtc, MT2712_TC_SEC) & MT2712_SEC_MASK; } static int mt2712_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); int sec; if (mt2712_rtc->powerlost) return -EINVAL; do { __mt2712_rtc_read_time(mt2712_rtc, tm, &sec); } while (sec < 
tm->tm_sec); /* SEC has carried */ return 0; } static int mt2712_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); mt2712_writel(mt2712_rtc, MT2712_TC_SEC, tm->tm_sec & MT2712_SEC_MASK); mt2712_writel(mt2712_rtc, MT2712_TC_MIN, tm->tm_min & MT2712_MIN_MASK); mt2712_writel(mt2712_rtc, MT2712_TC_HOU, tm->tm_hour & MT2712_HOU_MASK); mt2712_writel(mt2712_rtc, MT2712_TC_DOM, tm->tm_mday & MT2712_DOM_MASK); mt2712_writel(mt2712_rtc, MT2712_TC_MTH, (tm->tm_mon + 1) & MT2712_MTH_MASK); mt2712_writel(mt2712_rtc, MT2712_TC_YEA, (tm->tm_year - 100) & MT2712_YEA_MASK); mt2712_rtc_write_trigger(mt2712_rtc); if (mt2712_rtc->powerlost) mt2712_rtc->powerlost = false; return 0; } static int mt2712_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); struct rtc_time *tm = &alm->time; u16 irqen; irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN); alm->enabled = !!(irqen & MT2712_IRQ_EN_AL); tm->tm_sec = mt2712_readl(mt2712_rtc, MT2712_AL_SEC) & MT2712_SEC_MASK; tm->tm_min = mt2712_readl(mt2712_rtc, MT2712_AL_MIN) & MT2712_MIN_MASK; tm->tm_hour = mt2712_readl(mt2712_rtc, MT2712_AL_HOU) & MT2712_HOU_MASK; tm->tm_mday = mt2712_readl(mt2712_rtc, MT2712_AL_DOM) & MT2712_DOM_MASK; tm->tm_mon = (mt2712_readl(mt2712_rtc, MT2712_AL_MTH) - 1) & MT2712_MTH_MASK; tm->tm_year = (mt2712_readl(mt2712_rtc, MT2712_AL_YEA) + 100) & MT2712_YEA_MASK; return 0; } static int mt2712_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); u16 irqen; irqen = mt2712_readl(mt2712_rtc, MT2712_IRQ_EN); if (enabled) irqen |= MT2712_IRQ_EN_AL; else irqen &= ~MT2712_IRQ_EN_AL; mt2712_writel(mt2712_rtc, MT2712_IRQ_EN, irqen); mt2712_rtc_write_trigger(mt2712_rtc); return 0; } static int mt2712_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) { struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); struct rtc_time *tm = &alm->time; 
dev_dbg(&mt2712_rtc->rtc->dev, "set al time: %ptR, alm en: %d\n", tm, alm->enabled); mt2712_writel(mt2712_rtc, MT2712_AL_SEC, (mt2712_readl(mt2712_rtc, MT2712_AL_SEC) & ~(MT2712_SEC_MASK)) | (tm->tm_sec & MT2712_SEC_MASK)); mt2712_writel(mt2712_rtc, MT2712_AL_MIN, (mt2712_readl(mt2712_rtc, MT2712_AL_MIN) & ~(MT2712_MIN_MASK)) | (tm->tm_min & MT2712_MIN_MASK)); mt2712_writel(mt2712_rtc, MT2712_AL_HOU, (mt2712_readl(mt2712_rtc, MT2712_AL_HOU) & ~(MT2712_HOU_MASK)) | (tm->tm_hour & MT2712_HOU_MASK)); mt2712_writel(mt2712_rtc, MT2712_AL_DOM, (mt2712_readl(mt2712_rtc, MT2712_AL_DOM) & ~(MT2712_DOM_MASK)) | (tm->tm_mday & MT2712_DOM_MASK)); mt2712_writel(mt2712_rtc, MT2712_AL_MTH, (mt2712_readl(mt2712_rtc, MT2712_AL_MTH) & ~(MT2712_MTH_MASK)) | ((tm->tm_mon + 1) & MT2712_MTH_MASK)); mt2712_writel(mt2712_rtc, MT2712_AL_YEA, (mt2712_readl(mt2712_rtc, MT2712_AL_YEA) & ~(MT2712_YEA_MASK)) | ((tm->tm_year - 100) & MT2712_YEA_MASK)); /* mask day of week */ mt2712_writel(mt2712_rtc, MT2712_AL_MASK, MT2712_AL_MASK_DOW); mt2712_rtc_write_trigger(mt2712_rtc); mt2712_rtc_alarm_irq_enable(dev, alm->enabled); return 0; } /* Init RTC register */ static void mt2712_rtc_hw_init(struct mt2712_rtc *mt2712_rtc) { u32 p1, p2; mt2712_writel(mt2712_rtc, MT2712_BBPU, MT2712_BBPU_KEY | MT2712_BBPU_RELOAD); mt2712_writel(mt2712_rtc, MT2712_CII_EN, 0); mt2712_writel(mt2712_rtc, MT2712_AL_MASK, 0); /* necessary before set MT2712_POWERKEY */ mt2712_writel(mt2712_rtc, MT2712_CON0, 0x4848); mt2712_writel(mt2712_rtc, MT2712_CON1, 0x0048); mt2712_rtc_write_trigger(mt2712_rtc); p1 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY1); p2 = mt2712_readl(mt2712_rtc, MT2712_POWERKEY2); if (p1 != MT2712_POWERKEY1_KEY || p2 != MT2712_POWERKEY2_KEY) { mt2712_rtc->powerlost = true; dev_dbg(&mt2712_rtc->rtc->dev, "powerkey not set (lost power)\n"); } else { mt2712_rtc->powerlost = false; } /* RTC need POWERKEY1/2 match, then goto normal work mode */ mt2712_writel(mt2712_rtc, MT2712_POWERKEY1, MT2712_POWERKEY1_KEY); 
mt2712_writel(mt2712_rtc, MT2712_POWERKEY2, MT2712_POWERKEY2_KEY); mt2712_rtc_write_trigger(mt2712_rtc); mt2712_rtc_writeif_unlock(mt2712_rtc); } static const struct rtc_class_ops mt2712_rtc_ops = { .read_time = mt2712_rtc_read_time, .set_time = mt2712_rtc_set_time, .read_alarm = mt2712_rtc_read_alarm, .set_alarm = mt2712_rtc_set_alarm, .alarm_irq_enable = mt2712_rtc_alarm_irq_enable, }; static int mt2712_rtc_probe(struct platform_device *pdev) { struct mt2712_rtc *mt2712_rtc; int ret; mt2712_rtc = devm_kzalloc(&pdev->dev, sizeof(struct mt2712_rtc), GFP_KERNEL); if (!mt2712_rtc) return -ENOMEM; mt2712_rtc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(mt2712_rtc->base)) return PTR_ERR(mt2712_rtc->base); /* rtc hw init */ mt2712_rtc_hw_init(mt2712_rtc); mt2712_rtc->irq = platform_get_irq(pdev, 0); if (mt2712_rtc->irq < 0) return mt2712_rtc->irq; platform_set_drvdata(pdev, mt2712_rtc); mt2712_rtc->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(mt2712_rtc->rtc)) return PTR_ERR(mt2712_rtc->rtc); ret = devm_request_threaded_irq(&pdev->dev, mt2712_rtc->irq, NULL, rtc_irq_handler_thread, IRQF_ONESHOT | IRQF_TRIGGER_LOW, dev_name(&mt2712_rtc->rtc->dev), mt2712_rtc); if (ret) { dev_err(&pdev->dev, "Failed to request alarm IRQ: %d: %d\n", mt2712_rtc->irq, ret); return ret; } device_init_wakeup(&pdev->dev, true); mt2712_rtc->rtc->ops = &mt2712_rtc_ops; mt2712_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; mt2712_rtc->rtc->range_max = MT2712_RTC_TIMESTAMP_END_2127; return devm_rtc_register_device(mt2712_rtc->rtc); } #ifdef CONFIG_PM_SLEEP static int mt2712_rtc_suspend(struct device *dev) { int wake_status = 0; struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); if (device_may_wakeup(dev)) { wake_status = enable_irq_wake(mt2712_rtc->irq); if (!wake_status) mt2712_rtc->irq_wake_enabled = true; } return 0; } static int mt2712_rtc_resume(struct device *dev) { int wake_status = 0; struct mt2712_rtc *mt2712_rtc = dev_get_drvdata(dev); if 
(device_may_wakeup(dev) && mt2712_rtc->irq_wake_enabled) { wake_status = disable_irq_wake(mt2712_rtc->irq); if (!wake_status) mt2712_rtc->irq_wake_enabled = false; } return 0; } static SIMPLE_DEV_PM_OPS(mt2712_pm_ops, mt2712_rtc_suspend, mt2712_rtc_resume); #endif static const struct of_device_id mt2712_rtc_of_match[] = { { .compatible = "mediatek,mt2712-rtc", }, { }, }; MODULE_DEVICE_TABLE(of, mt2712_rtc_of_match); static struct platform_driver mt2712_rtc_driver = { .driver = { .name = "mt2712-rtc", .of_match_table = mt2712_rtc_of_match, #ifdef CONFIG_PM_SLEEP .pm = &mt2712_pm_ops, #endif }, .probe = mt2712_rtc_probe, }; module_platform_driver(mt2712_rtc_driver); MODULE_DESCRIPTION("MediaTek MT2712 SoC based RTC Driver"); MODULE_AUTHOR("Ran Bi <[email protected]>"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 /* * Copyright 2019 Google LLC */ /** * DOC: blk-crypto profiles * * 'struct blk_crypto_profile' contains all generic inline encryption-related * state for a particular inline encryption device. blk_crypto_profile serves * as the way that drivers for inline encryption hardware expose their crypto * capabilities and certain functions (e.g., functions to program and evict * keys) to upper layers. Device drivers that want to support inline encryption * construct a crypto profile, then associate it with the disk's request_queue. * * If the device has keyslots, then its blk_crypto_profile also handles managing * these keyslots in a device-independent way, using the driver-provided * functions to program and evict keys as needed. This includes keeping track * of which key and how many I/O requests are using each keyslot, getting * keyslots for I/O requests, and handling key eviction requests. * * For more information, see Documentation/block/inline-encryption.rst. */ #define pr_fmt(fmt) "blk-crypto: " fmt #include <linux/blk-crypto-profile.h> #include <linux/device.h> #include <linux/atomic.h> #include <linux/mutex.h> #include <linux/pm_runtime.h> #include <linux/wait.h> #include <linux/blkdev.h> #include <linux/blk-integrity.h> #include "blk-crypto-internal.h" struct blk_crypto_keyslot { atomic_t slot_refs; struct list_head idle_slot_node; struct hlist_node hash_node; const struct blk_crypto_key *key; struct blk_crypto_profile *profile; }; static inline void blk_crypto_hw_enter(struct blk_crypto_profile *profile) { /* * Calling into the driver requires profile->lock held and the device * resumed. But we must resume the device first, since that can acquire * and release profile->lock via blk_crypto_reprogram_all_keys(). 
*/ if (profile->dev) pm_runtime_get_sync(profile->dev); down_write(&profile->lock); } static inline void blk_crypto_hw_exit(struct blk_crypto_profile *profile) { up_write(&profile->lock); if (profile->dev) pm_runtime_put_sync(profile->dev); } /** * blk_crypto_profile_init() - Initialize a blk_crypto_profile * @profile: the blk_crypto_profile to initialize * @num_slots: the number of keyslots * * Storage drivers must call this when starting to set up a blk_crypto_profile, * before filling in additional fields. * * Return: 0 on success, or else a negative error code. */ int blk_crypto_profile_init(struct blk_crypto_profile *profile, unsigned int num_slots) { unsigned int slot; unsigned int i; unsigned int slot_hashtable_size; memset(profile, 0, sizeof(*profile)); /* * profile->lock of an underlying device can nest inside profile->lock * of a device-mapper device, so use a dynamic lock class to avoid * false-positive lockdep reports. */ lockdep_register_key(&profile->lockdep_key); __init_rwsem(&profile->lock, "&profile->lock", &profile->lockdep_key); if (num_slots == 0) return 0; /* Initialize keyslot management data. */ profile->slots = kvcalloc(num_slots, sizeof(profile->slots[0]), GFP_KERNEL); if (!profile->slots) goto err_destroy; profile->num_slots = num_slots; init_waitqueue_head(&profile->idle_slots_wait_queue); INIT_LIST_HEAD(&profile->idle_slots); for (slot = 0; slot < num_slots; slot++) { profile->slots[slot].profile = profile; list_add_tail(&profile->slots[slot].idle_slot_node, &profile->idle_slots); } spin_lock_init(&profile->idle_slots_lock); slot_hashtable_size = roundup_pow_of_two(num_slots); /* * hash_ptr() assumes bits != 0, so ensure the hash table has at least 2 * buckets. This only makes a difference when there is only 1 keyslot. 
*/ if (slot_hashtable_size < 2) slot_hashtable_size = 2; profile->log_slot_ht_size = ilog2(slot_hashtable_size); profile->slot_hashtable = kvmalloc_array(slot_hashtable_size, sizeof(profile->slot_hashtable[0]), GFP_KERNEL); if (!profile->slot_hashtable) goto err_destroy; for (i = 0; i < slot_hashtable_size; i++) INIT_HLIST_HEAD(&profile->slot_hashtable[i]); return 0; err_destroy: blk_crypto_profile_destroy(profile); return -ENOMEM; } EXPORT_SYMBOL_GPL(blk_crypto_profile_init); static void blk_crypto_profile_destroy_callback(void *profile) { blk_crypto_profile_destroy(profile); } /** * devm_blk_crypto_profile_init() - Resource-managed blk_crypto_profile_init() * @dev: the device which owns the blk_crypto_profile * @profile: the blk_crypto_profile to initialize * @num_slots: the number of keyslots * * Like blk_crypto_profile_init(), but causes blk_crypto_profile_destroy() to be * called automatically on driver detach. * * Return: 0 on success, or else a negative error code. */ int devm_blk_crypto_profile_init(struct device *dev, struct blk_crypto_profile *profile, unsigned int num_slots) { int err = blk_crypto_profile_init(profile, num_slots); if (err) return err; return devm_add_action_or_reset(dev, blk_crypto_profile_destroy_callback, profile); } EXPORT_SYMBOL_GPL(devm_blk_crypto_profile_init); static inline struct hlist_head * blk_crypto_hash_bucket_for_key(struct blk_crypto_profile *profile, const struct blk_crypto_key *key) { return &profile->slot_hashtable[ hash_ptr(key, profile->log_slot_ht_size)]; } static void blk_crypto_remove_slot_from_lru_list(struct blk_crypto_keyslot *slot) { struct blk_crypto_profile *profile = slot->profile; unsigned long flags; spin_lock_irqsave(&profile->idle_slots_lock, flags); list_del(&slot->idle_slot_node); spin_unlock_irqrestore(&profile->idle_slots_lock, flags); } static struct blk_crypto_keyslot * blk_crypto_find_keyslot(struct blk_crypto_profile *profile, const struct blk_crypto_key *key) { const struct hlist_head *head = 
blk_crypto_hash_bucket_for_key(profile, key); struct blk_crypto_keyslot *slotp; hlist_for_each_entry(slotp, head, hash_node) { if (slotp->key == key) return slotp; } return NULL; } static struct blk_crypto_keyslot * blk_crypto_find_and_grab_keyslot(struct blk_crypto_profile *profile, const struct blk_crypto_key *key) { struct blk_crypto_keyslot *slot; slot = blk_crypto_find_keyslot(profile, key); if (!slot) return NULL; if (atomic_inc_return(&slot->slot_refs) == 1) { /* Took first reference to this slot; remove it from LRU list */ blk_crypto_remove_slot_from_lru_list(slot); } return slot; } /** * blk_crypto_keyslot_index() - Get the index of a keyslot * @slot: a keyslot that blk_crypto_get_keyslot() returned * * Return: the 0-based index of the keyslot within the device's keyslots. */ unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot) { return slot - slot->profile->slots; } EXPORT_SYMBOL_GPL(blk_crypto_keyslot_index); /** * blk_crypto_get_keyslot() - Get a keyslot for a key, if needed. * @profile: the crypto profile of the device the key will be used on * @key: the key that will be used * @slot_ptr: If a keyslot is allocated, an opaque pointer to the keyslot struct * will be stored here. blk_crypto_put_keyslot() must be called * later to release it. Otherwise, NULL will be stored here. * * If the device has keyslots, this gets a keyslot that's been programmed with * the specified key. If the key is already in a slot, this reuses it; * otherwise this waits for a slot to become idle and programs the key into it. * * Context: Process context. Takes and releases profile->lock. * Return: BLK_STS_OK on success, meaning that either a keyslot was allocated or * one wasn't needed; or a blk_status_t error on failure. 
*/ blk_status_t blk_crypto_get_keyslot(struct blk_crypto_profile *profile, const struct blk_crypto_key *key, struct blk_crypto_keyslot **slot_ptr) { struct blk_crypto_keyslot *slot; int slot_idx; int err; *slot_ptr = NULL; /* * If the device has no concept of "keyslots", then there is no need to * get one. */ if (profile->num_slots == 0) return BLK_STS_OK; down_read(&profile->lock); slot = blk_crypto_find_and_grab_keyslot(profile, key); up_read(&profile->lock); if (slot) goto success; for (;;) { blk_crypto_hw_enter(profile); slot = blk_crypto_find_and_grab_keyslot(profile, key); if (slot) { blk_crypto_hw_exit(profile); goto success; } /* * If we're here, that means there wasn't a slot that was * already programmed with the key. So try to program it. */ if (!list_empty(&profile->idle_slots)) break; blk_crypto_hw_exit(profile); wait_event(profile->idle_slots_wait_queue, !list_empty(&profile->idle_slots)); } slot = list_first_entry(&profile->idle_slots, struct blk_crypto_keyslot, idle_slot_node); slot_idx = blk_crypto_keyslot_index(slot); err = profile->ll_ops.keyslot_program(profile, key, slot_idx); if (err) { wake_up(&profile->idle_slots_wait_queue); blk_crypto_hw_exit(profile); return errno_to_blk_status(err); } /* Move this slot to the hash list for the new key. */ if (slot->key) hlist_del(&slot->hash_node); slot->key = key; hlist_add_head(&slot->hash_node, blk_crypto_hash_bucket_for_key(profile, key)); atomic_set(&slot->slot_refs, 1); blk_crypto_remove_slot_from_lru_list(slot); blk_crypto_hw_exit(profile); success: *slot_ptr = slot; return BLK_STS_OK; } /** * blk_crypto_put_keyslot() - Release a reference to a keyslot * @slot: The keyslot to release the reference of * * Context: Any context. 
 */
void blk_crypto_put_keyslot(struct blk_crypto_keyslot *slot)
{
	struct blk_crypto_profile *profile = slot->profile;
	unsigned long flags;

	/*
	 * Only when the last reference is dropped does the slot go back on
	 * the idle (LRU) list; waiters in blk_crypto_get_keyslot() are then
	 * woken so they can reuse it.
	 */
	if (atomic_dec_and_lock_irqsave(&slot->slot_refs,
					&profile->idle_slots_lock, flags)) {
		list_add_tail(&slot->idle_slot_node, &profile->idle_slots);
		spin_unlock_irqrestore(&profile->idle_slots_lock, flags);
		wake_up(&profile->idle_slots_wait_queue);
	}
}

/**
 * __blk_crypto_cfg_supported() - Check whether the given crypto profile
 *				  supports the given crypto configuration.
 * @profile: the crypto profile to check
 * @cfg: the crypto configuration to check for
 *
 * Return: %true if @profile supports the given @cfg.
 */
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
				const struct blk_crypto_config *cfg)
{
	if (!profile)
		return false;
	/* modes_supported[mode] is a bitmask of supported data unit sizes. */
	if (!(profile->modes_supported[cfg->crypto_mode] & cfg->data_unit_size))
		return false;
	if (profile->max_dun_bytes_supported < cfg->dun_bytes)
		return false;
	return true;
}

/*
 * This is an internal function that evicts a key from an inline encryption
 * device that can be either a real device or the blk-crypto-fallback "device".
 * It is used only by blk_crypto_evict_key(); see that function for details.
 */
int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
			   const struct blk_crypto_key *key)
{
	struct blk_crypto_keyslot *slot;
	int err;

	if (profile->num_slots == 0) {
		/*
		 * Devices with no concept of keyslots may still want the
		 * eviction callback; slot index -1 signals "no slot".
		 */
		if (profile->ll_ops.keyslot_evict) {
			blk_crypto_hw_enter(profile);
			err = profile->ll_ops.keyslot_evict(profile, key, -1);
			blk_crypto_hw_exit(profile);
			return err;
		}
		return 0;
	}

	blk_crypto_hw_enter(profile);
	slot = blk_crypto_find_keyslot(profile, key);
	if (!slot) {
		/*
		 * Not an error, since a key not in use by I/O is not guaranteed
		 * to be in a keyslot. There can be more keys than keyslots.
*/ err = 0; goto out; } if (WARN_ON_ONCE(atomic_read(&slot->slot_refs) != 0)) { /* BUG: key is still in use by I/O */ err = -EBUSY; goto out_remove; } err = profile->ll_ops.keyslot_evict(profile, key, blk_crypto_keyslot_index(slot)); out_remove: /* * Callers free the key even on error, so unlink the key from the hash * table and clear slot->key even on error. */ hlist_del(&slot->hash_node); slot->key = NULL; out: blk_crypto_hw_exit(profile); return err; } /** * blk_crypto_reprogram_all_keys() - Re-program all keyslots. * @profile: The crypto profile * * Re-program all keyslots that are supposed to have a key programmed. This is * intended only for use by drivers for hardware that loses its keys on reset. * * Context: Process context. Takes and releases profile->lock. */ void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile) { unsigned int slot; if (profile->num_slots == 0) return; /* This is for device initialization, so don't resume the device */ down_write(&profile->lock); for (slot = 0; slot < profile->num_slots; slot++) { const struct blk_crypto_key *key = profile->slots[slot].key; int err; if (!key) continue; err = profile->ll_ops.keyslot_program(profile, key, slot); WARN_ON(err); } up_write(&profile->lock); } EXPORT_SYMBOL_GPL(blk_crypto_reprogram_all_keys); void blk_crypto_profile_destroy(struct blk_crypto_profile *profile) { if (!profile) return; lockdep_unregister_key(&profile->lockdep_key); kvfree(profile->slot_hashtable); kvfree_sensitive(profile->slots, sizeof(profile->slots[0]) * profile->num_slots); memzero_explicit(profile, sizeof(*profile)); } EXPORT_SYMBOL_GPL(blk_crypto_profile_destroy); bool blk_crypto_register(struct blk_crypto_profile *profile, struct request_queue *q) { if (blk_integrity_queue_supports_integrity(q)) { pr_warn("Integrity and hardware inline encryption are not supported together. 
Disabling hardware inline encryption.\n"); return false; } q->crypto_profile = profile; return true; } EXPORT_SYMBOL_GPL(blk_crypto_register); /** * blk_crypto_intersect_capabilities() - restrict supported crypto capabilities * by child device * @parent: the crypto profile for the parent device * @child: the crypto profile for the child device, or NULL * * This clears all crypto capabilities in @parent that aren't set in @child. If * @child is NULL, then this clears all parent capabilities. * * Only use this when setting up the crypto profile for a layered device, before * it's been exposed yet. */ void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent, const struct blk_crypto_profile *child) { if (child) { unsigned int i; parent->max_dun_bytes_supported = min(parent->max_dun_bytes_supported, child->max_dun_bytes_supported); for (i = 0; i < ARRAY_SIZE(child->modes_supported); i++) parent->modes_supported[i] &= child->modes_supported[i]; } else { parent->max_dun_bytes_supported = 0; memset(parent->modes_supported, 0, sizeof(parent->modes_supported)); } } EXPORT_SYMBOL_GPL(blk_crypto_intersect_capabilities); /** * blk_crypto_has_capabilities() - Check whether @target supports at least all * the crypto capabilities that @reference does. * @target: the target profile * @reference: the reference profile * * Return: %true if @target supports all the crypto capabilities of @reference. 
*/ bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target, const struct blk_crypto_profile *reference) { int i; if (!reference) return true; if (!target) return false; for (i = 0; i < ARRAY_SIZE(target->modes_supported); i++) { if (reference->modes_supported[i] & ~target->modes_supported[i]) return false; } if (reference->max_dun_bytes_supported > target->max_dun_bytes_supported) return false; return true; } EXPORT_SYMBOL_GPL(blk_crypto_has_capabilities); /** * blk_crypto_update_capabilities() - Update the capabilities of a crypto * profile to match those of another crypto * profile. * @dst: The crypto profile whose capabilities to update. * @src: The crypto profile whose capabilities this function will update @dst's * capabilities to. * * Blk-crypto requires that crypto capabilities that were * advertised when a bio was created continue to be supported by the * device until that bio is ended. This is turn means that a device cannot * shrink its advertised crypto capabilities without any explicit * synchronization with upper layers. So if there's no such explicit * synchronization, @src must support all the crypto capabilities that * @dst does (i.e. we need blk_crypto_has_capabilities(@src, @dst)). * * Note also that as long as the crypto capabilities are being expanded, the * order of updates becoming visible is not important because it's alright * for blk-crypto to see stale values - they only cause blk-crypto to * believe that a crypto capability isn't supported when it actually is (which * might result in blk-crypto-fallback being used if available, or the bio being * failed). */ void blk_crypto_update_capabilities(struct blk_crypto_profile *dst, const struct blk_crypto_profile *src) { memcpy(dst->modes_supported, src->modes_supported, sizeof(dst->modes_supported)); dst->max_dun_bytes_supported = src->max_dun_bytes_supported; } EXPORT_SYMBOL_GPL(blk_crypto_update_capabilities);
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_WORDPART_H
#define _LINUX_WORDPART_H

/**
 * upper_32_bits - return bits 32-63 of a number
 * @n: the number we're accessing
 *
 * A basic shift-right of a 64- or 32-bit quantity.  Use this to suppress
 * the "right shift count >= width of type" warning when that quantity is
 * 32-bits.  Shifting in two 16-bit steps keeps each shift count below the
 * width of a 32-bit type.
 */
#define upper_32_bits(n) ((u32)(((n) >> 16) >> 16))

/**
 * lower_32_bits - return bits 0-31 of a number
 * @n: the number we're accessing
 */
#define lower_32_bits(n) ((u32)((n) & 0xffffffff))

/**
 * upper_16_bits - return bits 16-31 of a number
 * @n: the number we're accessing
 */
#define upper_16_bits(n) ((u16)((n) >> 16))

/**
 * lower_16_bits - return bits 0-15 of a number
 * @n: the number we're accessing
 */
#define lower_16_bits(n) ((u16)((n) & 0xffff))

/**
 * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
 * @x: value to repeat
 *
 * (~0ul / 0xff) is 0x0101...01, so multiplying it by a byte value replicates
 * that byte into every byte lane of the word.
 *
 * NOTE: @x is not checked for > 0xff; larger values produce odd results.
 */
#define REPEAT_BYTE(x)	((~0ul / 0xff) * (x))

/**
 * REPEAT_BYTE_U32 - repeat the value @x multiple times as a u32 value
 * @x: value to repeat
 *
 * NOTE: @x is not checked for > 0xff; larger values produce odd results.
 */
#define REPEAT_BYTE_U32(x)	lower_32_bits(REPEAT_BYTE(x))

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif

#endif // _LINUX_WORDPART_H
// SPDX-License-Identifier: (GPL-2.0-or-later OR MIT) /* * Copyright 2020-2021 TQ-Systems GmbH */ /dts-v1/; #include <dt-bindings/phy/phy-imx8-pcie.h> #include "imx8mm-tqma8mqml.dtsi" #include "mba8mx.dtsi" / { model = "TQ-Systems GmbH i.MX8MM TQMa8MxML on MBa8Mx"; compatible = "tq,imx8mm-tqma8mqml-mba8mx", "tq,imx8mm-tqma8mqml", "fsl,imx8mm"; chassis-type = "embedded"; aliases { eeprom0 = &eeprom3; mmc0 = &usdhc3; mmc1 = &usdhc2; mmc2 = &usdhc1; rtc0 = &pcf85063; rtc1 = &snvs_rtc; }; reg_usdhc2_vmmc: regulator-vmmc { compatible = "regulator-fixed"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_reg_usdhc2_vmmc>; regulator-name = "VSD_3V3"; regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; gpio = <&gpio2 19 GPIO_ACTIVE_HIGH>; enable-active-high; startup-delay-us = <100>; off-on-delay-us = <12000>; }; connector { compatible = "gpio-usb-b-connector", "usb-b-connector"; type = "micro"; label = "X19"; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usb1_connector>; id-gpios = <&gpio1 10 GPIO_ACTIVE_HIGH>; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; usb_dr_connector: endpoint { remote-endpoint = <&usb1_drd_sw>; }; }; }; }; }; &i2c1 { expander2: gpio@27 { compatible = "nxp,pca9555"; reg = <0x27>; gpio-controller; #gpio-cells = <2>; vcc-supply = <&reg_vcc_3v3>; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_expander>; interrupt-parent = <&gpio1>; interrupts = <9 IRQ_TYPE_EDGE_FALLING>; interrupt-controller; #interrupt-cells = <2>; }; }; &mipi_dsi { samsung,burst-clock-frequency = <891000000>; samsung,esc-clock-frequency = <20000000>; }; &pcie_phy { fsl,refclk-pad-mode = <IMX8_PCIE_REFCLK_PAD_INPUT>; fsl,clkreq-unsupported; clocks = <&pcieclk 2>; clock-names = "ref"; status = "okay"; }; /* PCIe slot on X36 */ &pcie0 { reset-gpio = <&expander0 14 GPIO_ACTIVE_LOW>; clocks = <&clk IMX8MM_CLK_PCIE1_ROOT>, <&pcieclk 3>, <&clk IMX8MM_CLK_PCIE1_AUX>; assigned-clocks = <&clk IMX8MM_CLK_PCIE1_AUX>, <&clk IMX8MM_CLK_PCIE1_CTRL>; 
assigned-clock-rates = <10000000>, <250000000>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL2_50M>, <&clk IMX8MM_SYS_PLL2_250M>; status = "okay"; }; &sai3 { assigned-clocks = <&clk IMX8MM_CLK_SAI3>; assigned-clock-parents = <&clk IMX8MM_AUDIO_PLL1_OUT>; clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3", "pll8k", "pll11k"; clocks = <&clk IMX8MM_CLK_SAI3_IPG>, <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_SAI3_ROOT>, <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_CLK_DUMMY>, <&clk IMX8MM_AUDIO_PLL1_OUT>, <&clk IMX8MM_AUDIO_PLL2_OUT>; }; &tlv320aic3x04 { clock-names = "mclk"; clocks = <&clk IMX8MM_CLK_SAI3_ROOT>; }; &uart1 { assigned-clocks = <&clk IMX8MM_CLK_UART1>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>; }; &uart2 { assigned-clocks = <&clk IMX8MM_CLK_UART2>; assigned-clock-parents = <&clk IMX8MM_SYS_PLL1_80M>; }; &usbotg1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usbotg1>; dr_mode = "otg"; srp-disable; hnp-disable; adp-disable; power-active-high; over-current-active-low; usb-role-switch; status = "okay"; port { usb1_drd_sw: endpoint { remote-endpoint = <&usb_dr_connector>; }; }; }; &usbotg2 { dr_mode = "host"; disable-over-current; vbus-supply = <&reg_hub_vbus>; status = "okay"; }; &iomuxc { pinctrl_ecspi1: ecspi1grp { fsl,pins = <MX8MM_IOMUXC_ECSPI1_SCLK_ECSPI1_SCLK 0x00000006>, <MX8MM_IOMUXC_ECSPI1_MOSI_ECSPI1_MOSI 0x00000006>, <MX8MM_IOMUXC_ECSPI1_MISO_ECSPI1_MISO 0x00000006>, <MX8MM_IOMUXC_ECSPI1_SS0_GPIO5_IO9 0x00000006>; }; pinctrl_ecspi2: ecspi2grp { fsl,pins = <MX8MM_IOMUXC_ECSPI2_SCLK_ECSPI2_SCLK 0x00000006>, <MX8MM_IOMUXC_ECSPI2_MOSI_ECSPI2_MOSI 0x00000006>, <MX8MM_IOMUXC_ECSPI2_MISO_ECSPI2_MISO 0x00000006>, <MX8MM_IOMUXC_ECSPI2_SS0_GPIO5_IO13 0x00000006>; }; pinctrl_expander: expandergrp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x94>; }; pinctrl_fec1: fec1grp { fsl,pins = <MX8MM_IOMUXC_ENET_MDC_ENET1_MDC 0x40000002>, <MX8MM_IOMUXC_ENET_MDIO_ENET1_MDIO 0x40000002>, <MX8MM_IOMUXC_ENET_TD3_ENET1_RGMII_TD3 0x14>, 
<MX8MM_IOMUXC_ENET_TD2_ENET1_RGMII_TD2 0x14>, <MX8MM_IOMUXC_ENET_TD1_ENET1_RGMII_TD1 0x14>, <MX8MM_IOMUXC_ENET_TD0_ENET1_RGMII_TD0 0x14>, <MX8MM_IOMUXC_ENET_RD3_ENET1_RGMII_RD3 0x90>, <MX8MM_IOMUXC_ENET_RD2_ENET1_RGMII_RD2 0x90>, <MX8MM_IOMUXC_ENET_RD1_ENET1_RGMII_RD1 0x90>, <MX8MM_IOMUXC_ENET_RD0_ENET1_RGMII_RD0 0x90>, <MX8MM_IOMUXC_ENET_TXC_ENET1_RGMII_TXC 0x14>, <MX8MM_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x90>, <MX8MM_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x90>, <MX8MM_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x14>; }; pinctrl_gpiobutton: gpiobuttongrp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO05_GPIO1_IO5 0x84>, <MX8MM_IOMUXC_GPIO1_IO07_GPIO1_IO7 0x84>, <MX8MM_IOMUXC_SD1_CLK_GPIO2_IO0 0x84>; }; pinctrl_gpioled: gpioledgrp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO00_GPIO1_IO0 0x84>, <MX8MM_IOMUXC_NAND_DQS_GPIO3_IO14 0x84>; }; pinctrl_i2c2: i2c2grp { fsl,pins = <MX8MM_IOMUXC_I2C2_SCL_I2C2_SCL 0x40000004>, <MX8MM_IOMUXC_I2C2_SDA_I2C2_SDA 0x40000004>; }; pinctrl_i2c2_gpio: i2c2gpiogrp { fsl,pins = <MX8MM_IOMUXC_I2C2_SCL_GPIO5_IO16 0x40000004>, <MX8MM_IOMUXC_I2C2_SDA_GPIO5_IO17 0x40000004>; }; pinctrl_i2c3: i2c3grp { fsl,pins = <MX8MM_IOMUXC_I2C3_SCL_I2C3_SCL 0x40000004>, <MX8MM_IOMUXC_I2C3_SDA_I2C3_SDA 0x40000004>; }; pinctrl_i2c3_gpio: i2c3gpiogrp { fsl,pins = <MX8MM_IOMUXC_I2C3_SCL_GPIO5_IO18 0x40000004>, <MX8MM_IOMUXC_I2C3_SDA_GPIO5_IO19 0x40000004>; }; pinctrl_pwm3: pwm3grp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO14_PWM3_OUT 0x14>; }; pinctrl_pwm4: pwm4grp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO15_PWM4_OUT 0x14>; }; pinctrl_sai3: sai3grp { fsl,pins = <MX8MM_IOMUXC_SAI3_MCLK_SAI3_MCLK 0x94>, <MX8MM_IOMUXC_SAI3_RXC_SAI3_RX_BCLK 0x94>, <MX8MM_IOMUXC_SAI3_RXFS_SAI3_RX_SYNC 0x94>, <MX8MM_IOMUXC_SAI3_RXD_SAI3_RX_DATA0 0x94>, <MX8MM_IOMUXC_SAI3_TXFS_SAI3_TX_SYNC 0x94>, <MX8MM_IOMUXC_SAI3_TXD_SAI3_TX_DATA0 0x94>, <MX8MM_IOMUXC_SAI3_TXC_SAI3_TX_BCLK 0x94>; }; pinctrl_uart1: uart1grp { fsl,pins = <MX8MM_IOMUXC_UART1_RXD_UART1_DCE_RX 0x16>, <MX8MM_IOMUXC_UART1_TXD_UART1_DCE_TX 0x16>; }; 
pinctrl_uart2: uart2grp { fsl,pins = <MX8MM_IOMUXC_UART2_RXD_UART2_DCE_RX 0x16>, <MX8MM_IOMUXC_UART2_TXD_UART2_DCE_TX 0x16>; }; pinctrl_uart3: uart3grp { fsl,pins = <MX8MM_IOMUXC_UART3_RXD_UART3_DCE_RX 0x16>, <MX8MM_IOMUXC_UART3_TXD_UART3_DCE_TX 0x16>; }; pinctrl_uart4: uart4grp { fsl,pins = <MX8MM_IOMUXC_UART4_RXD_UART4_DCE_RX 0x16>, <MX8MM_IOMUXC_UART4_TXD_UART4_DCE_TX 0x16>; }; pinctrl_usbotg1: usbotg1grp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO12_USB1_OTG_PWR 0x84>, <MX8MM_IOMUXC_GPIO1_IO13_USB1_OTG_OC 0x84>; }; pinctrl_usb1_connector: usb1-connectorgrp { fsl,pins = <MX8MM_IOMUXC_GPIO1_IO10_GPIO1_IO10 0x1c0>; }; pinctrl_usdhc2_gpio: usdhc2grpgpiogrp { fsl,pins = <MX8MM_IOMUXC_SD2_CD_B_GPIO2_IO12 0x84>; }; pinctrl_usdhc2: usdhc2grp { fsl,pins = <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x1d4>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4>, <MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4>, <MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4>, <MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4>, <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x84>; }; pinctrl_usdhc2_100mhz: usdhc2-100mhzgrp { fsl,pins = <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x1d4>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4>, <MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4>, <MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4>, <MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4>, <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x84>; }; pinctrl_usdhc2_200mhz: usdhc2-200mhzgrp { fsl,pins = <MX8MM_IOMUXC_SD2_CLK_USDHC2_CLK 0x1d4>, <MX8MM_IOMUXC_SD2_CMD_USDHC2_CMD 0x1d4>, <MX8MM_IOMUXC_SD2_DATA0_USDHC2_DATA0 0x1d4>, <MX8MM_IOMUXC_SD2_DATA1_USDHC2_DATA1 0x1d4>, <MX8MM_IOMUXC_SD2_DATA2_USDHC2_DATA2 0x1d4>, <MX8MM_IOMUXC_SD2_DATA3_USDHC2_DATA3 0x1d4>, <MX8MM_IOMUXC_GPIO1_IO04_USDHC2_VSELECT 0x84>; }; };
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */ /* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * Definitions for the TCP protocol. * * Version: @(#)tcp.h 1.0.2 04/28/93 * * Author: Fred N. van Kempen, <[email protected]> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #ifndef _UAPI_LINUX_TCP_H #define _UAPI_LINUX_TCP_H #include <linux/types.h> #include <asm/byteorder.h> #include <linux/socket.h> struct tcphdr { __be16 source; __be16 dest; __be32 seq; __be32 ack_seq; #if defined(__LITTLE_ENDIAN_BITFIELD) __u16 res1:4, doff:4, fin:1, syn:1, rst:1, psh:1, ack:1, urg:1, ece:1, cwr:1; #elif defined(__BIG_ENDIAN_BITFIELD) __u16 doff:4, res1:4, cwr:1, ece:1, urg:1, ack:1, psh:1, rst:1, syn:1, fin:1; #else #error "Adjust your <asm/byteorder.h> defines" #endif __be16 window; __sum16 check; __be16 urg_ptr; }; /* * The union cast uses a gcc extension to avoid aliasing problems * (union is compatible to any of its members) * This means this part of the code is -fstrict-aliasing safe now. 
 */
union tcp_word_hdr {
	struct tcphdr hdr;
	__be32        words[5];
};

/* words[3] is the 32-bit word holding doff/reserved/flags/window. */
#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])

enum {
	TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
	TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
	TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
	TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
	TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
	TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
	TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
	TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
	TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
	TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
};

/*
 * TCP general constants
 */
#define TCP_MSS_DEFAULT		 536U	/* IPv4 (RFC1122, RFC2581) */
#define TCP_MSS_DESIRED		1220U	/* IPv6 (tunneled), EDNS0 (RFC3226) */

/* TCP socket options */
#define TCP_NODELAY		1	/* Turn off Nagle's algorithm. */
#define TCP_MAXSEG		2	/* Limit MSS */
#define TCP_CORK		3	/* Never send partially complete segments */
#define TCP_KEEPIDLE		4	/* Start keepalives after this period */
#define TCP_KEEPINTVL		5	/* Interval between keepalives */
#define TCP_KEEPCNT		6	/* Number of keepalives before death */
#define TCP_SYNCNT		7	/* Number of SYN retransmits */
#define TCP_LINGER2		8	/* Life time of orphaned FIN-WAIT-2 state */
#define TCP_DEFER_ACCEPT	9	/* Wake up listener only when data arrive */
#define TCP_WINDOW_CLAMP	10	/* Bound advertised window */
#define TCP_INFO		11	/* Information about this connection. */
#define TCP_QUICKACK		12	/* Block/reenable quick acks */
#define TCP_CONGESTION		13	/* Congestion control algorithm */
#define TCP_MD5SIG		14	/* TCP MD5 Signature (RFC2385) */
#define TCP_THIN_LINEAR_TIMEOUTS 16	/* Use linear timeouts for thin streams*/
#define TCP_THIN_DUPACK         17      /* Fast retrans.
after 1 dupack */ #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ #define TCP_REPAIR 19 /* TCP sock is under repair right now */ #define TCP_REPAIR_QUEUE 20 #define TCP_QUEUE_SEQ 21 #define TCP_REPAIR_OPTIONS 22 #define TCP_FASTOPEN 23 /* Enable FastOpen on listeners */ #define TCP_TIMESTAMP 24 #define TCP_NOTSENT_LOWAT 25 /* limit number of unsent bytes in write queue */ #define TCP_CC_INFO 26 /* Get Congestion Control (optional) info */ #define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */ #define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */ #define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */ #define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect */ #define TCP_ULP 31 /* Attach a ULP to a TCP connection */ #define TCP_MD5SIG_EXT 32 /* TCP MD5 Signature with extensions */ #define TCP_FASTOPEN_KEY 33 /* Set the key for Fast Open (cookie) */ #define TCP_FASTOPEN_NO_COOKIE 34 /* Enable TFO without a TFO cookie */ #define TCP_ZEROCOPY_RECEIVE 35 #define TCP_INQ 36 /* Notify bytes available to read as a cmsg on read */ #define TCP_CM_INQ TCP_INQ #define TCP_TX_DELAY 37 /* delay outgoing packets by XX usec */ #define TCP_REPAIR_ON 1 #define TCP_REPAIR_OFF 0 #define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */ struct tcp_repair_opt { __u32 opt_code; __u32 opt_val; }; struct tcp_repair_window { __u32 snd_wl1; __u32 snd_wnd; __u32 max_window; __u32 rcv_wnd; __u32 rcv_wup; }; enum { TCP_NO_QUEUE, TCP_RECV_QUEUE, TCP_SEND_QUEUE, TCP_QUEUES_NR, }; /* why fastopen failed from client perspective */ enum tcp_fastopen_client_fail { TFO_STATUS_UNSPEC, /* catch-all */ TFO_COOKIE_UNAVAILABLE, /* if not in TFO_CLIENT_NO_COOKIE mode */ TFO_DATA_NOT_ACKED, /* SYN-ACK did not ack SYN data */ TFO_SYN_RETRANSMITTED, /* SYN-ACK did not ack SYN data after timeout */ }; /* for TCP_INFO socket option */ #define TCPI_OPT_TIMESTAMPS 1 #define TCPI_OPT_SACK 2 #define TCPI_OPT_WSCALE 4 #define 
TCPI_OPT_ECN		8 /* ECN was negotiated at TCP session init */
#define TCPI_OPT_ECN_SEEN	16 /* we received at least one packet with ECT */
#define TCPI_OPT_SYN_DATA	32 /* SYN-ACK acked data in SYN sent or rcvd */

/*
 * Sender's congestion state indicating normal or abnormal situations
 * in the last round of packets sent. The state is driven by the ACK
 * information and timer events.
 */
enum tcp_ca_state {
	/*
	 * Nothing bad has been observed recently.
	 * No apparent reordering, packet loss, or ECN marks.
	 */
	TCP_CA_Open = 0,
#define TCPF_CA_Open	(1<<TCP_CA_Open)
	/*
	 * The sender enters disordered state when it has received DUPACKs or
	 * SACKs in the last round of packets sent. This could be due to packet
	 * loss or reordering but needs further information to confirm packets
	 * have been lost.
	 */
	TCP_CA_Disorder = 1,
#define TCPF_CA_Disorder (1<<TCP_CA_Disorder)
	/*
	 * The sender enters Congestion Window Reduction (CWR) state when it
	 * has received ACKs with ECN-ECE marks, or has experienced congestion
	 * or packet discard on the sender host (e.g. qdisc).
	 */
	TCP_CA_CWR = 2,
#define TCPF_CA_CWR	(1<<TCP_CA_CWR)
	/*
	 * The sender is in fast recovery and retransmitting lost packets,
	 * typically triggered by ACK events.
	 */
	TCP_CA_Recovery = 3,
#define TCPF_CA_Recovery (1<<TCP_CA_Recovery)
	/*
	 * The sender is in loss recovery triggered by retransmission timeout.
	 */
	TCP_CA_Loss = 4
#define TCPF_CA_Loss (1<<TCP_CA_Loss)
};

struct tcp_info {
	__u8	tcpi_state;
	__u8	tcpi_ca_state;
	__u8	tcpi_retransmits;
	__u8	tcpi_probes;
	__u8	tcpi_backoff;
	__u8	tcpi_options;
	__u8	tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
	__u8	tcpi_delivery_rate_app_limited:1, tcpi_fastopen_client_fail:2;

	__u32	tcpi_rto;
	__u32	tcpi_ato;
	__u32	tcpi_snd_mss;
	__u32	tcpi_rcv_mss;

	__u32	tcpi_unacked;
	__u32	tcpi_sacked;
	__u32	tcpi_lost;
	__u32	tcpi_retrans;
	__u32	tcpi_fackets;

	/* Times. */
	__u32	tcpi_last_data_sent;
	__u32	tcpi_last_ack_sent;     /* Not remembered, sorry. */
	__u32	tcpi_last_data_recv;
	__u32	tcpi_last_ack_recv;

	/* Metrics.
*/ __u32 tcpi_pmtu; __u32 tcpi_rcv_ssthresh; __u32 tcpi_rtt; __u32 tcpi_rttvar; __u32 tcpi_snd_ssthresh; __u32 tcpi_snd_cwnd; __u32 tcpi_advmss; __u32 tcpi_reordering; __u32 tcpi_rcv_rtt; __u32 tcpi_rcv_space; __u32 tcpi_total_retrans; __u64 tcpi_pacing_rate; __u64 tcpi_max_pacing_rate; __u64 tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */ __u64 tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */ __u32 tcpi_segs_out; /* RFC4898 tcpEStatsPerfSegsOut */ __u32 tcpi_segs_in; /* RFC4898 tcpEStatsPerfSegsIn */ __u32 tcpi_notsent_bytes; __u32 tcpi_min_rtt; __u32 tcpi_data_segs_in; /* RFC4898 tcpEStatsDataSegsIn */ __u32 tcpi_data_segs_out; /* RFC4898 tcpEStatsDataSegsOut */ __u64 tcpi_delivery_rate; __u64 tcpi_busy_time; /* Time (usec) busy sending data */ __u64 tcpi_rwnd_limited; /* Time (usec) limited by receive window */ __u64 tcpi_sndbuf_limited; /* Time (usec) limited by send buffer */ __u32 tcpi_delivered; __u32 tcpi_delivered_ce; __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */ __u32 tcpi_reord_seen; /* reordering events seen */ __u32 tcpi_rcv_ooopack; /* Out-of-order packets received */ __u32 tcpi_snd_wnd; /* peer's advertised receive window after * scaling (bytes) */ }; /* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */ enum { TCP_NLA_PAD, TCP_NLA_BUSY, /* Time (usec) busy sending data */ TCP_NLA_RWND_LIMITED, /* Time (usec) limited by receive window */ TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */ TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */ TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */ TCP_NLA_PACING_RATE, /* Pacing rate in bytes per second */ TCP_NLA_DELIVERY_RATE, /* Delivery rate in bytes per second */ TCP_NLA_SND_CWND, /* Sending congestion window */ TCP_NLA_REORDERING, /* Reordering metric */ TCP_NLA_MIN_RTT, /* minimum RTT 
*/ TCP_NLA_RECUR_RETRANS, /* Recurring retransmits for the current pkt */ TCP_NLA_DELIVERY_RATE_APP_LMT, /* delivery rate application limited ? */ TCP_NLA_SNDQ_SIZE, /* Data (bytes) pending in send queue */ TCP_NLA_CA_STATE, /* ca_state of socket */ TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */ TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */ TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */ TCP_NLA_REORD_SEEN, /* reordering events seen */ TCP_NLA_SRTT, /* smoothed RTT in usecs */ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */ TCP_NLA_BYTES_NOTSENT, /* Bytes in write queue not yet sent */ TCP_NLA_EDT, /* Earliest departure time (CLOCK_MONOTONIC) */ }; /* for TCP_MD5SIG socket option */ #define TCP_MD5SIG_MAXKEYLEN 80 /* tcp_md5sig extension flags for TCP_MD5SIG_EXT */ #define TCP_MD5SIG_FLAG_PREFIX 0x1 /* address prefix length */ #define TCP_MD5SIG_FLAG_IFINDEX 0x2 /* ifindex set */ struct tcp_md5sig { struct __kernel_sockaddr_storage tcpm_addr; /* address associated */ __u8 tcpm_flags; /* extension flags */ __u8 tcpm_prefixlen; /* address prefix */ __u16 tcpm_keylen; /* key length */ int tcpm_ifindex; /* device index for scope */ __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */ }; /* INET_DIAG_MD5SIG */ struct tcp_diag_md5sig { __u8 tcpm_family; __u8 tcpm_prefixlen; __u16 tcpm_keylen; __be32 tcpm_addr[4]; __u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; }; /* setsockopt(fd, IPPROTO_TCP, TCP_ZEROCOPY_RECEIVE, ...) 
*/ #define TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT 0x1 struct tcp_zerocopy_receive { __u64 address; /* in: address of mapping */ __u32 length; /* in/out: number of bytes to map/mapped */ __u32 recv_skip_hint; /* out: amount of bytes to skip */ __u32 inq; /* out: amount of bytes in read queue */ __s32 err; /* out: socket error */ __u64 copybuf_address; /* in: copybuf address (small reads) */ __s32 copybuf_len; /* in/out: copybuf bytes avail/used or error */ __u32 flags; /* in: flags */ }; #endif /* _UAPI_LINUX_TCP_H */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2018 Chelsio Communications, Inc.
 *
 * Written by: Atul Gupta ([email protected])
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/workqueue.h>
#include <linux/skbuff.h>
#include <linux/timer.h>
#include <linux/notifier.h>
#include <linux/inetdevice.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/sched/signal.h>
#include <net/tcp.h>
#include <net/busy_poll.h>
#include <crypto/aes.h>

#include "chtls.h"
#include "chtls_cm.h"

/* A TX key index of -1 means no key programmed; >= 0 means TLS TX
 * offload is active for this connection.
 */
static bool is_tls_tx(struct chtls_sock *csk)
{
	return csk->tlshws.txkey >= 0;
}

/* Same convention as is_tls_tx(), for the receive-side key. */
static bool is_tls_rx(struct chtls_sock *csk)
{
	return csk->tlshws.rxkey >= 0;
}

/* Bytes of ULPTX SGL needed to describe the skb's page fragments.
 * NOTE(review): assumes sgl_len() returns a size in 8-byte flits,
 * hence the *8 conversion to bytes - confirm against cxgb4 headers.
 */
static int data_sgl_len(const struct sk_buff *skb)
{
	unsigned int cnt;

	cnt = skb_shinfo(skb)->nr_frags;
	return sgl_len(cnt) * 8;
}

/* Number of IVs needed for @size payload bytes: one per TLS record,
 * records capped at tlshws.mfs bytes each.
 */
static int nos_ivs(struct sock *sk, unsigned int size)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);

	return DIV_ROUND_UP(size, csk->tlshws.mfs);
}

/* Decide whether the IVs for @skb fit as immediate data in the work
 * request (alongside the WR/CPL headers and the data SGL) and record
 * the decision in the skb's control block (ULP_SKB_CB() writes via
 * skb->cb, which is why the const parameter can be updated).
 * Returns 1 when the IVs go immediate, 0 when they need an SGL.
 */
static int set_ivs_imm(struct sock *sk, const struct sk_buff *skb)
{
	int ivs_size = nos_ivs(sk, skb->len) * CIPHER_BLOCK_SIZE;
	int hlen = TLS_WR_CPL_LEN + data_sgl_len(skb);

	if ((hlen + KEY_ON_MEM_SZ + ivs_size) <
	    MAX_IMM_OFLD_TX_DATA_WR_LEN) {
		ULP_SKB_CB(skb)->ulp.tls.iv = 1;
		return 1;
	}
	ULP_SKB_CB(skb)->ulp.tls.iv = 0;
	return 0;
}

/* Worst-case IV bytes for @size bytes of payload. */
static int max_ivs_size(struct sock *sk, int size)
{
	return nos_ivs(sk, size) * CIPHER_BLOCK_SIZE;
}

/* IV bytes carried inside the WR itself: the full IV block when they
 * are sent as immediate data, 0 when they are sent via SGL.
 */
static int ivs_size(struct sock *sk, const struct sk_buff *skb)
{
	return set_ivs_imm(sk, skb) ? (nos_ivs(sk, skb->len) *
	       CIPHER_BLOCK_SIZE) : 0;
}

/* Size a FLOWC work request carrying @nparams mnemonics.  Returns the
 * length in 16-byte credits; if @flowclenp is non-NULL it also
 * receives the padded length in bytes.
 */
static int flowc_wr_credits(int nparams, int *flowclenp)
{
	int flowclen16, flowclen;

	flowclen = offsetof(struct fw_flowc_wr, mnemval[nparams]);
	flowclen16 = DIV_ROUND_UP(flowclen, 16);
	flowclen = flowclen16 * 16;

	if (flowclenp)
		*flowclenp = flowclen;

	return flowclen16;
}

/* Wrap an already built FLOWC WR in an skb mapped to this connection's
 * TX queue.  Returns NULL on allocation failure (GFP_ATOMIC).
 */
static struct sk_buff *create_flowc_wr_skb(struct sock *sk,
					   struct fw_flowc_wr *flowc,
					   int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct sk_buff *skb;

	skb = alloc_skb(flowclen, GFP_ATOMIC);
	if (!skb)
		return NULL;

	__skb_put_data(skb, flowc, flowclen);
	skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA);

	return skb;
}

/* Transmit a FLOWC work request.  Once TX data has been sent the WR
 * must stay ordered with the data, so it is entailed on the socket's
 * txq (returns 0); otherwise it is sent directly via
 * cxgb4_immdata_send(), falling back to send_or_defer() when the
 * immediate path reports failure.  Returns the WR length in 16-byte
 * credits on transmit, or -ENOMEM.
 */
static int send_flowc_wr(struct sock *sk, struct fw_flowc_wr *flowc,
			 int flowclen)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int flowclen16;
	int ret;

	flowclen16 = flowclen / 16;

	if (csk_flag(sk, CSK_TX_DATA_SENT)) {
		skb = create_flowc_wr_skb(sk, flowc, flowclen);
		if (!skb)
			return -ENOMEM;

		skb_entail(sk, skb,
			   ULPCB_FLAG_NO_HDR | ULPCB_FLAG_NO_APPEND);
		return 0;
	}

	ret = cxgb4_immdata_send(csk->egress_dev,
				 csk->txq_idx,
				 flowc, flowclen);
	if (!ret)
		return flowclen16;
	skb = create_flowc_wr_skb(sk, flowc, flowclen);
	if (!skb)
		return -ENOMEM;
	send_or_defer(sk, tp, skb, 0);
	return flowclen16;
}

/* Map a TCP socket state to the FLOWC TCPSTATE mnemonic; anything
 * unrecognised is reported to firmware as ESTABLISHED.
 */
static u8 tcp_state_to_flowc_state(u8 state)
{
	switch (state) {
	case TCP_ESTABLISHED:
		return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
	case TCP_CLOSE_WAIT:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSEWAIT;
	case TCP_FIN_WAIT1:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT1;
	case TCP_CLOSING:
		return FW_FLOWC_MNEM_TCPSTATE_CLOSING;
	case TCP_LAST_ACK:
		return FW_FLOWC_MNEM_TCPSTATE_LASTACK;
	case TCP_FIN_WAIT2:
		return FW_FLOWC_MNEM_TCPSTATE_FINWAIT2;
	}

	return FW_FLOWC_MNEM_TCPSTATE_ESTABLISHED;
}

/* Build and send the FLOWC WR that introduces this flow to the
 * hardware: channel, port, ingress queue, sequence numbers, send
 * buffer, MSS, TCP state, window scale and (when enabled) TLS mode
 * and maximum TX payload length.
 */
int send_tx_flowc_wr(struct sock *sk, int compl,
		     u32 snd_nxt, u32 rcv_nxt)
{
	struct flowc_packed {
		struct fw_flowc_wr fc;
		struct fw_flowc_mnemval
mnemval[FW_FLOWC_MNEM_MAX]; } __packed sflowc; int nparams, paramidx, flowclen16, flowclen; struct fw_flowc_wr *flowc; struct chtls_sock *csk; struct tcp_sock *tp; csk = rcu_dereference_sk_user_data(sk); tp = tcp_sk(sk); memset(&sflowc, 0, sizeof(sflowc)); flowc = &sflowc.fc; #define FLOWC_PARAM(__m, __v) \ do { \ flowc->mnemval[paramidx].mnemonic = FW_FLOWC_MNEM_##__m; \ flowc->mnemval[paramidx].val = cpu_to_be32(__v); \ paramidx++; \ } while (0) paramidx = 0; FLOWC_PARAM(PFNVFN, FW_PFVF_CMD_PFN_V(csk->cdev->lldi->pf)); FLOWC_PARAM(CH, csk->tx_chan); FLOWC_PARAM(PORT, csk->tx_chan); FLOWC_PARAM(IQID, csk->rss_qid); FLOWC_PARAM(SNDNXT, tp->snd_nxt); FLOWC_PARAM(RCVNXT, tp->rcv_nxt); FLOWC_PARAM(SNDBUF, csk->sndbuf); FLOWC_PARAM(MSS, tp->mss_cache); FLOWC_PARAM(TCPSTATE, tcp_state_to_flowc_state(sk->sk_state)); if (SND_WSCALE(tp)) FLOWC_PARAM(RCV_SCALE, SND_WSCALE(tp)); if (csk->ulp_mode == ULP_MODE_TLS) FLOWC_PARAM(ULD_MODE, ULP_MODE_TLS); if (csk->tlshws.fcplenmax) FLOWC_PARAM(TXDATAPLEN_MAX, csk->tlshws.fcplenmax); nparams = paramidx; #undef FLOWC_PARAM flowclen16 = flowc_wr_credits(nparams, &flowclen); flowc->op_to_nparams = cpu_to_be32(FW_WR_OP_V(FW_FLOWC_WR) | FW_WR_COMPL_V(compl) | FW_FLOWC_WR_NPARAMS_V(nparams)); flowc->flowid_len16 = cpu_to_be32(FW_WR_LEN16_V(flowclen16) | FW_WR_FLOWID_V(csk->tid)); return send_flowc_wr(sk, flowc, flowclen); } /* Copy IVs to WR */ static int tls_copy_ivs(struct sock *sk, struct sk_buff *skb) { struct chtls_sock *csk; unsigned char *iv_loc; struct chtls_hws *hws; unsigned char *ivs; u16 number_of_ivs; struct page *page; int err = 0; csk = rcu_dereference_sk_user_data(sk); hws = &csk->tlshws; number_of_ivs = nos_ivs(sk, skb->len); if (number_of_ivs > MAX_IVS_PAGE) { pr_warn("MAX IVs in PAGE exceeded %d\n", number_of_ivs); return -ENOMEM; } /* generate the IVs */ ivs = kmalloc_array(CIPHER_BLOCK_SIZE, number_of_ivs, GFP_ATOMIC); if (!ivs) return -ENOMEM; get_random_bytes(ivs, number_of_ivs * CIPHER_BLOCK_SIZE); if 
(skb_ulp_tls_iv_imm(skb)) { /* send the IVs as immediate data in the WR */ iv_loc = (unsigned char *)__skb_push(skb, number_of_ivs * CIPHER_BLOCK_SIZE); if (iv_loc) memcpy(iv_loc, ivs, number_of_ivs * CIPHER_BLOCK_SIZE); hws->ivsize = number_of_ivs * CIPHER_BLOCK_SIZE; } else { /* Send the IVs as sgls */ /* Already accounted IV DSGL for credits */ skb_shinfo(skb)->nr_frags--; page = alloc_pages(sk->sk_allocation | __GFP_COMP, 0); if (!page) { pr_info("%s : Page allocation for IVs failed\n", __func__); err = -ENOMEM; goto out; } memcpy(page_address(page), ivs, number_of_ivs * CIPHER_BLOCK_SIZE); skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, page, 0, number_of_ivs * CIPHER_BLOCK_SIZE); hws->ivsize = 0; } out: kfree(ivs); return err; } /* Copy Key to WR */ static void tls_copy_tx_key(struct sock *sk, struct sk_buff *skb) { struct ulptx_sc_memrd *sc_memrd; struct chtls_sock *csk; struct chtls_dev *cdev; struct ulptx_idata *sc; struct chtls_hws *hws; u32 immdlen; int kaddr; csk = rcu_dereference_sk_user_data(sk); hws = &csk->tlshws; cdev = csk->cdev; immdlen = sizeof(*sc) + sizeof(*sc_memrd); kaddr = keyid_to_addr(cdev->kmap.start, hws->txkey); sc = (struct ulptx_idata *)__skb_push(skb, immdlen); if (sc) { sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP)); sc->len = htonl(0); sc_memrd = (struct ulptx_sc_memrd *)(sc + 1); sc_memrd->cmd_to_len = htonl(ULPTX_CMD_V(ULP_TX_SC_MEMRD) | ULP_TX_SC_MORE_V(1) | ULPTX_LEN16_V(hws->keylen >> 4)); sc_memrd->addr = htonl(kaddr); } } static u64 tlstx_incr_seqnum(struct chtls_hws *hws) { return hws->tx_seq_no++; } static bool is_sg_request(const struct sk_buff *skb) { return skb->peeked || (skb->len > MAX_IMM_ULPTX_WR_LEN); } /* * Returns true if an sk_buff carries urgent data. 
*/ static bool skb_urgent(struct sk_buff *skb) { return ULP_SKB_CB(skb)->flags & ULPCB_FLAG_URG; } /* TLS content type for CPL SFO */ static unsigned char tls_content_type(unsigned char content_type) { switch (content_type) { case TLS_HDR_TYPE_CCS: return CPL_TX_TLS_SFO_TYPE_CCS; case TLS_HDR_TYPE_ALERT: return CPL_TX_TLS_SFO_TYPE_ALERT; case TLS_HDR_TYPE_HANDSHAKE: return CPL_TX_TLS_SFO_TYPE_HANDSHAKE; case TLS_HDR_TYPE_HEARTBEAT: return CPL_TX_TLS_SFO_TYPE_HEARTBEAT; } return CPL_TX_TLS_SFO_TYPE_DATA; } static void tls_tx_data_wr(struct sock *sk, struct sk_buff *skb, int dlen, int tls_immd, u32 credits, int expn, int pdus) { struct fw_tlstx_data_wr *req_wr; struct cpl_tx_tls_sfo *req_cpl; unsigned int wr_ulp_mode_force; struct tls_scmd *updated_scmd; unsigned char data_type; struct chtls_sock *csk; struct net_device *dev; struct chtls_hws *hws; struct tls_scmd *scmd; struct adapter *adap; unsigned char *req; int immd_len; int iv_imm; int len; csk = rcu_dereference_sk_user_data(sk); iv_imm = skb_ulp_tls_iv_imm(skb); dev = csk->egress_dev; adap = netdev2adap(dev); hws = &csk->tlshws; scmd = &hws->scmd; len = dlen + expn; dlen = (dlen < hws->mfs) ? dlen : hws->mfs; atomic_inc(&adap->chcr_stats.tls_pdu_tx); updated_scmd = scmd; updated_scmd->seqno_numivs &= 0xffffff80; updated_scmd->seqno_numivs |= SCMD_NUM_IVS_V(pdus); hws->scmd = *updated_scmd; req = (unsigned char *)__skb_push(skb, sizeof(struct cpl_tx_tls_sfo)); req_cpl = (struct cpl_tx_tls_sfo *)req; req = (unsigned char *)__skb_push(skb, (sizeof(struct fw_tlstx_data_wr))); req_wr = (struct fw_tlstx_data_wr *)req; immd_len = (tls_immd ? 
dlen : 0); req_wr->op_to_immdlen = htonl(FW_WR_OP_V(FW_TLSTX_DATA_WR) | FW_TLSTX_DATA_WR_COMPL_V(1) | FW_TLSTX_DATA_WR_IMMDLEN_V(immd_len)); req_wr->flowid_len16 = htonl(FW_TLSTX_DATA_WR_FLOWID_V(csk->tid) | FW_TLSTX_DATA_WR_LEN16_V(credits)); wr_ulp_mode_force = TX_ULP_MODE_V(ULP_MODE_TLS); if (is_sg_request(skb)) wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F | ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 : FW_OFLD_TX_DATA_WR_SHOVE_F); req_wr->lsodisable_to_flags = htonl(TX_ULP_MODE_V(ULP_MODE_TLS) | TX_URG_V(skb_urgent(skb)) | T6_TX_FORCE_F | wr_ulp_mode_force | TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) && skb_queue_empty(&csk->txq))); req_wr->ctxloc_to_exp = htonl(FW_TLSTX_DATA_WR_NUMIVS_V(pdus) | FW_TLSTX_DATA_WR_EXP_V(expn) | FW_TLSTX_DATA_WR_CTXLOC_V(CHTLS_KEY_CONTEXT_DDR) | FW_TLSTX_DATA_WR_IVDSGL_V(!iv_imm) | FW_TLSTX_DATA_WR_KEYSIZE_V(hws->keylen >> 4)); /* Fill in the length */ req_wr->plen = htonl(len); req_wr->mfs = htons(hws->mfs); req_wr->adjustedplen_pkd = htons(FW_TLSTX_DATA_WR_ADJUSTEDPLEN_V(hws->adjustlen)); req_wr->expinplenmax_pkd = htons(FW_TLSTX_DATA_WR_EXPINPLENMAX_V(hws->expansion)); req_wr->pdusinplenmax_pkd = FW_TLSTX_DATA_WR_PDUSINPLENMAX_V(hws->pdus); req_wr->r10 = 0; data_type = tls_content_type(ULP_SKB_CB(skb)->ulp.tls.type); req_cpl->op_to_seg_len = htonl(CPL_TX_TLS_SFO_OPCODE_V(CPL_TX_TLS_SFO) | CPL_TX_TLS_SFO_DATA_TYPE_V(data_type) | CPL_TX_TLS_SFO_CPL_LEN_V(2) | CPL_TX_TLS_SFO_SEG_LEN_V(dlen)); req_cpl->pld_len = htonl(len - expn); req_cpl->type_protover = htonl(CPL_TX_TLS_SFO_TYPE_V ((data_type == CPL_TX_TLS_SFO_TYPE_HEARTBEAT) ? 
TLS_HDR_TYPE_HEARTBEAT : 0) | CPL_TX_TLS_SFO_PROTOVER_V(0)); /* create the s-command */ req_cpl->r1_lo = 0; req_cpl->seqno_numivs = cpu_to_be32(hws->scmd.seqno_numivs); req_cpl->ivgen_hdrlen = cpu_to_be32(hws->scmd.ivgen_hdrlen); req_cpl->scmd1 = cpu_to_be64(tlstx_incr_seqnum(hws)); } /* * Calculate the TLS data expansion size */ static int chtls_expansion_size(struct sock *sk, int data_len, int fullpdu, unsigned short *pducnt) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct chtls_hws *hws = &csk->tlshws; struct tls_scmd *scmd = &hws->scmd; int fragsize = hws->mfs; int expnsize = 0; int fragleft; int fragcnt; int expppdu; if (SCMD_CIPH_MODE_G(scmd->seqno_numivs) == SCMD_CIPH_MODE_AES_GCM) { expppdu = GCM_TAG_SIZE + AEAD_EXPLICIT_DATA_SIZE + TLS_HEADER_LENGTH; if (fullpdu) { *pducnt = data_len / (expppdu + fragsize); if (*pducnt > 32) *pducnt = 32; else if (!*pducnt) *pducnt = 1; expnsize = (*pducnt) * expppdu; return expnsize; } fragcnt = (data_len / fragsize); expnsize = fragcnt * expppdu; fragleft = data_len % fragsize; if (fragleft > 0) expnsize += expppdu; } return expnsize; } /* WR with IV, KEY and CPL SFO added */ static void make_tlstx_data_wr(struct sock *sk, struct sk_buff *skb, int tls_tx_imm, int tls_len, u32 credits) { unsigned short pdus_per_ulp = 0; struct chtls_sock *csk; struct chtls_hws *hws; int expn_sz; int pdus; csk = rcu_dereference_sk_user_data(sk); hws = &csk->tlshws; pdus = DIV_ROUND_UP(tls_len, hws->mfs); expn_sz = chtls_expansion_size(sk, tls_len, 0, NULL); if (!hws->compute) { hws->expansion = chtls_expansion_size(sk, hws->fcplenmax, 1, &pdus_per_ulp); hws->pdus = pdus_per_ulp; hws->adjustlen = hws->pdus * ((hws->expansion / hws->pdus) + hws->mfs); hws->compute = 1; } if (tls_copy_ivs(sk, skb)) return; tls_copy_tx_key(sk, skb); tls_tx_data_wr(sk, skb, tls_len, tls_tx_imm, credits, expn_sz, pdus); hws->tx_seq_no += (pdus - 1); } static void make_tx_data_wr(struct sock *sk, struct sk_buff *skb, unsigned int immdlen, int 
len, u32 credits, u32 compl) { struct fw_ofld_tx_data_wr *req; unsigned int wr_ulp_mode_force; struct chtls_sock *csk; unsigned int opcode; csk = rcu_dereference_sk_user_data(sk); opcode = FW_OFLD_TX_DATA_WR; req = (struct fw_ofld_tx_data_wr *)__skb_push(skb, sizeof(*req)); req->op_to_immdlen = htonl(WR_OP_V(opcode) | FW_WR_COMPL_V(compl) | FW_WR_IMMDLEN_V(immdlen)); req->flowid_len16 = htonl(FW_WR_FLOWID_V(csk->tid) | FW_WR_LEN16_V(credits)); wr_ulp_mode_force = TX_ULP_MODE_V(csk->ulp_mode); if (is_sg_request(skb)) wr_ulp_mode_force |= FW_OFLD_TX_DATA_WR_ALIGNPLD_F | ((tcp_sk(sk)->nonagle & TCP_NAGLE_OFF) ? 0 : FW_OFLD_TX_DATA_WR_SHOVE_F); req->tunnel_to_proxy = htonl(wr_ulp_mode_force | TX_URG_V(skb_urgent(skb)) | TX_SHOVE_V((!csk_flag(sk, CSK_TX_MORE_DATA)) && skb_queue_empty(&csk->txq))); req->plen = htonl(len); } static int chtls_wr_size(struct chtls_sock *csk, const struct sk_buff *skb, bool size) { int wr_size; wr_size = TLS_WR_CPL_LEN; wr_size += KEY_ON_MEM_SZ; wr_size += ivs_size(csk->sk, skb); if (size) return wr_size; /* frags counted for IV dsgl */ if (!skb_ulp_tls_iv_imm(skb)) skb_shinfo(skb)->nr_frags++; return wr_size; } static bool is_ofld_imm(struct chtls_sock *csk, const struct sk_buff *skb) { int length = skb->len; if (skb->peeked || skb->len > MAX_IMM_ULPTX_WR_LEN) return false; if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) { /* Check TLS header len for Immediate */ if (csk->ulp_mode == ULP_MODE_TLS && skb_ulp_tls_inline(skb)) length += chtls_wr_size(csk, skb, true); else length += sizeof(struct fw_ofld_tx_data_wr); return length <= MAX_IMM_OFLD_TX_DATA_WR_LEN; } return true; } static unsigned int calc_tx_flits(const struct sk_buff *skb, unsigned int immdlen) { unsigned int flits, cnt; flits = immdlen / 8; /* headers */ cnt = skb_shinfo(skb)->nr_frags; if (skb_tail_pointer(skb) != skb_transport_header(skb)) cnt++; return flits + sgl_len(cnt); } static void arp_failure_discard(void *handle, struct sk_buff *skb) { kfree_skb(skb); } int 
chtls_push_frames(struct chtls_sock *csk, int comp) { struct chtls_hws *hws = &csk->tlshws; struct tcp_sock *tp; struct sk_buff *skb; int total_size = 0; struct sock *sk; int wr_size; wr_size = sizeof(struct fw_ofld_tx_data_wr); sk = csk->sk; tp = tcp_sk(sk); if (unlikely(sk_in_state(sk, TCPF_SYN_SENT | TCPF_CLOSE))) return 0; if (unlikely(csk_flag(sk, CSK_ABORT_SHUTDOWN))) return 0; while (csk->wr_credits && (skb = skb_peek(&csk->txq)) && (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_HOLD) || skb_queue_len(&csk->txq) > 1)) { unsigned int credit_len = skb->len; unsigned int credits_needed; unsigned int completion = 0; int tls_len = skb->len;/* TLS data len before IV/key */ unsigned int immdlen; int len = skb->len; /* length [ulp bytes] inserted by hw */ int flowclen16 = 0; int tls_tx_imm = 0; immdlen = skb->len; if (!is_ofld_imm(csk, skb)) { immdlen = skb_transport_offset(skb); if (skb_ulp_tls_inline(skb)) wr_size = chtls_wr_size(csk, skb, false); credit_len = 8 * calc_tx_flits(skb, immdlen); } else { if (skb_ulp_tls_inline(skb)) { wr_size = chtls_wr_size(csk, skb, false); tls_tx_imm = 1; } } if (likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) credit_len += wr_size; credits_needed = DIV_ROUND_UP(credit_len, 16); if (!csk_flag_nochk(csk, CSK_TX_DATA_SENT)) { flowclen16 = send_tx_flowc_wr(sk, 1, tp->snd_nxt, tp->rcv_nxt); if (flowclen16 <= 0) break; csk->wr_credits -= flowclen16; csk->wr_unacked += flowclen16; csk->wr_nondata += flowclen16; csk_set_flag(csk, CSK_TX_DATA_SENT); } if (csk->wr_credits < credits_needed) { if (skb_ulp_tls_inline(skb) && !skb_ulp_tls_iv_imm(skb)) skb_shinfo(skb)->nr_frags--; break; } __skb_unlink(skb, &csk->txq); skb_set_queue_mapping(skb, (csk->txq_idx << 1) | CPL_PRIORITY_DATA); if (hws->ofld) hws->txqid = (skb->queue_mapping >> 1); skb->csum = (__force __wsum)(credits_needed + csk->wr_nondata); csk->wr_credits -= credits_needed; csk->wr_unacked += credits_needed; csk->wr_nondata = 0; enqueue_wr(csk, skb); if 
(likely(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NEED_HDR)) { if ((comp && csk->wr_unacked == credits_needed) || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) || csk->wr_unacked >= csk->wr_max_credits / 2) { completion = 1; csk->wr_unacked = 0; } if (skb_ulp_tls_inline(skb)) make_tlstx_data_wr(sk, skb, tls_tx_imm, tls_len, credits_needed); else make_tx_data_wr(sk, skb, immdlen, len, credits_needed, completion); tp->snd_nxt += len; tp->lsndtime = tcp_jiffies32; if (completion) ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_NEED_HDR; } else { struct cpl_close_con_req *req = cplhdr(skb); unsigned int cmd = CPL_OPCODE_G(ntohl (OPCODE_TID(req))); if (cmd == CPL_CLOSE_CON_REQ) csk_set_flag(csk, CSK_CLOSE_CON_REQUESTED); if ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_COMPL) && (csk->wr_unacked >= csk->wr_max_credits / 2)) { req->wr.wr_hi |= htonl(FW_WR_COMPL_F); csk->wr_unacked = 0; } } total_size += skb->truesize; if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_BARRIER) csk_set_flag(csk, CSK_TX_WAIT_IDLE); t4_set_arp_err_handler(skb, NULL, arp_failure_discard); cxgb4_l2t_send(csk->egress_dev, skb, csk->l2t_entry); } sk->sk_wmem_queued -= total_size; return total_size; } static void mark_urg(struct tcp_sock *tp, int flags, struct sk_buff *skb) { if (unlikely(flags & MSG_OOB)) { tp->snd_up = tp->write_seq; ULP_SKB_CB(skb)->flags = ULPCB_FLAG_URG | ULPCB_FLAG_BARRIER | ULPCB_FLAG_NO_APPEND | ULPCB_FLAG_NEED_HDR; } } /* * Returns true if a connection should send more data to TCP engine */ static bool should_push(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct chtls_dev *cdev = csk->cdev; struct tcp_sock *tp = tcp_sk(sk); /* * If we've released our offload resources there's nothing to do ... */ if (!cdev) return false; /* * If there aren't any work requests in flight, or there isn't enough * data in flight, or Nagle is off then send the current TX_DATA * otherwise hold it and wait to accumulate more data. 
*/ return csk->wr_credits == csk->wr_max_credits || (tp->nonagle & TCP_NAGLE_OFF); } /* * Returns true if a TCP socket is corked. */ static bool corked(const struct tcp_sock *tp, int flags) { return (flags & MSG_MORE) || (tp->nonagle & TCP_NAGLE_CORK); } /* * Returns true if a send should try to push new data. */ static bool send_should_push(struct sock *sk, int flags) { return should_push(sk) && !corked(tcp_sk(sk), flags); } void chtls_tcp_push(struct sock *sk, int flags) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); int qlen = skb_queue_len(&csk->txq); if (likely(qlen)) { struct sk_buff *skb = skb_peek_tail(&csk->txq); struct tcp_sock *tp = tcp_sk(sk); mark_urg(tp, flags, skb); if (!(ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) && corked(tp, flags)) { ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_HOLD; return; } ULP_SKB_CB(skb)->flags &= ~ULPCB_FLAG_HOLD; if (qlen == 1 && ((ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || should_push(sk))) chtls_push_frames(csk, 1); } } /* * Calculate the size for a new send sk_buff. It's maximum size so we can * pack lots of data into it, unless we plan to send it immediately, in which * case we size it more tightly. * * Note: we don't bother compensating for MSS < PAGE_SIZE because it doesn't * arise in normal cases and when it does we are just wasting memory. */ static int select_size(struct sock *sk, int io_len, int flags, int len) { const int pgbreak = SKB_MAX_HEAD(len); /* * If the data wouldn't fit in the main body anyway, put only the * header in the main body so it can use immediate data and place all * the payload in page fragments. */ if (io_len > pgbreak) return 0; /* * If we will be accumulating payload get a large main body. 
*/ if (!send_should_push(sk, flags)) return pgbreak; return io_len; } void skb_entail(struct sock *sk, struct sk_buff *skb, int flags) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct tcp_sock *tp = tcp_sk(sk); ULP_SKB_CB(skb)->seq = tp->write_seq; ULP_SKB_CB(skb)->flags = flags; __skb_queue_tail(&csk->txq, skb); sk->sk_wmem_queued += skb->truesize; if (TCP_PAGE(sk) && TCP_OFF(sk)) { put_page(TCP_PAGE(sk)); TCP_PAGE(sk) = NULL; TCP_OFF(sk) = 0; } } static struct sk_buff *get_tx_skb(struct sock *sk, int size) { struct sk_buff *skb; skb = alloc_skb(size + TX_HEADER_LEN, sk->sk_allocation); if (likely(skb)) { skb_reserve(skb, TX_HEADER_LEN); skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR); skb_reset_transport_header(skb); } return skb; } static struct sk_buff *get_record_skb(struct sock *sk, int size, bool zcopy) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct sk_buff *skb; skb = alloc_skb(((zcopy ? 0 : size) + TX_TLSHDR_LEN + KEY_ON_MEM_SZ + max_ivs_size(sk, size)), sk->sk_allocation); if (likely(skb)) { skb_reserve(skb, (TX_TLSHDR_LEN + KEY_ON_MEM_SZ + max_ivs_size(sk, size))); skb_entail(sk, skb, ULPCB_FLAG_NEED_HDR); skb_reset_transport_header(skb); ULP_SKB_CB(skb)->ulp.tls.ofld = 1; ULP_SKB_CB(skb)->ulp.tls.type = csk->tlshws.type; } return skb; } static void tx_skb_finalize(struct sk_buff *skb) { struct ulp_skb_cb *cb = ULP_SKB_CB(skb); if (!(cb->flags & ULPCB_FLAG_NO_HDR)) cb->flags = ULPCB_FLAG_NEED_HDR; cb->flags |= ULPCB_FLAG_NO_APPEND; } static void push_frames_if_head(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); if (skb_queue_len(&csk->txq) == 1) chtls_push_frames(csk, 1); } static int chtls_skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *from, struct sk_buff *skb, struct page *page, int off, int copy) { int err; err = skb_do_copy_data_nocache(sk, skb, from, page_address(page) + off, copy, skb->len); if (err) return err; skb->len += copy; skb->data_len += copy; skb->truesize += 
copy; sk->sk_wmem_queued += copy; return 0; } static bool csk_mem_free(struct chtls_dev *cdev, struct sock *sk) { return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0); } static int csk_wait_memory(struct chtls_dev *cdev, struct sock *sk, long *timeo_p) { DEFINE_WAIT_FUNC(wait, woken_wake_function); int ret, err = 0; long current_timeo; long vm_wait = 0; bool noblock; current_timeo = *timeo_p; noblock = (*timeo_p ? false : true); if (csk_mem_free(cdev, sk)) { current_timeo = get_random_u32_below(HZ / 5) + 2; vm_wait = get_random_u32_below(HZ / 5) + 2; } add_wait_queue(sk_sleep(sk), &wait); while (1) { sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto do_error; if (!*timeo_p) { if (noblock) set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); goto do_nonblock; } if (signal_pending(current)) goto do_interrupted; sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); if (csk_mem_free(cdev, sk) && !vm_wait) break; set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); sk->sk_write_pending++; ret = sk_wait_event(sk, &current_timeo, sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN) || (csk_mem_free(cdev, sk) && !vm_wait), &wait); sk->sk_write_pending--; if (ret < 0) goto do_error; if (vm_wait) { vm_wait -= current_timeo; current_timeo = *timeo_p; if (current_timeo != MAX_SCHEDULE_TIMEOUT) { current_timeo -= vm_wait; if (current_timeo < 0) current_timeo = 0; } vm_wait = 0; } *timeo_p = current_timeo; } do_rm_wq: remove_wait_queue(sk_sleep(sk), &wait); return err; do_error: err = -EPIPE; goto do_rm_wq; do_nonblock: err = -EAGAIN; goto do_rm_wq; do_interrupted: err = sock_intr_errno(*timeo_p); goto do_rm_wq; } static int chtls_proccess_cmsg(struct sock *sk, struct msghdr *msg, unsigned char *record_type) { struct cmsghdr *cmsg; int rc = -EINVAL; for_each_cmsghdr(cmsg, msg) { if (!CMSG_OK(msg, cmsg)) return -EINVAL; if (cmsg->cmsg_level != SOL_TLS) continue; switch (cmsg->cmsg_type) { case TLS_SET_RECORD_TYPE: if (cmsg->cmsg_len < 
CMSG_LEN(sizeof(*record_type))) return -EINVAL; if (msg->msg_flags & MSG_MORE) return -EINVAL; *record_type = *(unsigned char *)CMSG_DATA(cmsg); rc = 0; break; default: return -EINVAL; } } return rc; } int chtls_sendmsg(struct sock *sk, struct msghdr *msg, size_t size) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct chtls_dev *cdev = csk->cdev; struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int mss, flags, err; int recordsz = 0; int copied = 0; long timeo; lock_sock(sk); flags = msg->msg_flags; timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); if (!sk_in_state(sk, TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) { err = sk_stream_wait_connect(sk, &timeo); if (err) goto out_err; } sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); err = -EPIPE; if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto out_err; mss = csk->mss; csk_set_flag(csk, CSK_TX_MORE_DATA); while (msg_data_left(msg)) { int copy = 0; skb = skb_peek_tail(&csk->txq); if (skb) { copy = mss - skb->len; skb->ip_summed = CHECKSUM_UNNECESSARY; } if (!csk_mem_free(cdev, sk)) goto wait_for_sndbuf; if (is_tls_tx(csk) && !csk->tlshws.txleft) { unsigned char record_type = TLS_RECORD_TYPE_DATA; if (unlikely(msg->msg_controllen)) { err = chtls_proccess_cmsg(sk, msg, &record_type); if (err) goto out_err; /* Avoid appending tls handshake, alert to tls data */ if (skb) tx_skb_finalize(skb); } recordsz = size; csk->tlshws.txleft = recordsz; csk->tlshws.type = record_type; } if (!skb || (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) || copy <= 0) { new_buf: if (skb) { tx_skb_finalize(skb); push_frames_if_head(sk); } if (is_tls_tx(csk)) { skb = get_record_skb(sk, select_size(sk, recordsz, flags, TX_TLSHDR_LEN), false); } else { skb = get_tx_skb(sk, select_size(sk, size, flags, TX_HEADER_LEN)); } if (unlikely(!skb)) goto wait_for_memory; skb->ip_summed = CHECKSUM_UNNECESSARY; copy = mss; } if (copy > size) copy = size; if (msg->msg_flags & MSG_SPLICE_PAGES) { err = skb_splice_from_iter(skb, &msg->msg_iter, 
copy, sk->sk_allocation); if (err < 0) { if (err == -EMSGSIZE) goto new_buf; goto do_fault; } copy = err; sk_wmem_queued_add(sk, copy); } else if (skb_tailroom(skb) > 0) { copy = min(copy, skb_tailroom(skb)); if (is_tls_tx(csk)) copy = min_t(int, copy, csk->tlshws.txleft); err = skb_add_data_nocache(sk, skb, &msg->msg_iter, copy); if (err) goto do_fault; } else { int i = skb_shinfo(skb)->nr_frags; struct page *page = TCP_PAGE(sk); int pg_size = PAGE_SIZE; int off = TCP_OFF(sk); bool merge; if (page) pg_size = page_size(page); if (off < pg_size && skb_can_coalesce(skb, i, page, off)) { merge = true; goto copy; } merge = false; if (i == (is_tls_tx(csk) ? (MAX_SKB_FRAGS - 1) : MAX_SKB_FRAGS)) goto new_buf; if (page && off == pg_size) { put_page(page); TCP_PAGE(sk) = page = NULL; pg_size = PAGE_SIZE; } if (!page) { gfp_t gfp = sk->sk_allocation; int order = cdev->send_page_order; if (order) { page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY, order); if (page) pg_size <<= order; } if (!page) { page = alloc_page(gfp); pg_size = PAGE_SIZE; } if (!page) goto wait_for_memory; off = 0; } copy: if (copy > pg_size - off) copy = pg_size - off; if (is_tls_tx(csk)) copy = min_t(int, copy, csk->tlshws.txleft); err = chtls_skb_copy_to_page_nocache(sk, &msg->msg_iter, skb, page, off, copy); if (unlikely(err)) { if (!TCP_PAGE(sk)) { TCP_PAGE(sk) = page; TCP_OFF(sk) = 0; } goto do_fault; } /* Update the skb. 
*/ if (merge) { skb_frag_size_add( &skb_shinfo(skb)->frags[i - 1], copy); } else { skb_fill_page_desc(skb, i, page, off, copy); if (off + copy < pg_size) { /* space left keep page */ get_page(page); TCP_PAGE(sk) = page; } else { TCP_PAGE(sk) = NULL; } } TCP_OFF(sk) = off + copy; } if (unlikely(skb->len == mss)) tx_skb_finalize(skb); tp->write_seq += copy; copied += copy; size -= copy; if (is_tls_tx(csk)) csk->tlshws.txleft -= copy; if (corked(tp, flags) && (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))) ULP_SKB_CB(skb)->flags |= ULPCB_FLAG_NO_APPEND; if (size == 0) goto out; if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_NO_APPEND) push_frames_if_head(sk); continue; wait_for_sndbuf: set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); wait_for_memory: err = csk_wait_memory(cdev, sk, &timeo); if (err) goto do_error; } out: csk_reset_flag(csk, CSK_TX_MORE_DATA); if (copied) chtls_tcp_push(sk, flags); done: release_sock(sk); return copied; do_fault: if (!skb->len) { __skb_unlink(skb, &csk->txq); sk->sk_wmem_queued -= skb->truesize; __kfree_skb(skb); } do_error: if (copied) goto out; out_err: if (csk_conn_inline(csk)) csk_reset_flag(csk, CSK_TX_MORE_DATA); copied = sk_stream_error(sk, flags, err); goto done; } void chtls_splice_eof(struct socket *sock) { struct sock *sk = sock->sk; lock_sock(sk); chtls_tcp_push(sk, 0); release_sock(sk); } static void chtls_select_window(struct sock *sk) { struct chtls_sock *csk = rcu_dereference_sk_user_data(sk); struct tcp_sock *tp = tcp_sk(sk); unsigned int wnd = tp->rcv_wnd; wnd = max_t(unsigned int, wnd, tcp_full_space(sk)); wnd = max_t(unsigned int, MIN_RCV_WND, wnd); if (wnd > MAX_RCV_WND) wnd = MAX_RCV_WND; /* * Check if we need to grow the receive window in response to an increase in * the socket's receive buffer size. Some applications increase the buffer * size dynamically and rely on the window to grow accordingly. 
 */
	if (wnd > tp->rcv_wnd) {
		/* Window grew: advance rcv_wnd and back off rcv_wup so the
		 * extra window is announced to the peer as new credits.
		 */
		tp->rcv_wup -= wnd - tp->rcv_wnd;
		tp->rcv_wnd = wnd;
		/* Mark the receive window as updated */
		csk_reset_flag(csk, CSK_UPDATE_RCV_WND);
	}
}

/*
 * Send RX credits through an RX_DATA_ACK CPL message. We are permitted
 * to return without sending the message in case we cannot allocate
 * an sk_buff. Returns the number of credits sent.
 */
static u32 send_rx_credits(struct chtls_sock *csk, u32 credits)
{
	struct cpl_rx_data_ack *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
	if (!skb)
		return 0;
	__skb_put(skb, sizeof(*req));
	req = (struct cpl_rx_data_ack *)skb->head;

	set_wr_txq(skb, CPL_PRIORITY_ACK, csk->port_id);
	INIT_TP_WR(req, csk->tid);
	OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_RX_DATA_ACK,
						    csk->tid));
	/* RX_FORCE_ACK_F forces the HW to ACK immediately */
	req->credit_dack = cpu_to_be32(RX_CREDITS_V(credits) |
				       RX_FORCE_ACK_F);
	cxgb4_ofld_send(csk->cdev->ports[csk->port_id], skb);
	return credits;
}

#define CREDIT_RETURN_STATE	(TCPF_ESTABLISHED | \
				 TCPF_FIN_WAIT1 | \
				 TCPF_FIN_WAIT2)

/*
 * Called after some received data has been read. It returns RX credits
 * to the HW for the amount of data processed.
 */
static void chtls_cleanup_rbuf(struct sock *sk, int copied)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct tcp_sock *tp;
	int must_send;
	u32 credits;
	u32 thres;

	/* credit-return batching threshold (bytes) */
	thres = 15 * 1024;

	if (!sk_in_state(sk, CREDIT_RETURN_STATE))
		return;

	chtls_select_window(sk);
	tp = tcp_sk(sk);
	/* bytes consumed by the application but not yet credited to HW */
	credits = tp->copied_seq - tp->rcv_wup;
	if (unlikely(!credits))
		return;

	/*
	 * For coalescing to work effectively ensure the receive window has
	 * at least 16KB left.
	 */
	must_send = credits + 16384 >= tp->rcv_wnd;

	if (must_send || credits >= thres)
		tp->rcv_wup += send_rx_credits(csk, credits);
}

/*
 * TLS-offload (inline-TLS) variant of recvmsg: records arrive as whole
 * TLS PDUs on the receive queue; hws->copied_seq tracks the intra-record
 * read offset. Called with the socket lock held; releases it on return.
 */
static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			    int flags, int *addr_len)
{
	struct chtls_sock *csk = rcu_dereference_sk_user_data(sk);
	struct chtls_hws *hws = &csk->tlshws;
	struct net_device *dev = csk->egress_dev;
	struct adapter *adap = netdev2adap(dev);
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned long avail;
	int buffers_freed;
	int copied = 0;
	int target;
	long timeo;
	int ret;

	buffers_freed = 0;

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset = 0;

		/* stop at (or before) urgent data */
		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}
		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;
		/* no data queued: opportunistically flush pending TX work */
		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;

			if (!timeo)
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		/* drain the backlog by cycling the socket lock */
		if (READ_ONCE(sk->sk_backlog.tail)) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		ret = sk_wait_data(sk, &timeo, NULL);
		if (ret < 0) {
			/* return partial count if any, else the error */
			copied = copied ? : ret;
			goto unlock;
		}
		continue;

found_ok_skb:
		if (!skb->len) {
			/* zero-length skb: discard and re-evaluate state */
			skb_dst_set(skb, NULL);
			__skb_unlink(skb, &sk->sk_receive_queue);
			kfree_skb(skb);

			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}

			if (copied < target) {
				release_sock(sk);
				lock_sock(sk);
				continue;
			}
			break;
		}
		/* resume mid-record at the saved intra-record offset */
		offset = hws->copied_seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					/* First byte is urgent, skip */
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}

		/* Set record type if not already done. For a non-data record,
		 * do not proceed if record type could not be copied.
		 */
		if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
			struct tls_hdr *thdr = (struct tls_hdr *)skb->data;
			int cerr = 0;

			cerr = put_cmsg(msg, SOL_TLS, TLS_GET_RECORD_TYPE,
					sizeof(thdr->type), &thdr->type);

			if (cerr && thdr->type != TLS_RECORD_TYPE_DATA) {
				copied = -EIO;
				break;
			}
			/* don't send tls header, skip copy */
			goto skip_copy;
		}

		if (skb_copy_datagram_msg(skb, offset, msg, avail)) {
			if (!copied) {
				copied = -EFAULT;
				break;
			}
		}

		copied += avail;
		len -= avail;
		hws->copied_seq += avail;
skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		/* whole skb consumed: account for the PDU and free it */
		if ((avail + offset) >= skb->len) {
			struct sk_buff *next_skb;

			if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
				tp->copied_seq += skb->len;
				hws->rcvpld = skb->hdr_len;
			} else {
				atomic_inc(&adap->chcr_stats.tls_pdu_rx);
				tp->copied_seq += hws->rcvpld;
			}
			chtls_free_skb(sk, skb);
			buffers_freed++;
			hws->copied_seq = 0;
			next_skb = skb_peek(&sk->sk_receive_queue);
			if (copied >= target && !next_skb)
				break;
			/*
			 * NOTE(review): next_skb may be NULL here when
			 * copied < target and the queue just drained —
			 * confirm this cannot be reached with an empty
			 * queue before relying on this dereference.
			 */
			if (ULP_SKB_CB(next_skb)->flags & ULPCB_FLAG_TLS_HDR)
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);

unlock:
	release_sock(sk);
	return copied;
}

/*
 * Peek at data in a socket's receive buffer.
 */
static int peekmsg(struct sock *sk, struct msghdr *msg,
		   size_t len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 peek_seq, offset;
	struct sk_buff *skb;
	int copied = 0;
	size_t avail;          /* amount of available data in current skb */
	long timeo;
	int ret;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	/* local cursor — copied_seq itself is never advanced when peeking */
	peek_seq = tp->copied_seq;

	do {
		if (unlikely(tp->urg_data && tp->urg_seq == peek_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
				-EAGAIN;
				break;
			}
		}

		skb_queue_walk(&sk->sk_receive_queue, skb) {
			offset = peek_seq - ULP_SKB_CB(skb)->seq;
			if (offset < skb->len)
				goto found_ok_skb;
		}

		/* empty receive queue */
		if (copied)
			break;
		if (sock_flag(sk, SOCK_DONE))
			break;
		if (sk->sk_err) {
			copied = sock_error(sk);
			break;
		}
		if (sk->sk_shutdown & RCV_SHUTDOWN)
			break;
		if (sk->sk_state == TCP_CLOSE) {
			copied = -ENOTCONN;
			break;
		}
		if (!timeo) {
			copied = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			copied = sock_intr_errno(timeo);
			break;
		}

		if (READ_ONCE(sk->sk_backlog.tail)) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else {
			ret = sk_wait_data(sk, &timeo, NULL);
			if (ret < 0) {
				/* here 'copied' is 0 due to previous checks */
				copied = ret;
				break;
			}
		}

		/* another reader advanced copied_seq while we slept */
		if (unlikely(peek_seq != tp->copied_seq)) {
			if (net_ratelimit())
				pr_info("TCP(%s:%d), race in MSG_PEEK.\n",
					current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

found_ok_skb:
		avail = skb->len - offset;
		if (len < avail)
			avail = len;
		/*
		 * Do we have urgent data here? We need to skip over the
		 * urgent byte.
		 */
		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - peek_seq;

			if (urg_offset < avail) {
				/*
				 * The amount of data we are preparing to copy
				 * contains urgent data.
				 */
				if (!urg_offset) {
					/* First byte is urgent */
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						peek_seq++;
						offset++;
						avail--;
					}
					if (!avail)
						continue;
				} else {
					/* stop short of the urgent data */
					avail = urg_offset;
				}
			}
		}

		/*
		 * If MSG_TRUNC is specified the data is discarded.
		 * NOTE(review): the copy length passed here is 'len', not
		 * 'avail' — confirm this is intentional for the multi-skb
		 * peek case.
		 */
		if (likely(!(flags & MSG_TRUNC)))
			if (skb_copy_datagram_msg(skb, offset, msg, len)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		peek_seq += avail;
		copied += avail;
		len -= avail;
	} while (len > 0);

	release_sock(sk);
	return copied;
}

/*
 * Offloaded recvmsg entry point. Dispatches to the TCP stack for OOB,
 * to peekmsg() for MSG_PEEK, and to chtls_pt_recvmsg() for TLS-RX
 * connections; otherwise copies plain offloaded TCP data.
 */
int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
		  int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct chtls_sock *csk;
	unsigned long avail;    /* amount of available data in current skb */
	int buffers_freed;
	int copied = 0;
	long timeo;
	int target;             /* Read at least this many bytes */
	int ret;

	buffers_freed = 0;

	if (unlikely(flags & MSG_OOB))
		return tcp_prot.recvmsg(sk, msg, len, flags, addr_len);

	if (unlikely(flags & MSG_PEEK))
		return peekmsg(sk, msg, len, flags);

	if (sk_can_busy_loop(sk) &&
	    skb_queue_empty_lockless(&sk->sk_receive_queue) &&
	    sk->sk_state == TCP_ESTABLISHED)
		sk_busy_loop(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);
	csk = rcu_dereference_sk_user_data(sk);

	if (is_tls_rx(csk))
		/* TLS path releases the socket lock itself */
		return chtls_pt_recvmsg(sk, msg, len, flags, addr_len);

	timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

	if (unlikely(csk_flag(sk, CSK_UPDATE_RCV_WND)))
		chtls_cleanup_rbuf(sk, copied);

	do {
		struct sk_buff *skb;
		u32 offset;

		if (unlikely(tp->urg_data && tp->urg_seq == tp->copied_seq)) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) :
					-EAGAIN;
				break;
			}
		}

		skb = skb_peek(&sk->sk_receive_queue);
		if (skb)
			goto found_ok_skb;

		if (csk->wr_credits &&
		    skb_queue_len(&csk->txq) &&
		    chtls_push_frames(csk, csk->wr_credits ==
				      csk->wr_max_credits))
			sk->sk_write_space(sk);

		if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
			break;

		if (copied) {
			if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    signal_pending(current))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}
			if (!timeo) {
				copied = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		if (READ_ONCE(sk->sk_backlog.tail)) {
			release_sock(sk);
			lock_sock(sk);
			chtls_cleanup_rbuf(sk, copied);
			continue;
		}

		if (copied >= target)
			break;
		chtls_cleanup_rbuf(sk, copied);
		ret = sk_wait_data(sk, &timeo, NULL);
		if (ret < 0) {
			copied = copied ? : ret;
			goto unlock;
		}
		continue;

found_ok_skb:
		if (!skb->len) {
			chtls_kfree_skb(sk, skb);
			if (!copied && !timeo) {
				copied = -EAGAIN;
				break;
			}
			if (copied < target)
				continue;
			break;
		}

		/* offset of copied_seq within this skb's payload */
		offset = tp->copied_seq - ULP_SKB_CB(skb)->seq;
		avail = skb->len - offset;
		if (len < avail)
			avail = len;

		if (unlikely(tp->urg_data)) {
			u32 urg_offset = tp->urg_seq - tp->copied_seq;

			if (urg_offset < avail) {
				if (urg_offset) {
					avail = urg_offset;
				} else if (!sock_flag(sk, SOCK_URGINLINE)) {
					/* first byte is urgent, skip it */
					tp->copied_seq++;
					offset++;
					avail--;
					if (!avail)
						goto skip_copy;
				}
			}
		}

		if (likely(!(flags & MSG_TRUNC))) {
			if (skb_copy_datagram_msg(skb, offset,
						  msg, avail)) {
				if (!copied) {
					copied = -EFAULT;
					break;
				}
			}
		}

		tp->copied_seq += avail;
		copied += avail;
		len -= avail;

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq))
			tp->urg_data = 0;

		if (avail + offset >= skb->len) {
			chtls_free_skb(sk, skb);
			buffers_freed++;

			if  (copied >= target &&
			     !skb_peek(&sk->sk_receive_queue))
				break;
		}
	} while (len > 0);

	if (buffers_freed)
		chtls_cleanup_rbuf(sk, copied);

unlock:
	release_sock(sk);
	return copied;
}
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2007-2012 Nicira, Inc. */ #include <linux/netdevice.h> #include <net/genetlink.h> #include <net/netns/generic.h> #include "datapath.h" #include "vport-internal_dev.h" #include "vport-netdev.h" static void dp_detach_port_notify(struct vport *vport) { struct sk_buff *notify; struct datapath *dp; dp = vport->dp; notify = ovs_vport_cmd_build_info(vport, ovs_dp_get_net(dp), 0, 0, OVS_VPORT_CMD_DEL); ovs_dp_detach_port(vport); if (IS_ERR(notify)) { genl_set_err(&dp_vport_genl_family, ovs_dp_get_net(dp), 0, 0, PTR_ERR(notify)); return; } genlmsg_multicast_netns(&dp_vport_genl_family, ovs_dp_get_net(dp), notify, 0, 0, GFP_KERNEL); } void ovs_dp_notify_wq(struct work_struct *work) { struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work); struct datapath *dp; ovs_lock(); list_for_each_entry(dp, &ovs_net->dps, list_node) { int i; for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) { struct vport *vport; struct hlist_node *n; hlist_for_each_entry_safe(vport, n, &dp->ports[i], dp_hash_node) { if (vport->ops->type == OVS_VPORT_TYPE_INTERNAL) continue; if (!(netif_is_ovs_port(vport->dev))) dp_detach_port_notify(vport); } } } ovs_unlock(); } static int dp_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct ovs_net *ovs_net; struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct vport *vport = NULL; if (!ovs_is_internal_dev(dev)) vport = ovs_netdev_get_vport(dev); if (!vport) return NOTIFY_DONE; if (event == NETDEV_UNREGISTER) { /* upper_dev_unlink and decrement promisc immediately */ ovs_netdev_detach_dev(vport); /* schedule vport destroy, dev_put and genl notification */ ovs_net = net_generic(dev_net(dev), ovs_net_id); queue_work(system_wq, &ovs_net->dp_notify_work); } return NOTIFY_DONE; } struct notifier_block ovs_dp_device_notifier = { .notifier_call = dp_device_event };
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2013
 * Author: Hugues Fruchet <[email protected]> for STMicroelectronics.
 */

#include "delta.h"
#include "delta-mjpeg.h"

/* JPEG marker second bytes (first byte is always MJPEG_MARKER, 0xff) */
#define MJPEG_SOF_0  0xc0
#define MJPEG_SOF_1  0xc1
#define MJPEG_SOI    0xd8
#define MJPEG_MARKER 0xff

/*
 * Format a parsed MJPEG header into a human-readable string for logging.
 * Returns 'str' (or "" when header is NULL).
 */
static char *header_str(struct mjpeg_header *header,
			char *str,
			unsigned int len)
{
	char *cur = str;
	/* NOTE(review): 'left' is only consumed by the single snprintf;
	 * snprintf truncation is deliberately ignored for log output. */
	unsigned int left = len;

	if (!header)
		return "";

	snprintf(cur, left, "[MJPEG header]\n"
			"|- length = %d\n"
			"|- precision = %d\n"
			"|- width = %d\n"
			"|- height = %d\n"
			"|- components = %d\n",
			header->length,
			header->sample_precision,
			header->frame_width,
			header->frame_height,
			header->nb_of_components);

	return str;
}

/*
 * Parse a Start-Of-Frame (SOF) segment at 'data' into 'header'.
 * 'data' points just past the 0xff/SOF marker pair; 'size' is the number
 * of bytes remaining. Returns 0, -EINVAL on an unsupported component
 * count, or -ENODATA when the stream is too short.
 */
static int delta_mjpeg_read_sof(struct delta_ctx *pctx,
				unsigned char *data, unsigned int size,
				struct mjpeg_header *header)
{
	struct delta_dev *delta = pctx->dev;
	unsigned int offset = 0;

	/* need at least a full fixed-size SOF prologue */
	if (size < 64)
		goto err_no_more;

	memset(header, 0, sizeof(*header));
	/* SOF fields are big-endian per the JPEG spec */
	header->length = be16_to_cpu(*(__be16 *)(data + offset));
	offset += sizeof(u16);
	header->sample_precision = *(u8 *)(data + offset);
	offset += sizeof(u8);
	header->frame_height = be16_to_cpu(*(__be16 *)(data + offset));
	offset += sizeof(u16);
	header->frame_width = be16_to_cpu(*(__be16 *)(data + offset));
	offset += sizeof(u16);
	header->nb_of_components = *(u8 *)(data + offset);
	offset += sizeof(u8);

	if (header->nb_of_components >= MJPEG_MAX_COMPONENTS) {
		dev_err(delta->dev,
			"%s unsupported number of components (%d > %d)\n",
			pctx->name, header->nb_of_components,
			MJPEG_MAX_COMPONENTS);
		return -EINVAL;
	}

	/* per-component data must also fit in the remaining bytes */
	if ((offset + header->nb_of_components *
	     sizeof(header->components[0])) > size)
		goto err_no_more;

	return 0;

err_no_more:
	dev_err(delta->dev,
		"%s sof: reached end of %d size input stream\n",
		pctx->name, size);
	return -ENODATA;
}

/*
 * Scan 'data' for an SOI marker followed by an SOF0/SOF1 marker, parse
 * the SOF into 'header' and report the SOI position in '*data_offset'.
 * Returns 0 on success, -EINVAL on a SOF seen before SOI, -ENODATA when
 * no header is found within 'size' bytes, or the SOF parse error.
 */
int delta_mjpeg_read_header(struct delta_ctx *pctx,
			    unsigned char *data, unsigned int size,
			    struct mjpeg_header *header,
			    unsigned int *data_offset)
{
	struct delta_dev *delta = pctx->dev;
	unsigned char str[200];
	unsigned int ret = 0;
	unsigned int offset = 0;
	unsigned int soi = 0;

	/* need at least one marker pair to inspect */
	if (size < 2)
		goto err_no_more;

	offset = 0;
	while (1) {
		if (data[offset] == MJPEG_MARKER)
			switch (data[offset + 1]) {
			case MJPEG_SOI:
				soi = 1;
				*data_offset = offset;
				break;

			case MJPEG_SOF_0:
			case MJPEG_SOF_1:
				if (!soi) {
					dev_err(delta->dev,
						"%s wrong sequence, got SOF while SOI not seen\n",
						pctx->name);
					return -EINVAL;
				}

				ret = delta_mjpeg_read_sof(pctx,
							   &data[offset + 2],
							   size - (offset + 2),
							   header);
				if (ret)
					goto err;

				goto done;

			default:
				break;
			}

		offset++;

		/* ensure data[offset] and data[offset + 1] stay in bounds */
		if ((offset + 2) >= size)
			goto err_no_more;
	}

done:
	dev_dbg(delta->dev,
		"%s found header @ offset %d:\n%s",
		pctx->name,
		*data_offset,
		header_str(header, str, sizeof(str)));
	return 0;

err_no_more:
	dev_err(delta->dev,
		"%s no header found within %d bytes input stream\n",
		pctx->name, size);
	return -ENODATA;

err:
	return ret;
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * BTS PMU driver for perf
 * Copyright (c) 2013-2014, Intel Corporation.
 */

#undef DEBUG

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/coredump.h>
#include <linux/sizes.h>

#include <asm/perf_event.h>

#include "../perf_event.h"

/* Per-CPU BTS state: AUX handle, saved DS registers, and state machine. */
struct bts_ctx {
	struct perf_output_handle	handle;
	struct debug_store		ds_back;
	int				state;
};

/* BTS context states: */
enum {
	/* no ongoing AUX transactions */
	BTS_STATE_STOPPED = 0,
	/* AUX transaction is on, BTS tracing is disabled */
	BTS_STATE_INACTIVE,
	/* AUX transaction is on, BTS tracing is running */
	BTS_STATE_ACTIVE,
};

static DEFINE_PER_CPU(struct bts_ctx, bts_ctx);

#define BTS_RECORD_SIZE		24
#define BTS_SAFETY_MARGIN	4080

/* One physically contiguous run of AUX pages. */
struct bts_phys {
	struct page	*page;
	unsigned long	size;
	unsigned long	offset;
	unsigned long	displacement;
};

/* AUX buffer split into record-aligned physical chunks. */
struct bts_buffer {
	size_t		real_size;	/* multiple of BTS_RECORD_SIZE */
	unsigned int	nr_pages;
	unsigned int	nr_bufs;
	unsigned int	cur_buf;
	bool		snapshot;
	local_t		data_size;
	local_t		head;
	unsigned long	end;
	void		**data_pages;
	struct bts_phys	buf[];
};

static struct pmu bts_pmu;

/* Number of pages in a (possibly high-order) AUX page. */
static int buf_nr_pages(struct page *page)
{
	if (!PagePrivate(page))
		return 1;

	return 1 << page_private(page);
}

static size_t buf_size(struct page *page)
{
	return buf_nr_pages(page) * PAGE_SIZE;
}

/*
 * AUX setup_aux callback: build a bts_buffer describing the physical
 * layout of 'pages', with each chunk trimmed to a BTS_RECORD_SIZE
 * multiple. Returns NULL on failure or unsupported layout.
 */
static void *
bts_buffer_setup_aux(struct perf_event *event, void **pages,
		     int nr_pages, bool overwrite)
{
	struct bts_buffer *buf;
	struct page *page;
	int cpu = event->cpu;
	int node = (cpu == -1) ? cpu : cpu_to_node(cpu);
	unsigned long offset;
	size_t size = nr_pages << PAGE_SHIFT;
	int pg, nbuf, pad;

	/* count all the high order buffers */
	for (pg = 0, nbuf = 0; pg < nr_pages;) {
		page = virt_to_page(pages[pg]);
		pg += buf_nr_pages(page);
		nbuf++;
	}

	/*
	 * to avoid interrupts in overwrite mode, only allow one physical
	 */
	if (overwrite && nbuf > 1)
		return NULL;

	buf = kzalloc_node(offsetof(struct bts_buffer, buf[nbuf]), GFP_KERNEL, node);
	if (!buf)
		return NULL;

	buf->nr_pages = nr_pages;
	buf->nr_bufs = nbuf;
	buf->snapshot = overwrite;
	buf->data_pages = pages;
	buf->real_size = size - size % BTS_RECORD_SIZE;

	/* carve the page list into record-aligned physical chunks */
	for (pg = 0, nbuf = 0, offset = 0, pad = 0; nbuf < buf->nr_bufs; nbuf++) {
		unsigned int __nr_pages;

		page = virt_to_page(pages[pg]);
		__nr_pages = buf_nr_pages(page);
		buf->buf[nbuf].page = page;
		buf->buf[nbuf].offset = offset;
		buf->buf[nbuf].displacement = (pad ? BTS_RECORD_SIZE - pad : 0);
		buf->buf[nbuf].size = buf_size(page) - buf->buf[nbuf].displacement;
		pad = buf->buf[nbuf].size % BTS_RECORD_SIZE;
		buf->buf[nbuf].size -= pad;
		pg += __nr_pages;
		offset += __nr_pages << PAGE_SHIFT;
	}

	return buf;
}

static void bts_buffer_free_aux(void *data)
{
	kfree(data);
}

/* Offset of chunk 'idx' within the virtual AUX buffer. */
static unsigned long bts_buffer_offset(struct bts_buffer *buf, unsigned int idx)
{
	return buf->buf[idx].offset + buf->buf[idx].displacement;
}

/*
 * Program the DS area (base/index/maximum/threshold) for the current
 * physical chunk. In snapshot mode the interrupt threshold is placed
 * past the maximum so no PMI fires.
 */
static void
bts_config_buffer(struct bts_buffer *buf)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_phys *phys = &buf->buf[buf->cur_buf];
	unsigned long index, thresh = 0, end = phys->size;
	struct page *page = phys->page;

	index = local_read(&buf->head);

	if (!buf->snapshot) {
		if (buf->end < phys->offset + buf_size(page))
			end = buf->end - phys->offset - phys->displacement;

		index -= phys->offset + phys->displacement;

		if (end - index > BTS_SAFETY_MARGIN)
			thresh = end - BTS_SAFETY_MARGIN;
		else if (end - index > BTS_RECORD_SIZE)
			thresh = end - BTS_RECORD_SIZE;
		else
			thresh = end;
	}

	ds->bts_buffer_base = (u64)(long)page_address(page) + phys->displacement;
	ds->bts_index = ds->bts_buffer_base + index;
	ds->bts_absolute_maximum = ds->bts_buffer_base + end;
	ds->bts_interrupt_threshold = !buf->snapshot
		? ds->bts_buffer_base + thresh
		: ds->bts_absolute_maximum + BTS_RECORD_SIZE;
}

/* Zero-fill the unused tail of a chunk so readers see no stale data. */
static void bts_buffer_pad_out(struct bts_phys *phys, unsigned long head)
{
	unsigned long index = head - phys->offset;

	memset(page_address(phys->page) + index, 0, phys->size - index);
}

/*
 * Sync buf->head/data_size with the hardware write pointer in the DS
 * area; flags truncation when the absolute maximum was reached.
 */
static void bts_update(struct bts_ctx *bts)
{
	int cpu = raw_smp_processor_id();
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	unsigned long index = ds->bts_index - ds->bts_buffer_base, old, head;

	if (!buf)
		return;

	head = index + bts_buffer_offset(buf, buf->cur_buf);
	old = local_xchg(&buf->head, head);

	if (!buf->snapshot) {
		if (old == head)
			return;

		if (ds->bts_index >= ds->bts_absolute_maximum)
			perf_aux_output_flag(&bts->handle,
					     PERF_AUX_FLAG_TRUNCATED);

		/*
		 * old and head are always in the same physical buffer, so we
		 * can subtract them to get the data size.
		 */
		local_add(head - old, &buf->data_size);
	} else {
		local_set(&buf->data_size, head);
	}

	/*
	 * Since BTS is coherent, just add compiler barrier to ensure
	 * BTS updating is ordered against bts::handle::event.
	 */
	barrier();
}

static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle);

/*
 * Ordering PMU callbacks wrt themselves and the PMI is done by means
 * of bts::state, which:
 *  - is set when bts::handle::event is valid, that is, between
 *    perf_aux_output_begin() and perf_aux_output_end();
 *  - is zero otherwise;
 *  - is ordered against bts::handle::event with a compiler barrier.
 */

/* Program the DS area and turn on BTS tracing for 'event'. */
static void __bts_event_start(struct perf_event *event)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = perf_get_aux(&bts->handle);
	u64 config = 0;

	if (!buf->snapshot)
		config |= ARCH_PERFMON_EVENTSEL_INT;
	if (!event->attr.exclude_kernel)
		config |= ARCH_PERFMON_EVENTSEL_OS;
	if (!event->attr.exclude_user)
		config |= ARCH_PERFMON_EVENTSEL_USR;

	bts_config_buffer(buf);

	/*
	 * local barrier to make sure that ds configuration made it
	 * before we enable BTS and bts::state goes ACTIVE
	 */
	wmb();

	/* INACTIVE/STOPPED -> ACTIVE */
	WRITE_ONCE(bts->state, BTS_STATE_ACTIVE);

	intel_pmu_enable_bts(config);
}

/* PMU ->start: open an AUX transaction, save DS state and start BTS. */
static void bts_event_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf;

	buf = perf_aux_output_begin(&bts->handle, event);
	if (!buf)
		goto fail_stop;

	if (bts_buffer_reset(buf, &bts->handle))
		goto fail_end_stop;

	/* preserve DS registers so ->stop can restore them */
	bts->ds_back.bts_buffer_base = cpuc->ds->bts_buffer_base;
	bts->ds_back.bts_absolute_maximum = cpuc->ds->bts_absolute_maximum;
	bts->ds_back.bts_interrupt_threshold = cpuc->ds->bts_interrupt_threshold;

	perf_event_itrace_started(event);
	event->hw.state = 0;

	__bts_event_start(event);

	return;

fail_end_stop:
	perf_aux_output_end(&bts->handle, 0);

fail_stop:
	event->hw.state = PERF_HES_STOPPED;
}

/* Disable BTS tracing and record the resulting state. */
static void __bts_event_stop(struct perf_event *event, int state)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/* ACTIVE -> INACTIVE(PMI)/STOPPED(->stop()) */
	WRITE_ONCE(bts->state, state);

	/*
	 * No extra synchronization is mandated by the documentation to have
	 * BTS data stores globally visible.
	 */
	intel_pmu_disable_bts();
}

/* PMU ->stop: stop tracing, flush data to AUX, restore DS registers. */
static void bts_event_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct bts_buffer *buf = NULL;
	int state = READ_ONCE(bts->state);

	if (state == BTS_STATE_ACTIVE)
		__bts_event_stop(event, BTS_STATE_STOPPED);

	if (state != BTS_STATE_STOPPED)
		buf = perf_get_aux(&bts->handle);

	event->hw.state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_UPDATE) {
		bts_update(bts);

		if (buf) {
			if (buf->snapshot)
				bts->handle.head =
					local_xchg(&buf->data_size,
						   buf->nr_pages << PAGE_SHIFT);
			perf_aux_output_end(&bts->handle,
					    local_xchg(&buf->data_size, 0));
		}

		cpuc->ds->bts_index = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_buffer_base = bts->ds_back.bts_buffer_base;
		cpuc->ds->bts_absolute_maximum = bts->ds_back.bts_absolute_maximum;
		cpuc->ds->bts_interrupt_threshold = bts->ds_back.bts_interrupt_threshold;
	}
}

/* Re-enable BTS after a PMI-induced INACTIVE transition. */
void intel_bts_enable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	int state = READ_ONCE(bts->state);

	/*
	 * Here we transition from INACTIVE to ACTIVE;
	 * if we instead are STOPPED from the interrupt handler,
	 * stay that way. Can't be ACTIVE here though.
	 */
	if (WARN_ON_ONCE(state == BTS_STATE_ACTIVE))
		return;

	if (state == BTS_STATE_STOPPED)
		return;

	if (bts->handle.event)
		__bts_event_start(bts->handle.event);
}

/* Pause BTS (ACTIVE -> INACTIVE) around PMI processing. */
void intel_bts_disable_local(void)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);

	/*
	 * Here we transition from ACTIVE to INACTIVE;
	 * do nothing for STOPPED or INACTIVE.
	 */
	if (READ_ONCE(bts->state) != BTS_STATE_ACTIVE)
		return;

	if (bts->handle.event)
		__bts_event_stop(bts->handle.event, BTS_STATE_INACTIVE);
}

/*
 * Choose how much space (buf->end) the hardware may fill before the
 * next PMI, switching to the next physical chunk when the current one
 * is nearly full. Returns 0 or -ENOSPC.
 */
static int
bts_buffer_reset(struct bts_buffer *buf, struct perf_output_handle *handle)
{
	unsigned long head, space, next_space, pad, gap, skip, wakeup;
	unsigned int next_buf;
	struct bts_phys *phys, *next_phys;
	int ret;

	if (buf->snapshot)
		return 0;

	head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

	phys = &buf->buf[buf->cur_buf];
	space = phys->offset + phys->displacement + phys->size - head;
	pad = space;
	if (space > handle->size) {
		space = handle->size;
		space -= space % BTS_RECORD_SIZE;
	}
	if (space <= BTS_SAFETY_MARGIN) {
		/* See if next phys buffer has more space */
		next_buf = buf->cur_buf + 1;
		if (next_buf >= buf->nr_bufs)
			next_buf = 0;
		next_phys = &buf->buf[next_buf];
		gap = buf_size(phys->page) - phys->displacement - phys->size +
		      next_phys->displacement;
		skip = pad + gap;
		if (handle->size >= skip) {
			next_space = next_phys->size;
			if (next_space + skip > handle->size) {
				next_space = handle->size - skip;
				next_space -= next_space % BTS_RECORD_SIZE;
			}
			if (next_space > space || !space) {
				if (pad)
					bts_buffer_pad_out(phys, head);
				ret = perf_aux_output_skip(handle, skip);
				if (ret)
					return ret;
				/* Advance to next phys buffer */
				phys = next_phys;
				space = next_space;
				head = phys->offset + phys->displacement;
				/*
				 * After this, cur_buf and head won't match ds
				 * anymore, so we must not be racing with
				 * bts_update().
				 */
				buf->cur_buf = next_buf;
				local_set(&buf->head, head);
			}
		}
	}

	/* Don't go far beyond wakeup watermark */
	wakeup = BTS_SAFETY_MARGIN + BTS_RECORD_SIZE + handle->wakeup -
		 handle->head;
	if (space > wakeup) {
		space = wakeup;
		space -= space % BTS_RECORD_SIZE;
	}

	buf->end = head + space;

	/*
	 * If we have no space, the lost notification would have been sent when
	 * we hit absolute_maximum - see bts_update()
	 */
	if (!space)
		return -ENOSPC;

	return 0;
}

/*
 * PMI handler: close the current AUX transaction when new data arrived
 * and open a fresh one; returns non-zero when the NMI was ours.
 */
int intel_bts_interrupt(void)
{
	struct debug_store *ds = this_cpu_ptr(&cpu_hw_events)->ds;
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct perf_event *event = bts->handle.event;
	struct bts_buffer *buf;
	s64 old_head;
	int err = -ENOSPC, handled = 0;

	/*
	 * The only surefire way of knowing if this NMI is ours is by checking
	 * the write ptr against the PMI threshold.
	 */
	if (ds && (ds->bts_index >= ds->bts_interrupt_threshold))
		handled = 1;

	/*
	 * this is wrapped in intel_bts_enable_local/intel_bts_disable_local,
	 * so we can only be INACTIVE or STOPPED
	 */
	if (READ_ONCE(bts->state) == BTS_STATE_STOPPED)
		return handled;

	buf = perf_get_aux(&bts->handle);
	if (!buf)
		return handled;

	/*
	 * Skip snapshot counters: they don't use the interrupt, but
	 * there's no other way of telling, because the pointer will
	 * keep moving
	 */
	if (buf->snapshot)
		return 0;

	old_head = local_read(&buf->head);
	bts_update(bts);

	/* no new data */
	if (old_head == local_read(&buf->head))
		return handled;

	perf_aux_output_end(&bts->handle, local_xchg(&buf->data_size, 0));

	buf = perf_aux_output_begin(&bts->handle, event);
	if (buf)
		err = bts_buffer_reset(buf, &bts->handle);

	if (err) {
		WRITE_ONCE(bts->state, BTS_STATE_STOPPED);

		if (buf) {
			/*
			 * BTS_STATE_STOPPED should be visible before
			 * cleared handle::event
			 */
			barrier();
			perf_aux_output_end(&bts->handle, 0);
		}
	}

	return 1;
}

static void bts_event_del(struct perf_event *event, int mode)
{
	bts_event_stop(event, PERF_EF_UPDATE);
}

/* PMU ->add: claim the fixed BTS counter; only one event per CPU. */
static int bts_event_add(struct perf_event *event, int mode)
{
	struct bts_ctx *bts = this_cpu_ptr(&bts_ctx);
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	event->hw.state = PERF_HES_STOPPED;

	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		return -EBUSY;

	if (bts->handle.event)
		return -EBUSY;

	if (mode & PERF_EF_START) {
		bts_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			return -EINVAL;
	}

	return 0;
}

static void bts_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	x86_del_exclusive(x86_lbr_exclusive_bts);
}

/* PMU ->event_init: permission checks plus exclusive-BTS reservation. */
static int bts_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != bts_pmu.type)
		return -ENOENT;

	/*
	 * BTS leaks kernel addresses even when CPL0 tracing is
	 * disabled, so disallow intel_bts driver for unprivileged
	 * users on paranoid systems since it provides trace data
	 * to the user in a zero-copy fashion.
	 */
	if (event->attr.exclude_kernel) {
		ret = perf_allow_kernel(&event->attr);
		if (ret)
			return ret;
	}

	if (x86_add_exclusive(x86_lbr_exclusive_bts))
		return -EBUSY;

	ret = x86_reserve_hardware();
	if (ret) {
		x86_del_exclusive(x86_lbr_exclusive_bts);
		return ret;
	}

	event->destroy = bts_event_destroy;

	return 0;
}

static void bts_event_read(struct perf_event *event)
{
}

/* Register the intel_bts PMU when the CPU supports BTS and PTI is off. */
static __init int bts_init(void)
{
	if (!boot_cpu_has(X86_FEATURE_DTES64) || !x86_pmu.bts)
		return -ENODEV;

	if (boot_cpu_has(X86_FEATURE_PTI)) {
		/*
		 * BTS hardware writes through a virtual memory map we must
		 * either use the kernel physical map, or the user mapping of
		 * the AUX buffer.
		 *
		 * However, since this driver supports per-CPU and per-task inherit
		 * we cannot use the user mapping since it will not be available
		 * if we're not running the owning process.
		 *
		 * With PTI we can't use the kernel map either, because its not
		 * there when we run userspace.
		 *
		 * For now, disable this driver when using PTI.
		 */
		return -ENODEV;
	}

	bts_pmu.capabilities	= PERF_PMU_CAP_AUX_NO_SG | PERF_PMU_CAP_ITRACE |
				  PERF_PMU_CAP_EXCLUSIVE;
	bts_pmu.task_ctx_nr	= perf_sw_context;
	bts_pmu.event_init	= bts_event_init;
	bts_pmu.add		= bts_event_add;
	bts_pmu.del		= bts_event_del;
	bts_pmu.start		= bts_event_start;
	bts_pmu.stop		= bts_event_stop;
	bts_pmu.read		= bts_event_read;
	bts_pmu.setup_aux	= bts_buffer_setup_aux;
	bts_pmu.free_aux	= bts_buffer_free_aux;

	return perf_pmu_register(&bts_pmu, "intel_bts", -1);
}
arch_initcall(bts_init);
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Freescale Semiconductor, Inc.
 * Copyright 2017~2018 NXP
 *
 */

#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "../clk-fractional-divider.h"
#include "clk.h"

/* PCC register field layout */
#define PCG_PR_MASK		BIT(31)
#define PCG_PCS_SHIFT	24
#define PCG_PCS_MASK	0x7
#define PCG_CGC_SHIFT	30
#define PCG_FRAC_SHIFT	3
#define PCG_FRAC_WIDTH	1
#define PCG_PCD_SHIFT	0
#define PCG_PCD_WIDTH	3

#define SW_RST		BIT(28)

/*
 * Gate-enable that also releases the peripheral's software reset
 * (SW_RST) after the clock is ungated.
 */
static int pcc_gate_enable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	unsigned long flags;
	u32 val;
	int ret;

	ret = clk_gate_ops.enable(hw);
	if (ret)
		return ret;

	spin_lock_irqsave(gate->lock, flags);
	/*
	 * release the sw reset for peripherals associated with
	 * this pcc clock.
	 */
	val = readl(gate->reg);
	val |= SW_RST;
	writel(val, gate->reg);

	spin_unlock_irqrestore(gate->lock, flags);

	return 0;
}

static void pcc_gate_disable(struct clk_hw *hw)
{
	clk_gate_ops.disable(hw);
}

static int pcc_gate_is_enabled(struct clk_hw *hw)
{
	return clk_gate_ops.is_enabled(hw);
}

/* Gate ops variant used when the PCC has a SW_RST bit to manage. */
static const struct clk_ops pcc_gate_ops = {
	.enable = pcc_gate_enable,
	.disable = pcc_gate_disable,
	.is_enabled = pcc_gate_is_enabled,
};

/*
 * Register a composite PCC clock made of an optional mux, fractional
 * divider and gate, all driven from a single PCC register. Returns the
 * clk_hw, NULL when the PCC slot is not present (PR bit clear), or an
 * ERR_PTR on allocation/registration failure.
 */
static struct clk_hw *imx_ulp_clk_hw_composite(const char *name,
				      const char * const *parent_names,
				      int num_parents, bool mux_present,
				      bool rate_present, bool gate_present,
				      void __iomem *reg, bool has_swrst)
{
	struct clk_hw *mux_hw = NULL, *fd_hw = NULL, *gate_hw = NULL;
	struct clk_fractional_divider *fd = NULL;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux = NULL;
	struct clk_hw *hw;
	u32 val;

	val = readl(reg);
	if (!(val & PCG_PR_MASK)) {
		pr_info("PCC PR is 0 for clk:%s, bypass\n", name);
		return NULL;
	}

	if (mux_present) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);
		mux_hw = &mux->hw;
		mux->reg = reg;
		mux->shift = PCG_PCS_SHIFT;
		mux->mask = PCG_PCS_MASK;
		if (has_swrst)
			mux->lock = &imx_ccm_lock;
	}

	if (rate_present) {
		fd = kzalloc(sizeof(*fd), GFP_KERNEL);
		if (!fd) {
			kfree(mux);
			return ERR_PTR(-ENOMEM);
		}
		fd_hw = &fd->hw;
		fd->reg = reg;
		fd->mshift = PCG_FRAC_SHIFT;
		fd->mwidth = PCG_FRAC_WIDTH;
		fd->nshift = PCG_PCD_SHIFT;
		fd->nwidth = PCG_PCD_WIDTH;
		fd->flags = CLK_FRAC_DIVIDER_ZERO_BASED;
		if (has_swrst)
			fd->lock = &imx_ccm_lock;
	}

	if (gate_present) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			kfree(mux);
			kfree(fd);
			return ERR_PTR(-ENOMEM);
		}
		gate_hw = &gate->hw;
		gate->reg = reg;
		gate->bit_idx = PCG_CGC_SHIFT;
		if (has_swrst)
			gate->lock = &imx_ccm_lock;
		/*
		 * make sure clock is gated during clock tree initialization,
		 * the HW ONLY allow clock parent/rate changed with clock gated,
		 * during clock tree initialization, clocks could be enabled
		 * by bootloader, so the HW status will mismatch with clock tree
		 * prepare count, then clock core driver will allow parent/rate
		 * change since the prepare count is zero, but HW actually
		 * prevent the parent/rate change due to the clock is enabled.
		 */
		val = readl_relaxed(reg);
		val &= ~(1 << PCG_CGC_SHIFT);
		writel_relaxed(val, reg);
	}

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
				       mux_hw, &clk_mux_ops, fd_hw,
				       &clk_fractional_divider_ops, gate_hw,
				       has_swrst ? &pcc_gate_ops : &clk_gate_ops,
				       CLK_SET_RATE_GATE |
				       CLK_SET_PARENT_GATE |
				       CLK_SET_RATE_NO_REPARENT);
	if (IS_ERR(hw)) {
		kfree(mux);
		kfree(fd);
		kfree(gate);
	}

	return hw;
}

/* i.MX7ULP wrapper: PCC without software reset handling. */
struct clk_hw *imx7ulp_clk_hw_composite(const char *name,
					const char * const *parent_names,
					int num_parents, bool mux_present,
					bool rate_present, bool gate_present,
					void __iomem *reg)
{
	return imx_ulp_clk_hw_composite(name, parent_names, num_parents,
					mux_present, rate_present,
					gate_present, reg, false);
}

/* i.MX8ULP wrapper: caller chooses software reset handling. */
struct clk_hw *imx8ulp_clk_hw_composite(const char *name,
					const char * const *parent_names,
					int num_parents, bool mux_present,
					bool rate_present, bool gate_present,
					void __iomem *reg, bool has_swrst)
{
	return imx_ulp_clk_hw_composite(name, parent_names, num_parents,
					mux_present, rate_present,
					gate_present, reg, has_swrst);
}
EXPORT_SYMBOL_GPL(imx8ulp_clk_hw_composite);
// SPDX-License-Identifier: GPL-2.0-only // Copyright(c) 2021 Intel Corporation. All rights reserved. #include <linux/platform_device.h> #include <linux/mod_devicetable.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/sizes.h> #include <linux/bits.h> #include <cxl/mailbox.h> #include <linux/unaligned.h> #include <crypto/sha2.h> #include <cxlmem.h> #include "trace.h" #define LSA_SIZE SZ_128K #define FW_SIZE SZ_64M #define FW_SLOTS 3 #define DEV_SIZE SZ_2G #define EFFECT(x) (1U << x) #define MOCK_INJECT_DEV_MAX 8 #define MOCK_INJECT_TEST_MAX 128 static unsigned int poison_inject_dev_max = MOCK_INJECT_DEV_MAX; enum cxl_command_effects { CONF_CHANGE_COLD_RESET = 0, CONF_CHANGE_IMMEDIATE, DATA_CHANGE_IMMEDIATE, POLICY_CHANGE_IMMEDIATE, LOG_CHANGE_IMMEDIATE, SECURITY_CHANGE_IMMEDIATE, BACKGROUND_OP, SECONDARY_MBOX_SUPPORTED, }; #define CXL_CMD_EFFECT_NONE cpu_to_le16(0) static struct cxl_cel_entry mock_cel[] = { { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_SUPPORTED_LOGS), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_IDENTIFY), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_LSA), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_PARTITION_INFO), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_SET_LSA), .effect = cpu_to_le16(EFFECT(CONF_CHANGE_IMMEDIATE) | EFFECT(DATA_CHANGE_IMMEDIATE)), }, { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_HEALTH_INFO), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_POISON), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_INJECT_POISON), .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)), }, { .opcode = cpu_to_le16(CXL_MBOX_OP_CLEAR_POISON), .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE)), }, { .opcode = cpu_to_le16(CXL_MBOX_OP_GET_FW_INFO), .effect = CXL_CMD_EFFECT_NONE, }, { .opcode = cpu_to_le16(CXL_MBOX_OP_TRANSFER_FW), .effect = 
cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) | EFFECT(BACKGROUND_OP)), }, { .opcode = cpu_to_le16(CXL_MBOX_OP_ACTIVATE_FW), .effect = cpu_to_le16(EFFECT(CONF_CHANGE_COLD_RESET) | EFFECT(CONF_CHANGE_IMMEDIATE)), }, { .opcode = cpu_to_le16(CXL_MBOX_OP_SANITIZE), .effect = cpu_to_le16(EFFECT(DATA_CHANGE_IMMEDIATE) | EFFECT(SECURITY_CHANGE_IMMEDIATE) | EFFECT(BACKGROUND_OP)), }, }; /* See CXL 2.0 Table 181 Get Health Info Output Payload */ struct cxl_mbox_health_info { u8 health_status; u8 media_status; u8 ext_status; u8 life_used; __le16 temperature; __le32 dirty_shutdowns; __le32 volatile_errors; __le32 pmem_errors; } __packed; static struct { struct cxl_mbox_get_supported_logs gsl; struct cxl_gsl_entry entry; } mock_gsl_payload = { .gsl = { .entries = cpu_to_le16(1), }, .entry = { .uuid = DEFINE_CXL_CEL_UUID, .size = cpu_to_le32(sizeof(mock_cel)), }, }; #define PASS_TRY_LIMIT 3 #define CXL_TEST_EVENT_CNT_MAX 15 /* Set a number of events to return at a time for simulation. */ #define CXL_TEST_EVENT_RET_MAX 4 struct mock_event_log { u16 clear_idx; u16 cur_idx; u16 nr_events; u16 nr_overflow; u16 overflow_reset; struct cxl_event_record_raw *events[CXL_TEST_EVENT_CNT_MAX]; }; struct mock_event_store { struct mock_event_log mock_logs[CXL_EVENT_TYPE_MAX]; u32 ev_status; }; struct cxl_mockmem_data { void *lsa; void *fw; int fw_slot; int fw_staged; size_t fw_size; u32 security_state; u8 user_pass[NVDIMM_PASSPHRASE_LEN]; u8 master_pass[NVDIMM_PASSPHRASE_LEN]; int user_limit; int master_limit; struct mock_event_store mes; struct cxl_memdev_state *mds; u8 event_buf[SZ_4K]; u64 timestamp; unsigned long sanitize_timeout; }; static struct mock_event_log *event_find_log(struct device *dev, int log_type) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); if (log_type >= CXL_EVENT_TYPE_MAX) return NULL; return &mdata->mes.mock_logs[log_type]; } static struct cxl_event_record_raw *event_get_current(struct mock_event_log *log) { return log->events[log->cur_idx]; } static void 
event_reset_log(struct mock_event_log *log) { log->cur_idx = 0; log->clear_idx = 0; log->nr_overflow = log->overflow_reset; } /* Handle can never be 0 use 1 based indexing for handle */ static u16 event_get_clear_handle(struct mock_event_log *log) { return log->clear_idx + 1; } /* Handle can never be 0 use 1 based indexing for handle */ static __le16 event_get_cur_event_handle(struct mock_event_log *log) { u16 cur_handle = log->cur_idx + 1; return cpu_to_le16(cur_handle); } static bool event_log_empty(struct mock_event_log *log) { return log->cur_idx == log->nr_events; } static void mes_add_event(struct mock_event_store *mes, enum cxl_event_log_type log_type, struct cxl_event_record_raw *event) { struct mock_event_log *log; if (WARN_ON(log_type >= CXL_EVENT_TYPE_MAX)) return; log = &mes->mock_logs[log_type]; if ((log->nr_events + 1) > CXL_TEST_EVENT_CNT_MAX) { log->nr_overflow++; log->overflow_reset = log->nr_overflow; return; } log->events[log->nr_events] = event; log->nr_events++; } /* * Vary the number of events returned to simulate events occuring while the * logs are being read. 
*/ static int ret_limit = 0; static int mock_get_event(struct device *dev, struct cxl_mbox_cmd *cmd) { struct cxl_get_event_payload *pl; struct mock_event_log *log; u16 nr_overflow; u8 log_type; int i; if (cmd->size_in != sizeof(log_type)) return -EINVAL; ret_limit = (ret_limit + 1) % CXL_TEST_EVENT_RET_MAX; if (!ret_limit) ret_limit = 1; if (cmd->size_out < struct_size(pl, records, ret_limit)) return -EINVAL; log_type = *((u8 *)cmd->payload_in); if (log_type >= CXL_EVENT_TYPE_MAX) return -EINVAL; memset(cmd->payload_out, 0, struct_size(pl, records, 0)); log = event_find_log(dev, log_type); if (!log || event_log_empty(log)) return 0; pl = cmd->payload_out; for (i = 0; i < ret_limit && !event_log_empty(log); i++) { memcpy(&pl->records[i], event_get_current(log), sizeof(pl->records[i])); pl->records[i].event.generic.hdr.handle = event_get_cur_event_handle(log); log->cur_idx++; } cmd->size_out = struct_size(pl, records, i); pl->record_count = cpu_to_le16(i); if (!event_log_empty(log)) pl->flags |= CXL_GET_EVENT_FLAG_MORE_RECORDS; if (log->nr_overflow) { u64 ns; pl->flags |= CXL_GET_EVENT_FLAG_OVERFLOW; pl->overflow_err_count = cpu_to_le16(nr_overflow); ns = ktime_get_real_ns(); ns -= 5000000000; /* 5s ago */ pl->first_overflow_timestamp = cpu_to_le64(ns); ns = ktime_get_real_ns(); ns -= 1000000000; /* 1s ago */ pl->last_overflow_timestamp = cpu_to_le64(ns); } return 0; } static int mock_clear_event(struct device *dev, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_clear_event_payload *pl = cmd->payload_in; struct mock_event_log *log; u8 log_type = pl->event_log; u16 handle; int nr; if (log_type >= CXL_EVENT_TYPE_MAX) return -EINVAL; log = event_find_log(dev, log_type); if (!log) return 0; /* No mock data in this log */ /* * This check is technically not invalid per the specification AFAICS. * (The host could 'guess' handles and clear them in order). * However, this is not good behavior for the host so test it. 
*/ if (log->clear_idx + pl->nr_recs > log->cur_idx) { dev_err(dev, "Attempting to clear more events than returned!\n"); return -EINVAL; } /* Check handle order prior to clearing events */ for (nr = 0, handle = event_get_clear_handle(log); nr < pl->nr_recs; nr++, handle++) { if (handle != le16_to_cpu(pl->handles[nr])) { dev_err(dev, "Clearing events out of order\n"); return -EINVAL; } } if (log->nr_overflow) log->nr_overflow = 0; /* Clear events */ log->clear_idx += pl->nr_recs; return 0; } static void cxl_mock_event_trigger(struct device *dev) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); struct mock_event_store *mes = &mdata->mes; int i; for (i = CXL_EVENT_TYPE_INFO; i < CXL_EVENT_TYPE_MAX; i++) { struct mock_event_log *log; log = event_find_log(dev, i); if (log) event_reset_log(log); } cxl_mem_get_event_records(mdata->mds, mes->ev_status); } struct cxl_event_record_raw maint_needed = { .id = UUID_INIT(0xBA5EBA11, 0xABCD, 0xEFEB, 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5), .event.generic = { .hdr = { .length = sizeof(struct cxl_event_record_raw), .flags[0] = CXL_EVENT_RECORD_FLAG_MAINT_NEEDED, /* .handle = Set dynamically */ .related_handle = cpu_to_le16(0xa5b6), }, .data = { 0xDE, 0xAD, 0xBE, 0xEF }, }, }; struct cxl_event_record_raw hardware_replace = { .id = UUID_INIT(0xABCDEFEB, 0xBA11, 0xBA5E, 0xa5, 0x5a, 0xa5, 0x5a, 0xa5, 0xa5, 0x5a, 0xa5), .event.generic = { .hdr = { .length = sizeof(struct cxl_event_record_raw), .flags[0] = CXL_EVENT_RECORD_FLAG_HW_REPLACE, /* .handle = Set dynamically */ .related_handle = cpu_to_le16(0xb6a5), }, .data = { 0xDE, 0xAD, 0xBE, 0xEF }, }, }; struct cxl_test_gen_media { uuid_t id; struct cxl_event_gen_media rec; } __packed; struct cxl_test_gen_media gen_media = { .id = CXL_EVENT_GEN_MEDIA_UUID, .rec = { .media_hdr = { .hdr = { .length = sizeof(struct cxl_test_gen_media), .flags[0] = CXL_EVENT_RECORD_FLAG_PERMANENT, /* .handle = Set dynamically */ .related_handle = cpu_to_le16(0), }, .phys_addr = 
cpu_to_le64(0x2000), .descriptor = CXL_GMER_EVT_DESC_UNCORECTABLE_EVENT, .type = CXL_GMER_MEM_EVT_TYPE_DATA_PATH_ERROR, .transaction_type = CXL_GMER_TRANS_HOST_WRITE, /* .validity_flags = <set below> */ .channel = 1, .rank = 30, }, }, }; struct cxl_test_dram { uuid_t id; struct cxl_event_dram rec; } __packed; struct cxl_test_dram dram = { .id = CXL_EVENT_DRAM_UUID, .rec = { .media_hdr = { .hdr = { .length = sizeof(struct cxl_test_dram), .flags[0] = CXL_EVENT_RECORD_FLAG_PERF_DEGRADED, /* .handle = Set dynamically */ .related_handle = cpu_to_le16(0), }, .phys_addr = cpu_to_le64(0x8000), .descriptor = CXL_GMER_EVT_DESC_THRESHOLD_EVENT, .type = CXL_GMER_MEM_EVT_TYPE_INV_ADDR, .transaction_type = CXL_GMER_TRANS_INTERNAL_MEDIA_SCRUB, /* .validity_flags = <set below> */ .channel = 1, }, .bank_group = 5, .bank = 2, .column = {0xDE, 0xAD}, }, }; struct cxl_test_mem_module { uuid_t id; struct cxl_event_mem_module rec; } __packed; struct cxl_test_mem_module mem_module = { .id = CXL_EVENT_MEM_MODULE_UUID, .rec = { .hdr = { .length = sizeof(struct cxl_test_mem_module), /* .handle = Set dynamically */ .related_handle = cpu_to_le16(0), }, .event_type = CXL_MMER_TEMP_CHANGE, .info = { .health_status = CXL_DHI_HS_PERFORMANCE_DEGRADED, .media_status = CXL_DHI_MS_ALL_DATA_LOST, .add_status = (CXL_DHI_AS_CRITICAL << 2) | (CXL_DHI_AS_WARNING << 4) | (CXL_DHI_AS_WARNING << 5), .device_temp = { 0xDE, 0xAD}, .dirty_shutdown_cnt = { 0xde, 0xad, 0xbe, 0xef }, .cor_vol_err_cnt = { 0xde, 0xad, 0xbe, 0xef }, .cor_per_err_cnt = { 0xde, 0xad, 0xbe, 0xef }, } }, }; static int mock_set_timestamp(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mockmem_data *mdata = dev_get_drvdata(cxlds->dev); struct cxl_mbox_set_timestamp_in *ts = cmd->payload_in; if (cmd->size_in != sizeof(*ts)) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; mdata->timestamp = le64_to_cpu(ts->timestamp); return 0; } static void cxl_mock_add_event_logs(struct mock_event_store *mes) { 
put_unaligned_le16(CXL_GMER_VALID_CHANNEL | CXL_GMER_VALID_RANK, &gen_media.rec.media_hdr.validity_flags); put_unaligned_le16(CXL_DER_VALID_CHANNEL | CXL_DER_VALID_BANK_GROUP | CXL_DER_VALID_BANK | CXL_DER_VALID_COLUMN, &dram.rec.media_hdr.validity_flags); mes_add_event(mes, CXL_EVENT_TYPE_INFO, &maint_needed); mes_add_event(mes, CXL_EVENT_TYPE_INFO, (struct cxl_event_record_raw *)&gen_media); mes_add_event(mes, CXL_EVENT_TYPE_INFO, (struct cxl_event_record_raw *)&mem_module); mes->ev_status |= CXLDEV_EVENT_STATUS_INFO; mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &maint_needed); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, (struct cxl_event_record_raw *)&dram); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, (struct cxl_event_record_raw *)&gen_media); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, (struct cxl_event_record_raw *)&mem_module); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, (struct cxl_event_record_raw *)&dram); /* Overflow this log */ mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FAIL, &hardware_replace); mes->ev_status |= CXLDEV_EVENT_STATUS_FAIL; mes_add_event(mes, CXL_EVENT_TYPE_FATAL, &hardware_replace); mes_add_event(mes, CXL_EVENT_TYPE_FATAL, (struct cxl_event_record_raw *)&dram); mes->ev_status |= CXLDEV_EVENT_STATUS_FATAL; } static int mock_gsl(struct cxl_mbox_cmd *cmd) { if (cmd->size_out < sizeof(mock_gsl_payload)) return -EINVAL; 
memcpy(cmd->payload_out, &mock_gsl_payload, sizeof(mock_gsl_payload)); cmd->size_out = sizeof(mock_gsl_payload); return 0; } static int mock_get_log(struct cxl_memdev_state *mds, struct cxl_mbox_cmd *cmd) { struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; struct cxl_mbox_get_log *gl = cmd->payload_in; u32 offset = le32_to_cpu(gl->offset); u32 length = le32_to_cpu(gl->length); uuid_t uuid = DEFINE_CXL_CEL_UUID; void *data = &mock_cel; if (cmd->size_in < sizeof(*gl)) return -EINVAL; if (length > cxl_mbox->payload_size) return -EINVAL; if (offset + length > sizeof(mock_cel)) return -EINVAL; if (!uuid_equal(&gl->uuid, &uuid)) return -EINVAL; if (length > cmd->size_out) return -EINVAL; memcpy(cmd->payload_out, data + offset, length); return 0; } static int mock_rcd_id(struct cxl_mbox_cmd *cmd) { struct cxl_mbox_identify id = { .fw_revision = { "mock fw v1 " }, .total_capacity = cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), .volatile_capacity = cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), }; if (cmd->size_out < sizeof(id)) return -EINVAL; memcpy(cmd->payload_out, &id, sizeof(id)); return 0; } static int mock_id(struct cxl_mbox_cmd *cmd) { struct cxl_mbox_identify id = { .fw_revision = { "mock fw v1 " }, .lsa_size = cpu_to_le32(LSA_SIZE), .partition_align = cpu_to_le64(SZ_256M / CXL_CAPACITY_MULTIPLIER), .total_capacity = cpu_to_le64(DEV_SIZE / CXL_CAPACITY_MULTIPLIER), .inject_poison_limit = cpu_to_le16(MOCK_INJECT_TEST_MAX), }; put_unaligned_le24(CXL_POISON_LIST_MAX, id.poison_list_max_mer); if (cmd->size_out < sizeof(id)) return -EINVAL; memcpy(cmd->payload_out, &id, sizeof(id)); return 0; } static int mock_partition_info(struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_partition_info pi = { .active_volatile_cap = cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER), .active_persistent_cap = cpu_to_le64(DEV_SIZE / 2 / CXL_CAPACITY_MULTIPLIER), }; if (cmd->size_out < sizeof(pi)) return -EINVAL; memcpy(cmd->payload_out, &pi, sizeof(pi)); return 0; } void 
cxl_mockmem_sanitize_work(struct work_struct *work) { struct cxl_memdev_state *mds = container_of(work, typeof(*mds), security.poll_dwork.work); struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; mutex_lock(&cxl_mbox->mbox_mutex); if (mds->security.sanitize_node) sysfs_notify_dirent(mds->security.sanitize_node); mds->security.sanitize_active = false; mutex_unlock(&cxl_mbox->mbox_mutex); dev_dbg(mds->cxlds.dev, "sanitize complete\n"); } static int mock_sanitize(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_memdev_state *mds = mdata->mds; struct cxl_mailbox *cxl_mbox = &mds->cxlds.cxl_mbox; int rc = 0; if (cmd->size_in != 0) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } mutex_lock(&cxl_mbox->mbox_mutex); if (schedule_delayed_work(&mds->security.poll_dwork, msecs_to_jiffies(mdata->sanitize_timeout))) { mds->security.sanitize_active = true; dev_dbg(mds->cxlds.dev, "sanitize issued\n"); } else rc = -EBUSY; mutex_unlock(&cxl_mbox->mbox_mutex); return rc; } static int mock_secure_erase(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { if (cmd->size_in != 0) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } return 0; } static int mock_get_security_state(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { if (cmd->size_in) return -EINVAL; if (cmd->size_out != sizeof(u32)) return -EINVAL; memcpy(cmd->payload_out, &mdata->security_state, sizeof(u32)); return 0; } static void master_plimit_check(struct cxl_mockmem_data *mdata) { if 
(mdata->master_limit == PASS_TRY_LIMIT) return; mdata->master_limit++; if (mdata->master_limit == PASS_TRY_LIMIT) mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PLIMIT; } static void user_plimit_check(struct cxl_mockmem_data *mdata) { if (mdata->user_limit == PASS_TRY_LIMIT) return; mdata->user_limit++; if (mdata->user_limit == PASS_TRY_LIMIT) mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; } static int mock_set_passphrase(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_set_pass *set_pass; if (cmd->size_in != sizeof(*set_pass)) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } set_pass = cmd->payload_in; switch (set_pass->type) { case CXL_PMEM_SEC_PASS_MASTER: if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } /* * CXL spec rev3.0 8.2.9.8.6.2, The master pasphrase shall only be set in * the security disabled state when the user passphrase is not set. 
*/ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (memcmp(mdata->master_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) { master_plimit_check(mdata); cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } memcpy(mdata->master_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN); mdata->security_state |= CXL_PMEM_SEC_STATE_MASTER_PASS_SET; return 0; case CXL_PMEM_SEC_PASS_USER: if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (memcmp(mdata->user_pass, set_pass->old_pass, NVDIMM_PASSPHRASE_LEN)) { user_plimit_check(mdata); cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } memcpy(mdata->user_pass, set_pass->new_pass, NVDIMM_PASSPHRASE_LEN); mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PASS_SET; return 0; default: cmd->return_code = CXL_MBOX_CMD_RC_INPUT; } return -EINVAL; } static int mock_disable_passphrase(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_disable_pass *dis_pass; if (cmd->size_in != sizeof(*dis_pass)) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } dis_pass = cmd->payload_in; switch (dis_pass->type) { case CXL_PMEM_SEC_PASS_MASTER: if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (!(mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET)) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (memcmp(dis_pass->pass, mdata->master_pass, NVDIMM_PASSPHRASE_LEN)) { master_plimit_check(mdata); cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } mdata->master_limit = 0; memset(mdata->master_pass, 0, NVDIMM_PASSPHRASE_LEN); mdata->security_state &= ~CXL_PMEM_SEC_STATE_MASTER_PASS_SET; return 0; case CXL_PMEM_SEC_PASS_USER: 
if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (memcmp(dis_pass->pass, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) { user_plimit_check(mdata); cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } mdata->user_limit = 0; memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); mdata->security_state &= ~(CXL_PMEM_SEC_STATE_USER_PASS_SET | CXL_PMEM_SEC_STATE_LOCKED); return 0; default: cmd->return_code = CXL_MBOX_CMD_RC_INPUT; return -EINVAL; } return 0; } static int mock_freeze_security(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { if (cmd->size_in != 0) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) return 0; mdata->security_state |= CXL_PMEM_SEC_STATE_FROZEN; return 0; } static int mock_unlock_security(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { if (cmd->size_in != NVDIMM_PASSPHRASE_LEN) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (!(mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET)) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (memcmp(cmd->payload_in, mdata->user_pass, NVDIMM_PASSPHRASE_LEN)) { if (++mdata->user_limit == PASS_TRY_LIMIT) mdata->security_state |= CXL_PMEM_SEC_STATE_USER_PLIMIT; cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } mdata->user_limit = 0; mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED; return 0; } static int 
mock_passphrase_secure_erase(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_pass_erase *erase; if (cmd->size_in != sizeof(*erase)) return -EINVAL; if (cmd->size_out != 0) return -EINVAL; erase = cmd->payload_in; if (mdata->security_state & CXL_PMEM_SEC_STATE_FROZEN) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PLIMIT && erase->type == CXL_PMEM_SEC_PASS_USER) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PLIMIT && erase->type == CXL_PMEM_SEC_PASS_MASTER) { cmd->return_code = CXL_MBOX_CMD_RC_SECURITY; return -ENXIO; } switch (erase->type) { case CXL_PMEM_SEC_PASS_MASTER: /* * The spec does not clearly define the behavior of the scenario * where a master passphrase is passed in while the master * passphrase is not set and user passphrase is not set. The * code will take the assumption that it will behave the same * as a CXL secure erase command without passphrase (0x4401). */ if (mdata->security_state & CXL_PMEM_SEC_STATE_MASTER_PASS_SET) { if (memcmp(mdata->master_pass, erase->pass, NVDIMM_PASSPHRASE_LEN)) { master_plimit_check(mdata); cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } mdata->master_limit = 0; mdata->user_limit = 0; mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); mdata->security_state &= ~CXL_PMEM_SEC_STATE_LOCKED; } else { /* * CXL rev3 8.2.9.8.6.3 Disable Passphrase * When master passphrase is disabled, the device shall * return Invalid Input for the Passphrase Secure Erase * command with master passphrase. */ return -EINVAL; } /* Scramble encryption keys so that data is effectively erased */ break; case CXL_PMEM_SEC_PASS_USER: /* * The spec does not clearly define the behavior of the scenario * where a user passphrase is passed in while the user * passphrase is not set. 
The code will take the assumption that * it will behave the same as a CXL secure erase command without * passphrase (0x4401). */ if (mdata->security_state & CXL_PMEM_SEC_STATE_USER_PASS_SET) { if (memcmp(mdata->user_pass, erase->pass, NVDIMM_PASSPHRASE_LEN)) { user_plimit_check(mdata); cmd->return_code = CXL_MBOX_CMD_RC_PASSPHRASE; return -ENXIO; } mdata->user_limit = 0; mdata->security_state &= ~CXL_PMEM_SEC_STATE_USER_PASS_SET; memset(mdata->user_pass, 0, NVDIMM_PASSPHRASE_LEN); } /* * CXL rev3 Table 8-118 * If user passphrase is not set or supported by device, current * passphrase value is ignored. Will make the assumption that * the operation will proceed as secure erase w/o passphrase * since spec is not explicit. */ /* Scramble encryption keys so that data is effectively erased */ break; default: return -EINVAL; } return 0; } static int mock_get_lsa(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_lsa *get_lsa = cmd->payload_in; void *lsa = mdata->lsa; u32 offset, length; if (sizeof(*get_lsa) > cmd->size_in) return -EINVAL; offset = le32_to_cpu(get_lsa->offset); length = le32_to_cpu(get_lsa->length); if (offset + length > LSA_SIZE) return -EINVAL; if (length > cmd->size_out) return -EINVAL; memcpy(cmd->payload_out, lsa + offset, length); return 0; } static int mock_set_lsa(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_set_lsa *set_lsa = cmd->payload_in; void *lsa = mdata->lsa; u32 offset, length; if (sizeof(*set_lsa) > cmd->size_in) return -EINVAL; offset = le32_to_cpu(set_lsa->offset); length = cmd->size_in - sizeof(*set_lsa); if (offset + length > LSA_SIZE) return -EINVAL; memcpy(lsa + offset, &set_lsa->data[0], length); return 0; } static int mock_health_info(struct cxl_mbox_cmd *cmd) { struct cxl_mbox_health_info health_info = { /* set flags for maint needed, perf degraded, hw replacement */ .health_status = 0x7, /* set media status to "All Data Lost" */ .media_status = 0x3, /* * set ext_status 
flags for: * ext_life_used: normal, * ext_temperature: critical, * ext_corrected_volatile: warning, * ext_corrected_persistent: normal, */ .ext_status = 0x18, .life_used = 15, .temperature = cpu_to_le16(25), .dirty_shutdowns = cpu_to_le32(10), .volatile_errors = cpu_to_le32(20), .pmem_errors = cpu_to_le32(30), }; if (cmd->size_out < sizeof(health_info)) return -EINVAL; memcpy(cmd->payload_out, &health_info, sizeof(health_info)); return 0; } static struct mock_poison { struct cxl_dev_state *cxlds; u64 dpa; } mock_poison_list[MOCK_INJECT_TEST_MAX]; static struct cxl_mbox_poison_out * cxl_get_injected_po(struct cxl_dev_state *cxlds, u64 offset, u64 length) { struct cxl_mbox_poison_out *po; int nr_records = 0; u64 dpa; po = kzalloc(struct_size(po, record, poison_inject_dev_max), GFP_KERNEL); if (!po) return NULL; for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { if (mock_poison_list[i].cxlds != cxlds) continue; if (mock_poison_list[i].dpa < offset || mock_poison_list[i].dpa > offset + length - 1) continue; dpa = mock_poison_list[i].dpa + CXL_POISON_SOURCE_INJECTED; po->record[nr_records].address = cpu_to_le64(dpa); po->record[nr_records].length = cpu_to_le32(1); nr_records++; if (nr_records == poison_inject_dev_max) break; } /* Always return count, even when zero */ po->count = cpu_to_le16(nr_records); return po; } static int mock_get_poison(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_poison_in *pi = cmd->payload_in; struct cxl_mbox_poison_out *po; u64 offset = le64_to_cpu(pi->offset); u64 length = le64_to_cpu(pi->length); int nr_records; po = cxl_get_injected_po(cxlds, offset, length); if (!po) return -ENOMEM; nr_records = le16_to_cpu(po->count); memcpy(cmd->payload_out, po, struct_size(po, record, nr_records)); cmd->size_out = struct_size(po, record, nr_records); kfree(po); return 0; } static bool mock_poison_dev_max_injected(struct cxl_dev_state *cxlds) { int count = 0; for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { if 
(mock_poison_list[i].cxlds == cxlds) count++; } return (count >= poison_inject_dev_max); } static int mock_poison_add(struct cxl_dev_state *cxlds, u64 dpa) { /* Return EBUSY to match the CXL driver handling */ if (mock_poison_dev_max_injected(cxlds)) { dev_dbg(cxlds->dev, "Device poison injection limit has been reached: %d\n", poison_inject_dev_max); return -EBUSY; } for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { if (!mock_poison_list[i].cxlds) { mock_poison_list[i].cxlds = cxlds; mock_poison_list[i].dpa = dpa; return 0; } } dev_dbg(cxlds->dev, "Mock test poison injection limit has been reached: %d\n", MOCK_INJECT_TEST_MAX); return -ENXIO; } static bool mock_poison_found(struct cxl_dev_state *cxlds, u64 dpa) { for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { if (mock_poison_list[i].cxlds == cxlds && mock_poison_list[i].dpa == dpa) return true; } return false; } static int mock_inject_poison(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_inject_poison *pi = cmd->payload_in; u64 dpa = le64_to_cpu(pi->address); if (mock_poison_found(cxlds, dpa)) { /* Not an error to inject poison if already poisoned */ dev_dbg(cxlds->dev, "DPA: 0x%llx already poisoned\n", dpa); return 0; } return mock_poison_add(cxlds, dpa); } static bool mock_poison_del(struct cxl_dev_state *cxlds, u64 dpa) { for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { if (mock_poison_list[i].cxlds == cxlds && mock_poison_list[i].dpa == dpa) { mock_poison_list[i].cxlds = NULL; return true; } } return false; } static int mock_clear_poison(struct cxl_dev_state *cxlds, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_clear_poison *pi = cmd->payload_in; u64 dpa = le64_to_cpu(pi->address); /* * A real CXL device will write pi->write_data to the address * being cleared. In this mock, just delete this address from * the mock poison list. 
*/ if (!mock_poison_del(cxlds, dpa)) dev_dbg(cxlds->dev, "DPA: 0x%llx not in poison list\n", dpa); return 0; } static bool mock_poison_list_empty(void) { for (int i = 0; i < MOCK_INJECT_TEST_MAX; i++) { if (mock_poison_list[i].cxlds) return false; } return true; } static ssize_t poison_inject_max_show(struct device_driver *drv, char *buf) { return sysfs_emit(buf, "%u\n", poison_inject_dev_max); } static ssize_t poison_inject_max_store(struct device_driver *drv, const char *buf, size_t len) { int val; if (kstrtoint(buf, 0, &val) < 0) return -EINVAL; if (!mock_poison_list_empty()) return -EBUSY; if (val <= MOCK_INJECT_TEST_MAX) poison_inject_dev_max = val; else return -EINVAL; return len; } static DRIVER_ATTR_RW(poison_inject_max); static struct attribute *cxl_mock_mem_core_attrs[] = { &driver_attr_poison_inject_max.attr, NULL }; ATTRIBUTE_GROUPS(cxl_mock_mem_core); static int mock_fw_info(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_get_fw_info fw_info = { .num_slots = FW_SLOTS, .slot_info = (mdata->fw_slot & 0x7) | ((mdata->fw_staged & 0x7) << 3), .activation_cap = 0, }; strcpy(fw_info.slot_1_revision, "cxl_test_fw_001"); strcpy(fw_info.slot_2_revision, "cxl_test_fw_002"); strcpy(fw_info.slot_3_revision, "cxl_test_fw_003"); strcpy(fw_info.slot_4_revision, ""); if (cmd->size_out < sizeof(fw_info)) return -EINVAL; memcpy(cmd->payload_out, &fw_info, sizeof(fw_info)); return 0; } static int mock_transfer_fw(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_transfer_fw *transfer = cmd->payload_in; void *fw = mdata->fw; size_t offset, length; offset = le32_to_cpu(transfer->offset) * CXL_FW_TRANSFER_ALIGNMENT; length = cmd->size_in - sizeof(*transfer); if (offset + length > FW_SIZE) return -EINVAL; switch (transfer->action) { case CXL_FW_TRANSFER_ACTION_FULL: if (offset != 0) return -EINVAL; fallthrough; case CXL_FW_TRANSFER_ACTION_END: if (transfer->slot == 0 || transfer->slot > FW_SLOTS) return -EINVAL; 
mdata->fw_size = offset + length; break; case CXL_FW_TRANSFER_ACTION_INITIATE: case CXL_FW_TRANSFER_ACTION_CONTINUE: break; case CXL_FW_TRANSFER_ACTION_ABORT: return 0; default: return -EINVAL; } memcpy(fw + offset, transfer->data, length); usleep_range(1500, 2000); return 0; } static int mock_activate_fw(struct cxl_mockmem_data *mdata, struct cxl_mbox_cmd *cmd) { struct cxl_mbox_activate_fw *activate = cmd->payload_in; if (activate->slot == 0 || activate->slot > FW_SLOTS) return -EINVAL; switch (activate->action) { case CXL_FW_ACTIVATE_ONLINE: mdata->fw_slot = activate->slot; mdata->fw_staged = 0; return 0; case CXL_FW_ACTIVATE_OFFLINE: mdata->fw_staged = activate->slot; return 0; } return -EINVAL; } static int cxl_mock_mbox_send(struct cxl_mailbox *cxl_mbox, struct cxl_mbox_cmd *cmd) { struct device *dev = cxl_mbox->host; struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); struct cxl_memdev_state *mds = mdata->mds; struct cxl_dev_state *cxlds = &mds->cxlds; int rc = -EIO; switch (cmd->opcode) { case CXL_MBOX_OP_SET_TIMESTAMP: rc = mock_set_timestamp(cxlds, cmd); break; case CXL_MBOX_OP_GET_SUPPORTED_LOGS: rc = mock_gsl(cmd); break; case CXL_MBOX_OP_GET_LOG: rc = mock_get_log(mds, cmd); break; case CXL_MBOX_OP_IDENTIFY: if (cxlds->rcd) rc = mock_rcd_id(cmd); else rc = mock_id(cmd); break; case CXL_MBOX_OP_GET_LSA: rc = mock_get_lsa(mdata, cmd); break; case CXL_MBOX_OP_GET_PARTITION_INFO: rc = mock_partition_info(cmd); break; case CXL_MBOX_OP_GET_EVENT_RECORD: rc = mock_get_event(dev, cmd); break; case CXL_MBOX_OP_CLEAR_EVENT_RECORD: rc = mock_clear_event(dev, cmd); break; case CXL_MBOX_OP_SET_LSA: rc = mock_set_lsa(mdata, cmd); break; case CXL_MBOX_OP_GET_HEALTH_INFO: rc = mock_health_info(cmd); break; case CXL_MBOX_OP_SANITIZE: rc = mock_sanitize(mdata, cmd); break; case CXL_MBOX_OP_SECURE_ERASE: rc = mock_secure_erase(mdata, cmd); break; case CXL_MBOX_OP_GET_SECURITY_STATE: rc = mock_get_security_state(mdata, cmd); break; case CXL_MBOX_OP_SET_PASSPHRASE: rc = 
mock_set_passphrase(mdata, cmd); break; case CXL_MBOX_OP_DISABLE_PASSPHRASE: rc = mock_disable_passphrase(mdata, cmd); break; case CXL_MBOX_OP_FREEZE_SECURITY: rc = mock_freeze_security(mdata, cmd); break; case CXL_MBOX_OP_UNLOCK: rc = mock_unlock_security(mdata, cmd); break; case CXL_MBOX_OP_PASSPHRASE_SECURE_ERASE: rc = mock_passphrase_secure_erase(mdata, cmd); break; case CXL_MBOX_OP_GET_POISON: rc = mock_get_poison(cxlds, cmd); break; case CXL_MBOX_OP_INJECT_POISON: rc = mock_inject_poison(cxlds, cmd); break; case CXL_MBOX_OP_CLEAR_POISON: rc = mock_clear_poison(cxlds, cmd); break; case CXL_MBOX_OP_GET_FW_INFO: rc = mock_fw_info(mdata, cmd); break; case CXL_MBOX_OP_TRANSFER_FW: rc = mock_transfer_fw(mdata, cmd); break; case CXL_MBOX_OP_ACTIVATE_FW: rc = mock_activate_fw(mdata, cmd); break; default: break; } dev_dbg(dev, "opcode: %#x sz_in: %zd sz_out: %zd rc: %d\n", cmd->opcode, cmd->size_in, cmd->size_out, rc); return rc; } static void label_area_release(void *lsa) { vfree(lsa); } static void fw_buf_release(void *buf) { vfree(buf); } static bool is_rcd(struct platform_device *pdev) { const struct platform_device_id *id = platform_get_device_id(pdev); return !!id->driver_data; } static ssize_t event_trigger_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { cxl_mock_event_trigger(dev); return count; } static DEVICE_ATTR_WO(event_trigger); static int cxl_mock_mailbox_create(struct cxl_dev_state *cxlds) { int rc; rc = cxl_mailbox_init(&cxlds->cxl_mbox, cxlds->dev); if (rc) return rc; return 0; } static int cxl_mock_mem_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct cxl_memdev *cxlmd; struct cxl_memdev_state *mds; struct cxl_dev_state *cxlds; struct cxl_mockmem_data *mdata; struct cxl_mailbox *cxl_mbox; int rc; mdata = devm_kzalloc(dev, sizeof(*mdata), GFP_KERNEL); if (!mdata) return -ENOMEM; dev_set_drvdata(dev, mdata); mdata->lsa = vmalloc(LSA_SIZE); if (!mdata->lsa) return -ENOMEM; mdata->fw 
= vmalloc(FW_SIZE); if (!mdata->fw) return -ENOMEM; mdata->fw_slot = 2; rc = devm_add_action_or_reset(dev, label_area_release, mdata->lsa); if (rc) return rc; rc = devm_add_action_or_reset(dev, fw_buf_release, mdata->fw); if (rc) return rc; mds = cxl_memdev_state_create(dev); if (IS_ERR(mds)) return PTR_ERR(mds); cxlds = &mds->cxlds; rc = cxl_mock_mailbox_create(cxlds); if (rc) return rc; cxl_mbox = &mds->cxlds.cxl_mbox; mdata->mds = mds; cxl_mbox->mbox_send = cxl_mock_mbox_send; cxl_mbox->payload_size = SZ_4K; mds->event.buf = (struct cxl_get_event_payload *) mdata->event_buf; INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mockmem_sanitize_work); cxlds->serial = pdev->id; if (is_rcd(pdev)) cxlds->rcd = true; rc = cxl_enumerate_cmds(mds); if (rc) return rc; rc = cxl_poison_state_init(mds); if (rc) return rc; rc = cxl_set_timestamp(mds); if (rc) return rc; cxlds->media_ready = true; rc = cxl_dev_state_identify(mds); if (rc) return rc; rc = cxl_mem_create_range_info(mds); if (rc) return rc; cxl_mock_add_event_logs(&mdata->mes); cxlmd = devm_cxl_add_memdev(&pdev->dev, cxlds); if (IS_ERR(cxlmd)) return PTR_ERR(cxlmd); rc = devm_cxl_setup_fw_upload(&pdev->dev, mds); if (rc) return rc; rc = devm_cxl_sanitize_setup_notifier(&pdev->dev, cxlmd); if (rc) return rc; cxl_mem_get_event_records(mds, CXLDEV_EVENT_STATUS_ALL); return 0; } static ssize_t security_lock_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); return sysfs_emit(buf, "%u\n", !!(mdata->security_state & CXL_PMEM_SEC_STATE_LOCKED)); } static ssize_t security_lock_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); u32 mask = CXL_PMEM_SEC_STATE_FROZEN | CXL_PMEM_SEC_STATE_USER_PLIMIT | CXL_PMEM_SEC_STATE_MASTER_PLIMIT; int val; if (kstrtoint(buf, 0, &val) < 0) return -EINVAL; if (val == 1) { if (!(mdata->security_state & 
CXL_PMEM_SEC_STATE_USER_PASS_SET)) return -ENXIO; mdata->security_state |= CXL_PMEM_SEC_STATE_LOCKED; mdata->security_state &= ~mask; } else { return -EINVAL; } return count; } static DEVICE_ATTR_RW(security_lock); static ssize_t fw_buf_checksum_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); u8 hash[SHA256_DIGEST_SIZE]; unsigned char *hstr, *hptr; struct sha256_state sctx; ssize_t written = 0; int i; sha256_init(&sctx); sha256_update(&sctx, mdata->fw, mdata->fw_size); sha256_final(&sctx, hash); hstr = kzalloc((SHA256_DIGEST_SIZE * 2) + 1, GFP_KERNEL); if (!hstr) return -ENOMEM; hptr = hstr; for (i = 0; i < SHA256_DIGEST_SIZE; i++) hptr += sprintf(hptr, "%02x", hash[i]); written = sysfs_emit(buf, "%s\n", hstr); kfree(hstr); return written; } static DEVICE_ATTR_RO(fw_buf_checksum); static ssize_t sanitize_timeout_show(struct device *dev, struct device_attribute *attr, char *buf) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); return sysfs_emit(buf, "%lu\n", mdata->sanitize_timeout); } static ssize_t sanitize_timeout_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct cxl_mockmem_data *mdata = dev_get_drvdata(dev); unsigned long val; int rc; rc = kstrtoul(buf, 0, &val); if (rc) return rc; mdata->sanitize_timeout = val; return count; } static DEVICE_ATTR_RW(sanitize_timeout); static struct attribute *cxl_mock_mem_attrs[] = { &dev_attr_security_lock.attr, &dev_attr_event_trigger.attr, &dev_attr_fw_buf_checksum.attr, &dev_attr_sanitize_timeout.attr, NULL }; ATTRIBUTE_GROUPS(cxl_mock_mem); static const struct platform_device_id cxl_mock_mem_ids[] = { { .name = "cxl_mem", 0 }, { .name = "cxl_rcd", 1 }, { }, }; MODULE_DEVICE_TABLE(platform, cxl_mock_mem_ids); static struct platform_driver cxl_mock_mem_driver = { .probe = cxl_mock_mem_probe, .id_table = cxl_mock_mem_ids, .driver = { .name = KBUILD_MODNAME, .dev_groups = cxl_mock_mem_groups, 
.groups = cxl_mock_mem_core_groups, .probe_type = PROBE_PREFER_ASYNCHRONOUS, }, }; module_platform_driver(cxl_mock_mem_driver); MODULE_LICENSE("GPL v2"); MODULE_IMPORT_NS("CXL");
// SPDX-License-Identifier: GPL-2.0 /* * Texas Instruments Ethernet Switch media-access-controller (MAC) submodule/ * Ethernet MAC Sliver (CPGMAC_SL) * * Copyright (C) 2019 Texas Instruments * */ #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include "cpsw_sl.h" #define CPSW_SL_REG_NOTUSED U16_MAX static const u16 cpsw_sl_reg_map_cpsw[] = { [CPSW_SL_IDVER] = 0x00, [CPSW_SL_MACCONTROL] = 0x04, [CPSW_SL_MACSTATUS] = 0x08, [CPSW_SL_SOFT_RESET] = 0x0c, [CPSW_SL_RX_MAXLEN] = 0x10, [CPSW_SL_BOFFTEST] = 0x14, [CPSW_SL_RX_PAUSE] = 0x18, [CPSW_SL_TX_PAUSE] = 0x1c, [CPSW_SL_EMCONTROL] = 0x20, [CPSW_SL_RX_PRI_MAP] = 0x24, [CPSW_SL_TX_GAP] = 0x28, }; static const u16 cpsw_sl_reg_map_66ak2hk[] = { [CPSW_SL_IDVER] = 0x00, [CPSW_SL_MACCONTROL] = 0x04, [CPSW_SL_MACSTATUS] = 0x08, [CPSW_SL_SOFT_RESET] = 0x0c, [CPSW_SL_RX_MAXLEN] = 0x10, [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED, [CPSW_SL_RX_PAUSE] = 0x18, [CPSW_SL_TX_PAUSE] = 0x1c, [CPSW_SL_EMCONTROL] = 0x20, [CPSW_SL_RX_PRI_MAP] = 0x24, [CPSW_SL_TX_GAP] = CPSW_SL_REG_NOTUSED, }; static const u16 cpsw_sl_reg_map_66ak2x_xgbe[] = { [CPSW_SL_IDVER] = 0x00, [CPSW_SL_MACCONTROL] = 0x04, [CPSW_SL_MACSTATUS] = 0x08, [CPSW_SL_SOFT_RESET] = 0x0c, [CPSW_SL_RX_MAXLEN] = 0x10, [CPSW_SL_BOFFTEST] = CPSW_SL_REG_NOTUSED, [CPSW_SL_RX_PAUSE] = 0x18, [CPSW_SL_TX_PAUSE] = 0x1c, [CPSW_SL_EMCONTROL] = 0x20, [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED, [CPSW_SL_TX_GAP] = 0x28, }; static const u16 cpsw_sl_reg_map_66ak2elg_am65[] = { [CPSW_SL_IDVER] = CPSW_SL_REG_NOTUSED, [CPSW_SL_MACCONTROL] = 0x00, [CPSW_SL_MACSTATUS] = 0x04, [CPSW_SL_SOFT_RESET] = 0x08, [CPSW_SL_RX_MAXLEN] = CPSW_SL_REG_NOTUSED, [CPSW_SL_BOFFTEST] = 0x0c, [CPSW_SL_RX_PAUSE] = 0x10, [CPSW_SL_TX_PAUSE] = 0x40, [CPSW_SL_EMCONTROL] = 0x70, [CPSW_SL_RX_PRI_MAP] = CPSW_SL_REG_NOTUSED, [CPSW_SL_TX_GAP] = 0x74, }; #define CPSW_SL_SOFT_RESET_BIT BIT(0) #define CPSW_SL_STATUS_PN_IDLE BIT(31) #define CPSW_SL_AM65_STATUS_PN_E_IDLE BIT(30) #define 
CPSW_SL_AM65_STATUS_PN_P_IDLE BIT(29) #define CPSW_SL_AM65_STATUS_PN_TX_IDLE BIT(28) #define CPSW_SL_STATUS_IDLE_MASK_BASE (CPSW_SL_STATUS_PN_IDLE) #define CPSW_SL_STATUS_IDLE_MASK_K3 \ (CPSW_SL_STATUS_IDLE_MASK_BASE | CPSW_SL_AM65_STATUS_PN_E_IDLE | \ CPSW_SL_AM65_STATUS_PN_P_IDLE | CPSW_SL_AM65_STATUS_PN_TX_IDLE) #define CPSW_SL_CTL_FUNC_BASE \ (CPSW_SL_CTL_FULLDUPLEX |\ CPSW_SL_CTL_LOOPBACK |\ CPSW_SL_CTL_RX_FLOW_EN |\ CPSW_SL_CTL_TX_FLOW_EN |\ CPSW_SL_CTL_GMII_EN |\ CPSW_SL_CTL_TX_PACE |\ CPSW_SL_CTL_GIG |\ CPSW_SL_CTL_CMD_IDLE |\ CPSW_SL_CTL_IFCTL_A |\ CPSW_SL_CTL_IFCTL_B |\ CPSW_SL_CTL_GIG_FORCE |\ CPSW_SL_CTL_EXT_EN |\ CPSW_SL_CTL_RX_CEF_EN |\ CPSW_SL_CTL_RX_CSF_EN |\ CPSW_SL_CTL_RX_CMF_EN) struct cpsw_sl { struct device *dev; void __iomem *sl_base; const u16 *regs; u32 control_features; u32 idle_mask; }; struct cpsw_sl_dev_id { const char *device_id; const u16 *regs; const u32 control_features; const u32 regs_offset; const u32 idle_mask; }; static const struct cpsw_sl_dev_id cpsw_sl_id_match[] = { { .device_id = "cpsw", .regs = cpsw_sl_reg_map_cpsw, .control_features = CPSW_SL_CTL_FUNC_BASE | CPSW_SL_CTL_MTEST | CPSW_SL_CTL_TX_SHORT_GAP_EN | CPSW_SL_CTL_TX_SG_LIM_EN, .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, }, { .device_id = "66ak2hk", .regs = cpsw_sl_reg_map_66ak2hk, .control_features = CPSW_SL_CTL_FUNC_BASE | CPSW_SL_CTL_TX_SHORT_GAP_EN, .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, }, { .device_id = "66ak2x_xgbe", .regs = cpsw_sl_reg_map_66ak2x_xgbe, .control_features = CPSW_SL_CTL_FUNC_BASE | CPSW_SL_CTL_XGIG | CPSW_SL_CTL_TX_SHORT_GAP_EN | CPSW_SL_CTL_CRC_TYPE | CPSW_SL_CTL_XGMII_EN, .idle_mask = CPSW_SL_STATUS_IDLE_MASK_BASE, }, { .device_id = "66ak2el", .regs = cpsw_sl_reg_map_66ak2elg_am65, .regs_offset = 0x330, .control_features = CPSW_SL_CTL_FUNC_BASE | CPSW_SL_CTL_MTEST | CPSW_SL_CTL_TX_SHORT_GAP_EN | CPSW_SL_CTL_CRC_TYPE | CPSW_SL_CTL_EXT_EN_RX_FLO | CPSW_SL_CTL_EXT_EN_TX_FLO | CPSW_SL_CTL_TX_SG_LIM_EN, .idle_mask = 
CPSW_SL_STATUS_IDLE_MASK_BASE, }, { .device_id = "66ak2g", .regs = cpsw_sl_reg_map_66ak2elg_am65, .regs_offset = 0x330, .control_features = CPSW_SL_CTL_FUNC_BASE | CPSW_SL_CTL_MTEST | CPSW_SL_CTL_CRC_TYPE | CPSW_SL_CTL_EXT_EN_RX_FLO | CPSW_SL_CTL_EXT_EN_TX_FLO, }, { .device_id = "am65", .regs = cpsw_sl_reg_map_66ak2elg_am65, .regs_offset = 0x330, .control_features = CPSW_SL_CTL_FUNC_BASE | CPSW_SL_CTL_MTEST | CPSW_SL_CTL_XGIG | CPSW_SL_CTL_TX_SHORT_GAP_EN | CPSW_SL_CTL_CRC_TYPE | CPSW_SL_CTL_XGMII_EN | CPSW_SL_CTL_EXT_EN_RX_FLO | CPSW_SL_CTL_EXT_EN_TX_FLO | CPSW_SL_CTL_TX_SG_LIM_EN | CPSW_SL_CTL_EXT_EN_XGIG, .idle_mask = CPSW_SL_STATUS_IDLE_MASK_K3, }, { }, }; u32 cpsw_sl_reg_read(struct cpsw_sl *sl, enum cpsw_sl_regs reg) { int val; if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) { dev_err(sl->dev, "cpsw_sl: not sup r reg: %04X\n", sl->regs[reg]); return 0; } val = readl(sl->sl_base + sl->regs[reg]); dev_dbg(sl->dev, "cpsw_sl: reg: %04X r 0x%08X\n", sl->regs[reg], val); return val; } void cpsw_sl_reg_write(struct cpsw_sl *sl, enum cpsw_sl_regs reg, u32 val) { if (sl->regs[reg] == CPSW_SL_REG_NOTUSED) { dev_err(sl->dev, "cpsw_sl: not sup w reg: %04X\n", sl->regs[reg]); return; } dev_dbg(sl->dev, "cpsw_sl: reg: %04X w 0x%08X\n", sl->regs[reg], val); writel(val, sl->sl_base + sl->regs[reg]); } static const struct cpsw_sl_dev_id *cpsw_sl_match_id( const struct cpsw_sl_dev_id *id, const char *device_id) { if (!id || !device_id) return NULL; while (id->device_id) { if (strcmp(device_id, id->device_id) == 0) return id; id++; } return NULL; } struct cpsw_sl *cpsw_sl_get(const char *device_id, struct device *dev, void __iomem *sl_base) { const struct cpsw_sl_dev_id *sl_dev_id; struct cpsw_sl *sl; sl = devm_kzalloc(dev, sizeof(struct cpsw_sl), GFP_KERNEL); if (!sl) return ERR_PTR(-ENOMEM); sl->dev = dev; sl->sl_base = sl_base; sl_dev_id = cpsw_sl_match_id(cpsw_sl_id_match, device_id); if (!sl_dev_id) { dev_err(sl->dev, "cpsw_sl: dev_id %s not found.\n", device_id); return 
ERR_PTR(-EINVAL); } sl->regs = sl_dev_id->regs; sl->control_features = sl_dev_id->control_features; sl->idle_mask = sl_dev_id->idle_mask; sl->sl_base += sl_dev_id->regs_offset; return sl; } void cpsw_sl_reset(struct cpsw_sl *sl, unsigned long tmo) { unsigned long timeout = jiffies + msecs_to_jiffies(tmo); /* Set the soft reset bit */ cpsw_sl_reg_write(sl, CPSW_SL_SOFT_RESET, CPSW_SL_SOFT_RESET_BIT); /* Wait for the bit to clear */ do { usleep_range(100, 200); } while ((cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & CPSW_SL_SOFT_RESET_BIT) && time_after(timeout, jiffies)); if (cpsw_sl_reg_read(sl, CPSW_SL_SOFT_RESET) & CPSW_SL_SOFT_RESET_BIT) dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n"); } u32 cpsw_sl_ctl_set(struct cpsw_sl *sl, u32 ctl_funcs) { u32 val; if (ctl_funcs & ~sl->control_features) { dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n", ctl_funcs & (~sl->control_features)); return -EINVAL; } val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL); val |= ctl_funcs; cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val); return 0; } u32 cpsw_sl_ctl_clr(struct cpsw_sl *sl, u32 ctl_funcs) { u32 val; if (ctl_funcs & ~sl->control_features) { dev_err(sl->dev, "cpsw_sl: unsupported func 0x%08X\n", ctl_funcs & (~sl->control_features)); return -EINVAL; } val = cpsw_sl_reg_read(sl, CPSW_SL_MACCONTROL); val &= ~ctl_funcs; cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, val); return 0; } void cpsw_sl_ctl_reset(struct cpsw_sl *sl) { cpsw_sl_reg_write(sl, CPSW_SL_MACCONTROL, 0); } int cpsw_sl_wait_for_idle(struct cpsw_sl *sl, unsigned long tmo) { unsigned long timeout = jiffies + msecs_to_jiffies(tmo); do { usleep_range(100, 200); } while (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & sl->idle_mask) && time_after(timeout, jiffies)); if (!(cpsw_sl_reg_read(sl, CPSW_SL_MACSTATUS) & sl->idle_mask)) { dev_err(sl->dev, "cpsw_sl failed to soft-reset.\n"); return -ETIMEDOUT; } return 0; }
// SPDX-License-Identifier: GPL-2.0
/* OCTEON 3XXX DTS common parts. */

/dts-v1/;

/ {
	compatible = "cavium,octeon-3860";
	#address-cells = <2>;
	#size-cells = <2>;
	interrupt-parent = <&ciu>;

	soc@0 {
		compatible = "simple-bus";
		#address-cells = <2>;
		#size-cells = <2>;
		ranges; /* Direct mapping */

		ciu: interrupt-controller@1070000000000 {
			compatible = "cavium,octeon-3860-ciu";
			interrupt-controller;
			/* Interrupts are specified by two parts:
			 * 1) Controller register (0 or 1)
			 * 2) Bit within the register (0..63)
			 */
			#interrupt-cells = <2>;
			reg = <0x10700 0x00000000 0x0 0x7000>;
		};

		gpio: gpio-controller@1070000000800 {
			#gpio-cells = <2>;
			compatible = "cavium,octeon-3860-gpio";
			reg = <0x10700 0x00000800 0x0 0x100>;
			gpio-controller;
			/* Interrupts are specified by two parts:
			 * 1) GPIO pin number (0..15)
			 * 2) Triggering (1 - edge rising
			 *		  2 - edge falling
			 *		  4 - level active high
			 *		  8 - level active low)
			 */
			interrupt-controller;
			#interrupt-cells = <2>;
			/* The GPIO pin connect to 16 consecutive CUI bits */
			interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
				     <0 20>, <0 21>, <0 22>, <0 23>,
				     <0 24>, <0 25>, <0 26>, <0 27>,
				     <0 28>, <0 29>, <0 30>, <0 31>;
		};

		smi0: mdio@1180000001800 {
			compatible = "cavium,octeon-3860-mdio";
			#address-cells = <1>;
			#size-cells = <0>;
			reg = <0x11800 0x00001800 0x0 0x40>;
		};

		pip: pip@11800a0000000 {
			compatible = "cavium,octeon-3860-pip";
			#address-cells = <1>;
			#size-cells = <0>;
			reg = <0x11800 0xa0000000 0x0 0x2000>;

			interface@0 {
				compatible = "cavium,octeon-3860-pip-interface";
				#address-cells = <1>;
				#size-cells = <0>;
				reg = <0>; /* interface */

				ethernet@0 {
					compatible = "cavium,octeon-3860-pip-port";
					reg = <0x0>; /* Port */
					local-mac-address = [ 00 00 00 00 00 00 ];
				};
				ethernet@1 {
					compatible = "cavium,octeon-3860-pip-port";
					reg = <0x1>; /* Port */
					local-mac-address = [ 00 00 00 00 00 00 ];
				};
				ethernet@2 {
					compatible = "cavium,octeon-3860-pip-port";
					reg = <0x2>; /* Port */
					local-mac-address = [ 00 00 00 00 00 00 ];
				};
			};

			interface@1 {
				compatible = "cavium,octeon-3860-pip-interface";
				#address-cells = <1>;
				#size-cells = <0>;
				reg = <1>; /* interface */
			};
		};

		twsi0: i2c@1180000001000 {
			#address-cells = <1>;
			#size-cells = <0>;
			compatible = "cavium,octeon-3860-twsi";
			reg = <0x11800 0x00001000 0x0 0x200>;
			interrupts = <0 45>;
			clock-frequency = <100000>;
		};

		uart0: serial@1180000000800 {
			compatible = "cavium,octeon-3860-uart","ns16550";
			reg = <0x11800 0x00000800 0x0 0x400>;
			clock-frequency = <0>;
			current-speed = <115200>;
			reg-shift = <3>;
			interrupts = <0 34>;
		};

		bootbus: bootbus@1180000000000 {
			compatible = "cavium,octeon-3860-bootbus";
			reg = <0x11800 0x00000000 0x0 0x200>;
			/* The chip select number and offset */
			#address-cells = <2>;
			/* The size of the chip select region */
			#size-cells = <1>;
			ranges = <0 0 0x0 0x1f400000 0xc00000>,
				 <1 0 0x10000 0x30000000 0>,
				 <2 0 0x10000 0x40000000 0>,
				 <3 0 0x10000 0x50000000 0>,
				 <4 0 0x0 0x1d020000 0x10000>,
				 <5 0 0x0 0x1d040000 0x10000>,
				 <6 0 0x0 0x1d050000 0x10000>,
				 <7 0 0x10000 0x90000000 0>;

			cavium,cs-config@0 {
				compatible = "cavium,octeon-3860-bootbus-config";
				cavium,cs-index = <0>;
				cavium,t-adr = <20>;
				cavium,t-ce = <60>;
				cavium,t-oe = <60>;
				cavium,t-we = <45>;
				cavium,t-rd-hld = <35>;
				cavium,t-wr-hld = <45>;
				cavium,t-pause = <0>;
				cavium,t-wait = <0>;
				cavium,t-page = <35>;
				cavium,t-rd-dly = <0>;
				cavium,pages = <0>;
				cavium,bus-width = <8>;
			};
			cavium,cs-config@4 {
				compatible = "cavium,octeon-3860-bootbus-config";
				cavium,cs-index = <4>;
				cavium,t-adr = <320>;
				cavium,t-ce = <320>;
				cavium,t-oe = <320>;
				cavium,t-we = <320>;
				cavium,t-rd-hld = <320>;
				cavium,t-wr-hld = <320>;
				cavium,t-pause = <320>;
				cavium,t-wait = <320>;
				cavium,t-page = <320>;
				cavium,t-rd-dly = <0>;
				cavium,pages = <0>;
				cavium,bus-width = <8>;
			};
			cavium,cs-config@5 {
				compatible = "cavium,octeon-3860-bootbus-config";
				cavium,cs-index = <5>;
				cavium,t-adr = <5>;
				cavium,t-ce = <300>;
				cavium,t-oe = <125>;
				cavium,t-we = <150>;
				cavium,t-rd-hld = <100>;
				cavium,t-wr-hld = <30>;
				cavium,t-pause = <0>;
				cavium,t-wait = <30>;
				cavium,t-page = <320>;
				cavium,t-rd-dly = <0>;
				cavium,pages = <0>;
				cavium,bus-width = <16>;
			};
			cavium,cs-config@6 {
				compatible = "cavium,octeon-3860-bootbus-config";
				cavium,cs-index = <6>;
				cavium,t-adr = <5>;
				cavium,t-ce = <300>;
				cavium,t-oe = <270>;
				cavium,t-we = <150>;
				cavium,t-rd-hld = <100>;
				cavium,t-wr-hld = <70>;
				cavium,t-pause = <0>;
				cavium,t-wait = <0>;
				cavium,t-page = <320>;
				cavium,t-rd-dly = <0>;
				cavium,pages = <0>;
				cavium,wait-mode;
				cavium,bus-width = <16>;
			};

			flash0: nor@0,0 {
				compatible = "cfi-flash";
				reg = <0 0 0x800000>;
				#address-cells = <1>;
				#size-cells = <1>;
			};
		};

		dma0: dma-engine@1180000000100 {
			compatible = "cavium,octeon-5750-bootbus-dma";
			reg = <0x11800 0x00000100 0x0 0x8>;
			interrupts = <0 63>;
		};
		dma1: dma-engine@1180000000108 {
			compatible = "cavium,octeon-5750-bootbus-dma";
			reg = <0x11800 0x00000108 0x0 0x8>;
			interrupts = <0 63>;
		};

		usbn: usbn@1180068000000 {
			compatible = "cavium,octeon-5750-usbn";
			reg = <0x11800 0x68000000 0x0 0x1000>;
			ranges; /* Direct mapping */
			#address-cells = <2>;
			#size-cells = <2>;

			usbc@16f0010000000 {
				compatible = "cavium,octeon-5750-usbc";
				reg = <0x16f00 0x10000000 0x0 0x80000>;
				interrupts = <0 56>;
			};
		};
	};
};
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2020 Texas Instruments Incorporated - https://www.ti.com * Author: Peter Ujfalusi <[email protected]> */ #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/dmaengine_pcm.h> #include "udma-pcm.h" static const struct snd_pcm_hardware udma_pcm_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_RESUME | SNDRV_PCM_INFO_NO_PERIOD_WAKEUP | SNDRV_PCM_INFO_INTERLEAVED, .buffer_bytes_max = SIZE_MAX, .period_bytes_min = 32, .period_bytes_max = SZ_64K, .periods_min = 2, .periods_max = UINT_MAX, }; static const struct snd_dmaengine_pcm_config udma_dmaengine_pcm_config = { .pcm_hardware = &udma_pcm_hardware, .prepare_slave_config = snd_dmaengine_pcm_prepare_slave_config, }; int udma_pcm_platform_register(struct device *dev) { return devm_snd_dmaengine_pcm_register(dev, &udma_dmaengine_pcm_config, 0); } EXPORT_SYMBOL_GPL(udma_pcm_platform_register); MODULE_AUTHOR("Peter Ujfalusi <[email protected]>"); MODULE_DESCRIPTION("UDMA PCM ASoC platform driver"); MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef __842_H__ #define __842_H__ /* The 842 compressed format is made up of multiple blocks, each of * which have the format: * * <template>[arg1][arg2][arg3][arg4] * * where there are between 0 and 4 template args, depending on the specific * template operation. For normal operations, each arg is either a specific * number of data bytes to add to the output buffer, or an index pointing * to a previously-written number of data bytes to copy to the output buffer. * * The template code is a 5-bit value. This code indicates what to do with * the following data. Template codes from 0 to 0x19 should use the template * table, the static "decomp_ops" table used in decompress. For each template * (table row), there are between 1 and 4 actions; each action corresponds to * an arg following the template code bits. Each action is either a "data" * type action, or a "index" type action, and each action results in 2, 4, or 8 * bytes being written to the output buffer. Each template (i.e. all actions * in the table row) will add up to 8 bytes being written to the output buffer. * Any row with less than 4 actions is padded with noop actions, indicated by * N0 (for which there is no corresponding arg in the compressed data buffer). * * "Data" actions, indicated in the table by D2, D4, and D8, mean that the * corresponding arg is 2, 4, or 8 bytes, respectively, in the compressed data * buffer should be copied directly to the output buffer. * * "Index" actions, indicated in the table by I2, I4, and I8, mean the * corresponding arg is an index parameter that points to, respectively, a 2, * 4, or 8 byte value already in the output buffer, that should be copied to * the end of the output buffer. Essentially, the index points to a position * in a ring buffer that contains the last N bytes of output buffer data. * The number of bits for each index's arg are: 8 bits for I2, 9 bits for I4, * and 8 bits for I8. 
Since each index points to a 2, 4, or 8 byte section, * this means that I2 can reference 512 bytes ((2^8 bits = 256) * 2 bytes), I4 * can reference 2048 bytes ((2^9 = 512) * 4 bytes), and I8 can reference 2048 * bytes ((2^8 = 256) * 8 bytes). Think of it as a kind-of ring buffer for * each of I2, I4, and I8 that are updated for each byte written to the output * buffer. In this implementation, the output buffer is directly used for each * index; there is no additional memory required. Note that the index is into * a ring buffer, not a sliding window; for example, if there have been 260 * bytes written to the output buffer, an I2 index of 0 would index to byte 256 * in the output buffer, while an I2 index of 16 would index to byte 16 in the * output buffer. * * There are also 3 special template codes; 0x1b for "repeat", 0x1c for * "zeros", and 0x1e for "end". The "repeat" operation is followed by a 6 bit * arg N indicating how many times to repeat. The last 8 bytes written to the * output buffer are written again to the output buffer, N + 1 times. The * "zeros" operation, which has no arg bits, writes 8 zeros to the output * buffer. The "end" operation, which also has no arg bits, signals the end * of the compressed data. There may be some number of padding (don't care, * but usually 0) bits after the "end" operation bits, to fill the buffer * length to a specific byte multiple (usually a multiple of 8, 16, or 32 * bytes). * * This software implementation also uses one of the undefined template values, * 0x1d as a special "short data" template code, to represent less than 8 bytes * of uncompressed data. It is followed by a 3 bit arg N indicating how many * data bytes will follow, and then N bytes of data, which should be copied to * the output buffer. This allows the software 842 compressor to accept input * buffers that are not an exact multiple of 8 bytes long. 
However, those * compressed buffers containing this sw-only template will be rejected by * the 842 hardware decompressor, and must be decompressed with this software * library. The 842 software compression module includes a parameter to * disable using this sw-only "short data" template, and instead simply * reject any input buffer that is not a multiple of 8 bytes long. * * After all actions for each operation code are processed, another template * code is in the next 5 bits. The decompression ends once the "end" template * code is detected. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <linux/crc32.h> #include <linux/unaligned.h> #include <linux/sw842.h> /* special templates */ #define OP_REPEAT (0x1B) #define OP_ZEROS (0x1C) #define OP_END (0x1E) /* sw only template - this is not in the hw design; it's used only by this * software compressor and decompressor, to allow input buffers that aren't * a multiple of 8. */ #define OP_SHORT_DATA (0x1D) /* additional bits of each op param */ #define OP_BITS (5) #define REPEAT_BITS (6) #define SHORT_DATA_BITS (3) #define I2_BITS (8) #define I4_BITS (9) #define I8_BITS (8) #define CRC_BITS (32) #define REPEAT_BITS_MAX (0x3f) #define SHORT_DATA_BITS_MAX (0x7) /* Arbitrary values used to indicate action */ #define OP_ACTION (0x70) #define OP_ACTION_INDEX (0x10) #define OP_ACTION_DATA (0x20) #define OP_ACTION_NOOP (0x40) #define OP_AMOUNT (0x0f) #define OP_AMOUNT_0 (0x00) #define OP_AMOUNT_2 (0x02) #define OP_AMOUNT_4 (0x04) #define OP_AMOUNT_8 (0x08) #define D2 (OP_ACTION_DATA | OP_AMOUNT_2) #define D4 (OP_ACTION_DATA | OP_AMOUNT_4) #define D8 (OP_ACTION_DATA | OP_AMOUNT_8) #define I2 (OP_ACTION_INDEX | OP_AMOUNT_2) #define I4 (OP_ACTION_INDEX | OP_AMOUNT_4) #define I8 (OP_ACTION_INDEX | OP_AMOUNT_8) #define N0 (OP_ACTION_NOOP | OP_AMOUNT_0) /* the max of the regular templates - not including the special templates */ #define OPS_MAX (0x1a) #endif
/* SPDX-License-Identifier: GPL-2.0 */ #define _GNU_SOURCE #include <linux/limits.h> #include <linux/oom.h> #include <fcntl.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/stat.h> #include <sys/types.h> #include <unistd.h> #include <sys/socket.h> #include <sys/wait.h> #include <arpa/inet.h> #include <netinet/in.h> #include <netdb.h> #include <errno.h> #include <sys/mman.h> #include "../kselftest.h" #include "cgroup_util.h" static bool has_localevents; static bool has_recursiveprot; /* * This test creates two nested cgroups with and without enabling * the memory controller. */ static int test_memcg_subtree_control(const char *root) { char *parent, *child, *parent2 = NULL, *child2 = NULL; int ret = KSFT_FAIL; char buf[PAGE_SIZE]; /* Create two nested cgroups with the memory controller enabled */ parent = cg_name(root, "memcg_test_0"); child = cg_name(root, "memcg_test_0/memcg_test_1"); if (!parent || !child) goto cleanup_free; if (cg_create(parent)) goto cleanup_free; if (cg_write(parent, "cgroup.subtree_control", "+memory")) goto cleanup_parent; if (cg_create(child)) goto cleanup_parent; if (cg_read_strstr(child, "cgroup.controllers", "memory")) goto cleanup_child; /* Create two nested cgroups without enabling memory controller */ parent2 = cg_name(root, "memcg_test_1"); child2 = cg_name(root, "memcg_test_1/memcg_test_1"); if (!parent2 || !child2) goto cleanup_free2; if (cg_create(parent2)) goto cleanup_free2; if (cg_create(child2)) goto cleanup_parent2; if (cg_read(child2, "cgroup.controllers", buf, sizeof(buf))) goto cleanup_all; if (!cg_read_strstr(child2, "cgroup.controllers", "memory")) goto cleanup_all; ret = KSFT_PASS; cleanup_all: cg_destroy(child2); cleanup_parent2: cg_destroy(parent2); cleanup_free2: free(parent2); free(child2); cleanup_child: cg_destroy(child); cleanup_parent: cg_destroy(parent); cleanup_free: free(parent); free(child); return ret; } static int alloc_anon_50M_check(const char *cgroup, void *arg) { size_t size = 
MB(50); char *buf, *ptr; long anon, current; int ret = -1; buf = malloc(size); if (buf == NULL) { fprintf(stderr, "malloc() failed\n"); return -1; } for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE) *ptr = 0; current = cg_read_long(cgroup, "memory.current"); if (current < size) goto cleanup; if (!values_close(size, current, 3)) goto cleanup; anon = cg_read_key_long(cgroup, "memory.stat", "anon "); if (anon < 0) goto cleanup; if (!values_close(anon, current, 3)) goto cleanup; ret = 0; cleanup: free(buf); return ret; } static int alloc_pagecache_50M_check(const char *cgroup, void *arg) { size_t size = MB(50); int ret = -1; long current, file; int fd; fd = get_temp_fd(); if (fd < 0) return -1; if (alloc_pagecache(fd, size)) goto cleanup; current = cg_read_long(cgroup, "memory.current"); if (current < size) goto cleanup; file = cg_read_key_long(cgroup, "memory.stat", "file "); if (file < 0) goto cleanup; if (!values_close(file, current, 10)) goto cleanup; ret = 0; cleanup: close(fd); return ret; } /* * This test create a memory cgroup, allocates * some anonymous memory and some pagecache * and checks memory.current, memory.peak, and some memory.stat values. 
*/ static int test_memcg_current_peak(const char *root) { int ret = KSFT_FAIL; long current, peak, peak_reset; char *memcg; bool fd2_closed = false, fd3_closed = false, fd4_closed = false; int peak_fd = -1, peak_fd2 = -1, peak_fd3 = -1, peak_fd4 = -1; struct stat ss; memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup; if (cg_create(memcg)) goto cleanup; current = cg_read_long(memcg, "memory.current"); if (current != 0) goto cleanup; peak = cg_read_long(memcg, "memory.peak"); if (peak != 0) goto cleanup; if (cg_run(memcg, alloc_anon_50M_check, NULL)) goto cleanup; peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(50)) goto cleanup; /* * We'll open a few FDs for the same memory.peak file to exercise the free-path * We need at least three to be closed in a different order than writes occurred to test * the linked-list handling. */ peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); if (peak_fd == -1) { if (errno == ENOENT) ret = KSFT_SKIP; goto cleanup; } /* * Before we try to use memory.peak's fd, try to figure out whether * this kernel supports writing to that file in the first place. 
(by * checking the writable bit on the file's st_mode) */ if (fstat(peak_fd, &ss)) goto cleanup; if ((ss.st_mode & S_IWUSR) == 0) { ret = KSFT_SKIP; goto cleanup; } peak_fd2 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); if (peak_fd2 == -1) goto cleanup; peak_fd3 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); if (peak_fd3 == -1) goto cleanup; /* any non-empty string resets, but make it clear */ static const char reset_string[] = "reset\n"; peak_reset = write(peak_fd, reset_string, sizeof(reset_string)); if (peak_reset != sizeof(reset_string)) goto cleanup; peak_reset = write(peak_fd2, reset_string, sizeof(reset_string)); if (peak_reset != sizeof(reset_string)) goto cleanup; peak_reset = write(peak_fd3, reset_string, sizeof(reset_string)); if (peak_reset != sizeof(reset_string)) goto cleanup; /* Make sure a completely independent read isn't affected by our FD-local reset above*/ peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(50)) goto cleanup; fd2_closed = true; if (close(peak_fd2)) goto cleanup; peak_fd4 = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC); if (peak_fd4 == -1) goto cleanup; peak_reset = write(peak_fd4, reset_string, sizeof(reset_string)); if (peak_reset != sizeof(reset_string)) goto cleanup; peak = cg_read_long_fd(peak_fd); if (peak > MB(30) || peak < 0) goto cleanup; if (cg_run(memcg, alloc_pagecache_50M_check, NULL)) goto cleanup; peak = cg_read_long(memcg, "memory.peak"); if (peak < MB(50)) goto cleanup; /* Make sure everything is back to normal */ peak = cg_read_long_fd(peak_fd); if (peak < MB(50)) goto cleanup; peak = cg_read_long_fd(peak_fd4); if (peak < MB(50)) goto cleanup; fd3_closed = true; if (close(peak_fd3)) goto cleanup; fd4_closed = true; if (close(peak_fd4)) goto cleanup; ret = KSFT_PASS; cleanup: close(peak_fd); if (!fd2_closed) close(peak_fd2); if (!fd3_closed) close(peak_fd3); if (!fd4_closed) close(peak_fd4); cg_destroy(memcg); free(memcg); return ret; } static int 
alloc_pagecache_50M_noexit(const char *cgroup, void *arg)
{
	int fd = (long)arg;	/* temp fd smuggled through the void* arg */
	int ppid = getppid();

	/* Fill 50M of pagecache through @fd, then linger in the cgroup */
	if (alloc_pagecache(fd, MB(50)))
		return -1;

	/* Stay alive until our parent exits (getppid() changes on reparent) */
	while (getppid() == ppid)
		sleep(1);

	return 0;
}

/*
 * Allocate and touch @arg bytes of anonymous memory, then keep the
 * process (and thus the charge) alive until the parent exits.
 */
static int alloc_anon_noexit(const char *cgroup, void *arg)
{
	int ppid = getppid();
	size_t size = (unsigned long)arg;
	char *buf, *ptr;

	buf = malloc(size);
	if (buf == NULL) {
		fprintf(stderr, "malloc() failed\n");
		return -1;
	}

	/* Touch one byte per page so the pages are actually faulted in */
	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
		*ptr = 0;

	while (getppid() == ppid)
		sleep(1);

	free(buf);
	return 0;
}

/*
 * Wait until processes are killed asynchronously by the OOM killer
 * If we exceed a timeout, fail.
 */
static int cg_test_proc_killed(const char *cgroup)
{
	int limit;

	/* poll cgroup.procs for up to ~1s (10 x 100ms) */
	for (limit = 10; limit > 0; limit--) {
		if (cg_read_strcmp(cgroup, "cgroup.procs", "") == 0)
			return 0;

		usleep(100000);
	}
	return -1;
}

/* defined below, needed by test_memcg_protection() */
static bool reclaim_until(const char *memcg, long goal);

/*
 * First, this test creates the following hierarchy:
 * A       memory.min = 0,    memory.max = 200M
 * A/B     memory.min = 50M
 * A/B/C   memory.min = 75M,  memory.current = 50M
 * A/B/D   memory.min = 25M,  memory.current = 50M
 * A/B/E   memory.min = 0,    memory.current = 50M
 * A/B/F   memory.min = 500M, memory.current = 0
 *
 * (or memory.low if we test soft protection)
 *
 * Usages are pagecache and the test keeps a running
 * process in every leaf cgroup.
 * Then it creates A/G and creates a significant
 * memory pressure in A.
 *
 * Then it checks actual memory usages and expects that:
 * A/B    memory.current ~= 50M
 * A/B/C  memory.current ~= 29M
 * A/B/D  memory.current ~= 21M
 * A/B/E  memory.current ~= 0
 * A/B/F  memory.current  = 0
 * (for origin of the numbers, see model in memcg_protection.m.)
 *
 * After that it tries to allocate more than there is
 * unprotected memory in A available, and checks that:
 * a) memory.min protects pagecache even in this case,
 * b) memory.low allows reclaiming page cache with low events.
 *
 * Then we try to reclaim from A/B/C using memory.reclaim until its
 * usage reaches 10M.
 * This makes sure that:
 * (a) We ignore the protection of the reclaim target memcg.
 * (b) The previously calculated emin value (~29M) should be dismissed.
 */
static int test_memcg_protection(const char *root, bool min)
{
	int ret = KSFT_FAIL, rc;
	char *parent[3] = {NULL};
	char *children[4] = {NULL};
	/* @min selects hard (memory.min) vs soft (memory.low) protection */
	const char *attribute = min ? "memory.min" : "memory.low";
	long c[4];
	long current;
	int i, attempts;
	int fd;

	fd = get_temp_fd();
	if (fd < 0)
		goto cleanup;

	/* parent[0] = A, parent[1] = A/B, parent[2] = A/G (pressure source) */
	parent[0] = cg_name(root, "memcg_test_0");
	if (!parent[0])
		goto cleanup;

	parent[1] = cg_name(parent[0], "memcg_test_1");
	if (!parent[1])
		goto cleanup;

	parent[2] = cg_name(parent[0], "memcg_test_2");
	if (!parent[2])
		goto cleanup;

	if (cg_create(parent[0]))
		goto cleanup;

	if (cg_read_long(parent[0], attribute)) {
		/* No memory.min on older kernels is fine */
		if (min)
			ret = KSFT_SKIP;
		goto cleanup;
	}

	if (cg_write(parent[0], "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (cg_write(parent[0], "memory.max", "200M"))
		goto cleanup;

	if (cg_write(parent[0], "memory.swap.max", "0"))
		goto cleanup;

	if (cg_create(parent[1]))
		goto cleanup;

	if (cg_write(parent[1], "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (cg_create(parent[2]))
		goto cleanup;

	/* create leaves A/B/C..F; only the first three get 50M pagecache */
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		children[i] = cg_name_indexed(parent[1], "child_memcg", i);
		if (!children[i])
			goto cleanup;

		if (cg_create(children[i]))
			goto cleanup;

		if (i > 2)
			continue;

		cg_run_nowait(children[i], alloc_pagecache_50M_noexit,
			      (void *)(long)fd);
	}

	if (cg_write(parent[1], attribute, "50M"))
		goto cleanup;
	if (cg_write(children[0], attribute, "75M"))
		goto cleanup;
	if (cg_write(children[1], attribute, "25M"))
		goto cleanup;
	if (cg_write(children[2], attribute, "0"))
		goto cleanup;
	if (cg_write(children[3], attribute, "500M"))
		goto cleanup;

	/* wait (up to ~6s) for the leaves' usage to reach ~150M */
	attempts = 0;
	while (!values_close(cg_read_long(parent[1], "memory.current"),
			     MB(150), 3)) {
		if (attempts++ > 5)
			break;
		sleep(1);
	}

	/* generate memory pressure in A via a sibling of A/B */
	if (cg_run(parent[2], alloc_anon, (void *)MB(148)))
		goto cleanup;

	if
(!values_close(cg_read_long(parent[1], "memory.current"), MB(50), 3))
		goto cleanup;

	for (i = 0; i < ARRAY_SIZE(children); i++)
		c[i] = cg_read_long(children[i], "memory.current");

	/* expected distribution: see the model referenced in the comment above */
	if (!values_close(c[0], MB(29), 10))
		goto cleanup;

	if (!values_close(c[1], MB(21), 10))
		goto cleanup;

	if (c[3] != 0)
		goto cleanup;

	/* allocate more than the unprotected memory available in A */
	rc = cg_run(parent[2], alloc_anon, (void *)MB(170));
	if (min && !rc)
		goto cleanup;
	else if (!min && rc) {
		fprintf(stderr,
			"memory.low prevents from allocating anon memory\n");
		goto cleanup;
	}

	current = min ? MB(50) : MB(30);
	if (!values_close(cg_read_long(parent[1], "memory.current"), current, 3))
		goto cleanup;

	/* memory.reclaim must be able to override the target's own protection */
	if (!reclaim_until(children[0], MB(10)))
		goto cleanup;

	if (min) {
		ret = KSFT_PASS;
		goto cleanup;
	}

	/* soft protection: low events must appear only for protected leaves */
	for (i = 0; i < ARRAY_SIZE(children); i++) {
		int no_low_events_index = 1;
		long low, oom;

		oom = cg_read_key_long(children[i], "memory.events", "oom ");
		low = cg_read_key_long(children[i], "memory.events", "low ");

		if (oom)
			goto cleanup;
		if (i <= no_low_events_index && low <= 0)
			goto cleanup;
		if (i > no_low_events_index && low)
			goto cleanup;
	}

	ret = KSFT_PASS;

cleanup:
	/* destroy children before parents (leaf-first teardown) */
	for (i = ARRAY_SIZE(children) - 1; i >= 0; i--) {
		if (!children[i])
			continue;

		cg_destroy(children[i]);
		free(children[i]);
	}

	for (i = ARRAY_SIZE(parent) - 1; i >= 0; i--) {
		if (!parent[i])
			continue;

		cg_destroy(parent[i]);
		free(parent[i]);
	}
	close(fd);
	return ret;
}

/* memory.min (hard protection) variant of test_memcg_protection() */
static int test_memcg_min(const char *root)
{
	return test_memcg_protection(root, true);
}

/* memory.low (soft protection) variant of test_memcg_protection() */
static int test_memcg_low(const char *root)
{
	return test_memcg_protection(root, false);
}

/*
 * Try to allocate 50M of pagecache in a cgroup limited (by memory.high
 * or memory.max) to 30M, and check that usage settles close to 30M.
 */
static int alloc_pagecache_max_30M(const char *cgroup, void *arg)
{
	size_t size = MB(50);
	int ret = -1;
	long current, high, max;
	int fd;

	/* sanity check: one of the two limits must be set to 30M */
	high = cg_read_long(cgroup, "memory.high");
	max = cg_read_long(cgroup, "memory.max");
	if (high != MB(30) && max != MB(30))
		return -1;

	fd = get_temp_fd();
	if (fd < 0)
		return -1;

	if (alloc_pagecache(fd, size))
		goto cleanup;

	current = cg_read_long(cgroup, "memory.current");
	if (!values_close(current, MB(30), 5))
		goto cleanup;
ret = 0; cleanup: close(fd); return ret; } /* * This test checks that memory.high limits the amount of * memory which can be consumed by either anonymous memory * or pagecache. */ static int test_memcg_high(const char *root) { int ret = KSFT_FAIL; char *memcg; long high; memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup; if (cg_create(memcg)) goto cleanup; if (cg_read_strcmp(memcg, "memory.high", "max\n")) goto cleanup; if (cg_write(memcg, "memory.swap.max", "0")) goto cleanup; if (cg_write(memcg, "memory.high", "30M")) goto cleanup; if (cg_run(memcg, alloc_anon, (void *)MB(31))) goto cleanup; if (!cg_run(memcg, alloc_pagecache_50M_check, NULL)) goto cleanup; if (cg_run(memcg, alloc_pagecache_max_30M, NULL)) goto cleanup; high = cg_read_key_long(memcg, "memory.events", "high "); if (high <= 0) goto cleanup; ret = KSFT_PASS; cleanup: cg_destroy(memcg); free(memcg); return ret; } static int alloc_anon_mlock(const char *cgroup, void *arg) { size_t size = (size_t)arg; void *buf; buf = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, 0, 0); if (buf == MAP_FAILED) return -1; mlock(buf, size); munmap(buf, size); return 0; } /* * This test checks that memory.high is able to throttle big single shot * allocation i.e. large allocation within one kernel entry. 
 */
static int test_memcg_high_sync(const char *root)
{
	int ret = KSFT_FAIL, pid, fd = -1;
	char *memcg;
	long pre_high, pre_max;
	long post_high, post_max;

	memcg = cg_name(root, "memcg_test");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	/* snapshot the event counters before generating any pressure */
	pre_high = cg_read_key_long(memcg, "memory.events", "high ");
	pre_max = cg_read_key_long(memcg, "memory.events", "max ");
	if (pre_high < 0 || pre_max < 0)
		goto cleanup;

	if (cg_write(memcg, "memory.swap.max", "0"))
		goto cleanup;

	if (cg_write(memcg, "memory.high", "30M"))
		goto cleanup;

	if (cg_write(memcg, "memory.max", "140M"))
		goto cleanup;

	fd = memcg_prepare_for_wait(memcg);
	if (fd < 0)
		goto cleanup;

	/* 200M single-shot mlock()ed allocation against a 30M high limit */
	pid = cg_run_nowait(memcg, alloc_anon_mlock, (void *)MB(200));
	if (pid < 0)
		goto cleanup;

	cg_wait_for(fd);

	post_high = cg_read_key_long(memcg, "memory.events", "high ");
	post_max = cg_read_key_long(memcg, "memory.events", "max ");
	if (post_high < 0 || post_max < 0)
		goto cleanup;

	/* "high" events must have fired; "max" events must not have */
	if (pre_high == post_high || pre_max != post_max)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (fd >= 0)
		close(fd);
	cg_destroy(memcg);
	free(memcg);

	return ret;
}

/*
 * This test checks that memory.max limits the amount of
 * memory which can be consumed by either anonymous memory
 * or pagecache.
 */
static int test_memcg_max(const char *root)
{
	int ret = KSFT_FAIL;
	char *memcg;
	long current, max;

	memcg = cg_name(root, "memcg_test");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	/* memory.max defaults to "max" on a fresh cgroup */
	if (cg_read_strcmp(memcg, "memory.max", "max\n"))
		goto cleanup;

	if (cg_write(memcg, "memory.swap.max", "0"))
		goto cleanup;

	if (cg_write(memcg, "memory.max", "30M"))
		goto cleanup;

	/* Should be killed by OOM killer */
	if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
		goto cleanup;

	if (cg_run(memcg, alloc_pagecache_max_30M, NULL))
		goto cleanup;

	/* usage must be non-zero but capped at the 30M hard limit */
	current = cg_read_long(memcg, "memory.current");
	if (current > MB(30) || !current)
		goto cleanup;

	max = cg_read_key_long(memcg, "memory.events", "max ");
	if (max <= 0)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(memcg);
	free(memcg);

	return ret;
}

/*
 * Reclaim from @memcg until usage reaches @goal by writing to
 * memory.reclaim.
 *
 * This function will return false if the usage is already below the
 * goal.
 *
 * This function assumes that writing to memory.reclaim is the only
 * source of change in memory.current (no concurrent allocations or
 * reclaim).
 *
 * This function makes sure memory.reclaim is sane. It will return
 * false if memory.reclaim's error codes do not make sense, even if
 * the usage goal was satisfied.
 */
static bool reclaim_until(const char *memcg, long goal)
{
	char buf[64];
	int retries, err;
	long current, to_reclaim;
	bool reclaimed = false;

	for (retries = 5; retries > 0; retries--) {
		current = cg_read_long(memcg, "memory.current");

		/* within 3% of the goal (or below it) counts as done */
		if (current < goal || values_close(current, goal, 3))
			break;
		/* Did memory.reclaim return 0 incorrectly? */
		else if (reclaimed)
			return false;

		to_reclaim = current - goal;
		snprintf(buf, sizeof(buf), "%ld", to_reclaim);
		err = cg_write(memcg, "memory.reclaim", buf);
		if (!err)
			reclaimed = true;
		else if (err != -EAGAIN)
			return false;
	}
	return reclaimed;
}

/*
 * This test checks that memory.reclaim reclaims the given
 * amount of memory (from both anon and file, if possible).
 */
static int test_memcg_reclaim(const char *root)
{
	int ret = KSFT_FAIL;
	int fd = -1;
	int retries;
	char *memcg;
	long current, expected_usage;

	memcg = cg_name(root, "memcg_test");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	current = cg_read_long(memcg, "memory.current");
	if (current != 0)
		goto cleanup;

	fd = get_temp_fd();
	if (fd < 0)
		goto cleanup;

	cg_run_nowait(memcg, alloc_pagecache_50M_noexit, (void *)(long)fd);

	/*
	 * If swap is enabled, try to reclaim from both anon and file, else try
	 * to reclaim from file only.
	 */
	if (is_swap_enabled()) {
		cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(50));
		expected_usage = MB(100);
	} else
		expected_usage = MB(50);

	/*
	 * Wait until current usage reaches the expected usage (or we run out of
	 * retries).
	 */
	retries = 5;
	while (!values_close(cg_read_long(memcg, "memory.current"),
			     expected_usage, 10)) {
		if (retries--) {
			sleep(1);
			continue;
		} else {
			fprintf(stderr,
				"failed to allocate %ld for memcg reclaim test\n",
				expected_usage);
			goto cleanup;
		}
	}

	/*
	 * Reclaim until current reaches 30M, this makes sure we hit both anon
	 * and file if swap is enabled.
	 */
	if (!reclaim_until(memcg, MB(30)))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(memcg);
	free(memcg);
	close(fd);

	return ret;
}

/*
 * Allocate and touch 50M of anon memory in a cgroup whose memory.max
 * is @arg, then check that the overflow was swapped out: memory.current
 * sits at the limit and current + swap adds up to the full 50M.
 */
static int alloc_anon_50M_check_swap(const char *cgroup, void *arg)
{
	long mem_max = (long)arg;
	size_t size = MB(50);
	char *buf, *ptr;
	long mem_current, swap_current;
	int ret = -1;

	buf = malloc(size);
	if (buf == NULL) {
		fprintf(stderr, "malloc() failed\n");
		return -1;
	}

	/* touch one byte per page to fault everything in */
	for (ptr = buf; ptr < buf + size; ptr += PAGE_SIZE)
		*ptr = 0;

	mem_current = cg_read_long(cgroup, "memory.current");
	if (!mem_current || !values_close(mem_current, mem_max, 3))
		goto cleanup;

	swap_current = cg_read_long(cgroup, "memory.swap.current");
	if (!swap_current ||
	    !values_close(mem_current + swap_current, size, 3))
		goto cleanup;

	ret = 0;
cleanup:
	free(buf);
	return ret;
}

/*
 * This test checks that memory.swap.max limits the amount of
 * anonymous memory which can be swapped out. Additionally, it verifies that
 * memory.swap.peak reflects the high watermark and can be reset.
 */
static int test_memcg_swap_max_peak(const char *root)
{
	int ret = KSFT_FAIL;
	char *memcg;
	long max, peak;
	struct stat ss;
	int swap_peak_fd = -1, mem_peak_fd = -1;

	/* any non-empty string resets */
	static const char reset_string[] = "foobarbaz";

	if (!is_swap_enabled())
		return KSFT_SKIP;

	memcg = cg_name(root, "memcg_test");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	/* a fresh cgroup must have no swapped-out memory yet */
	if (cg_read_long(memcg, "memory.swap.current")) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	swap_peak_fd = cg_open(memcg, "memory.swap.peak",
			       O_RDWR | O_APPEND | O_CLOEXEC);

	if (swap_peak_fd == -1) {
		if (errno == ENOENT)
			ret = KSFT_SKIP;
		goto cleanup;
	}

	/*
	 * Before we try to use memory.swap.peak's fd, try to figure out
	 * whether this kernel supports writing to that file in the first
	 * place.
	 * (by checking the writable bit on the file's st_mode)
	 */
	if (fstat(swap_peak_fd, &ss))
		goto cleanup;

	if ((ss.st_mode & S_IWUSR) == 0) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	mem_peak_fd = cg_open(memcg, "memory.peak", O_RDWR | O_APPEND | O_CLOEXEC);

	if (mem_peak_fd == -1)
		goto cleanup;

	/* both global peaks must still be zero before any allocation */
	if (cg_read_long(memcg, "memory.swap.peak"))
		goto cleanup;

	if (cg_read_long_fd(swap_peak_fd))
		goto cleanup;

	/* switch the swap and mem fds into local-peak tracking mode*/
	int peak_reset = write(swap_peak_fd, reset_string, sizeof(reset_string));

	if (peak_reset != sizeof(reset_string))
		goto cleanup;

	if (cg_read_long_fd(swap_peak_fd))
		goto cleanup;

	if (cg_read_long(memcg, "memory.peak"))
		goto cleanup;

	if (cg_read_long_fd(mem_peak_fd))
		goto cleanup;

	peak_reset = write(mem_peak_fd, reset_string, sizeof(reset_string));
	if (peak_reset != sizeof(reset_string))
		goto cleanup;

	if (cg_read_long_fd(mem_peak_fd))
		goto cleanup;

	if (cg_read_strcmp(memcg, "memory.max", "max\n"))
		goto cleanup;

	if (cg_read_strcmp(memcg, "memory.swap.max", "max\n"))
		goto cleanup;

	if (cg_write(memcg, "memory.swap.max", "30M"))
		goto cleanup;

	if (cg_write(memcg, "memory.max", "30M"))
		goto cleanup;

	/* Should be killed by OOM killer */
	if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
		goto cleanup;

	if (cg_read_key_long(memcg, "memory.events", "oom ") != 1)
		goto cleanup;

	if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1)
		goto cleanup;

	/* all four views (global + fd-local, mem + swap) must show ~30M */
	peak = cg_read_long(memcg, "memory.peak");
	if (peak < MB(29))
		goto cleanup;

	peak = cg_read_long(memcg, "memory.swap.peak");
	if (peak < MB(29))
		goto cleanup;

	peak = cg_read_long_fd(mem_peak_fd);
	if (peak < MB(29))
		goto cleanup;

	peak = cg_read_long_fd(swap_peak_fd);
	if (peak < MB(29))
		goto cleanup;

	/*
	 * open, reset and close the peak swap on another FD to make sure
	 * multiple extant fds don't corrupt the linked-list
	 */
	peak_reset = cg_write(memcg, "memory.swap.peak", (char *)reset_string);
	if (peak_reset)
		goto cleanup;

	peak_reset = cg_write(memcg, "memory.peak", (char *)reset_string);
	if (peak_reset)
		goto cleanup;

	/* actually reset on the fds */
	peak_reset = write(swap_peak_fd, reset_string, sizeof(reset_string));
	if (peak_reset != sizeof(reset_string))
		goto cleanup;

	peak_reset = write(mem_peak_fd, reset_string, sizeof(reset_string));
	if (peak_reset != sizeof(reset_string))
		goto cleanup;

	peak = cg_read_long_fd(swap_peak_fd);
	if (peak > MB(10))
		goto cleanup;

	/*
	 * The cgroup is now empty, but there may be a page or two associated
	 * with the open FD accounted to it.
	 */
	peak = cg_read_long_fd(mem_peak_fd);
	if (peak > MB(1))
		goto cleanup;

	/* the global (non-fd) peaks must be unaffected by the fd-local resets */
	if (cg_read_long(memcg, "memory.peak") < MB(29))
		goto cleanup;

	if (cg_read_long(memcg, "memory.swap.peak") < MB(29))
		goto cleanup;

	if (cg_run(memcg, alloc_anon_50M_check_swap, (void *)MB(30)))
		goto cleanup;

	max = cg_read_key_long(memcg, "memory.events", "max ");
	if (max <= 0)
		goto cleanup;

	peak = cg_read_long(memcg, "memory.peak");
	if (peak < MB(29))
		goto cleanup;

	peak = cg_read_long(memcg, "memory.swap.peak");
	if (peak < MB(29))
		goto cleanup;

	peak = cg_read_long_fd(mem_peak_fd);
	if (peak < MB(29))
		goto cleanup;

	peak = cg_read_long_fd(swap_peak_fd);
	if (peak < MB(19))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	/* a failed close() downgrades a pass to a failure */
	if (mem_peak_fd != -1 && close(mem_peak_fd))
		ret = KSFT_FAIL;
	if (swap_peak_fd != -1 && close(swap_peak_fd))
		ret = KSFT_FAIL;
	cg_destroy(memcg);
	free(memcg);

	return ret;
}

/*
 * This test disables swapping and tries to allocate anonymous memory
 * up to OOM. Then it checks for oom and oom_kill events in
 * memory.events.
*/ static int test_memcg_oom_events(const char *root) { int ret = KSFT_FAIL; char *memcg; memcg = cg_name(root, "memcg_test"); if (!memcg) goto cleanup; if (cg_create(memcg)) goto cleanup; if (cg_write(memcg, "memory.max", "30M")) goto cleanup; if (cg_write(memcg, "memory.swap.max", "0")) goto cleanup; if (!cg_run(memcg, alloc_anon, (void *)MB(100))) goto cleanup; if (cg_read_strcmp(memcg, "cgroup.procs", "")) goto cleanup; if (cg_read_key_long(memcg, "memory.events", "oom ") != 1) goto cleanup; if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 1) goto cleanup; ret = KSFT_PASS; cleanup: cg_destroy(memcg); free(memcg); return ret; } struct tcp_server_args { unsigned short port; int ctl[2]; }; static int tcp_server(const char *cgroup, void *arg) { struct tcp_server_args *srv_args = arg; struct sockaddr_in6 saddr = { 0 }; socklen_t slen = sizeof(saddr); int sk, client_sk, ctl_fd, yes = 1, ret = -1; close(srv_args->ctl[0]); ctl_fd = srv_args->ctl[1]; saddr.sin6_family = AF_INET6; saddr.sin6_addr = in6addr_any; saddr.sin6_port = htons(srv_args->port); sk = socket(AF_INET6, SOCK_STREAM, 0); if (sk < 0) return ret; if (setsockopt(sk, SOL_SOCKET, SO_REUSEADDR, &yes, sizeof(yes)) < 0) goto cleanup; if (bind(sk, (struct sockaddr *)&saddr, slen)) { write(ctl_fd, &errno, sizeof(errno)); goto cleanup; } if (listen(sk, 1)) goto cleanup; ret = 0; if (write(ctl_fd, &ret, sizeof(ret)) != sizeof(ret)) { ret = -1; goto cleanup; } client_sk = accept(sk, NULL, NULL); if (client_sk < 0) goto cleanup; ret = -1; for (;;) { uint8_t buf[0x100000]; if (write(client_sk, buf, sizeof(buf)) <= 0) { if (errno == ECONNRESET) ret = 0; break; } } close(client_sk); cleanup: close(sk); return ret; } static int tcp_client(const char *cgroup, unsigned short port) { const char server[] = "localhost"; struct addrinfo *ai; char servport[6]; int retries = 0x10; /* nice round number */ int sk, ret; long allocated; allocated = cg_read_long(cgroup, "memory.current"); snprintf(servport, 
sizeof(servport), "%hd", port); ret = getaddrinfo(server, servport, NULL, &ai); if (ret) return ret; sk = socket(ai->ai_family, ai->ai_socktype, ai->ai_protocol); if (sk < 0) goto free_ainfo; ret = connect(sk, ai->ai_addr, ai->ai_addrlen); if (ret < 0) goto close_sk; ret = KSFT_FAIL; while (retries--) { uint8_t buf[0x100000]; long current, sock; if (read(sk, buf, sizeof(buf)) <= 0) goto close_sk; current = cg_read_long(cgroup, "memory.current"); sock = cg_read_key_long(cgroup, "memory.stat", "sock "); if (current < 0 || sock < 0) goto close_sk; /* exclude the memory not related to socket connection */ if (values_close(current - allocated, sock, 10)) { ret = KSFT_PASS; break; } } close_sk: close(sk); free_ainfo: freeaddrinfo(ai); return ret; } /* * This test checks socket memory accounting. * The test forks a TCP server listens on a random port between 1000 * and 61000. Once it gets a client connection, it starts writing to * its socket. * The TCP client interleaves reads from the socket with check whether * memory.current and memory.stat.sock are similar. 
 */
static int test_memcg_sock(const char *root)
{
	int bind_retries = 5, ret = KSFT_FAIL, pid, err;
	unsigned short port;
	char *memcg;

	memcg = cg_name(root, "memcg_test");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	/* retry with fresh random ports until bind succeeds (or give up) */
	while (bind_retries--) {
		struct tcp_server_args args;

		if (pipe(args.ctl))
			goto cleanup;

		port = args.port = 1000 + rand() % 60000;

		pid = cg_run_nowait(memcg, tcp_server, &args);
		if (pid < 0)
			goto cleanup;

		/* read the server's bind status from the ctl pipe */
		close(args.ctl[1]);
		if (read(args.ctl[0], &err, sizeof(err)) != sizeof(err))
			goto cleanup;
		close(args.ctl[0]);

		if (!err)
			break;
		if (err != EADDRINUSE)
			goto cleanup;

		waitpid(pid, NULL, 0);
	}

	if (err == EADDRINUSE) {
		ret = KSFT_SKIP;
		goto cleanup;
	}

	if (tcp_client(memcg, port) != KSFT_PASS)
		goto cleanup;

	waitpid(pid, &err, 0);
	if (WEXITSTATUS(err))
		goto cleanup;

	if (cg_read_long(memcg, "memory.current") < 0)
		goto cleanup;

	/* with both ends closed, no socket memory may remain charged */
	if (cg_read_key_long(memcg, "memory.stat", "sock "))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	cg_destroy(memcg);
	free(memcg);

	return ret;
}

/*
 * This test disables swapping and tries to allocate anonymous memory
 * up to OOM with memory.group.oom set. Then it checks that all
 * processes in the leaf were killed. It also checks that oom_events
 * were propagated to the parent level.
 */
static int test_memcg_oom_group_leaf_events(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent, *child;
	long parent_oom_events;

	parent = cg_name(root, "memcg_test_0");
	child = cg_name(root, "memcg_test_0/memcg_test_1");

	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	if (cg_write(parent, "cgroup.subtree_control", "+memory"))
		goto cleanup;

	if (cg_write(child, "memory.max", "50M"))
		goto cleanup;

	if (cg_write(child, "memory.swap.max", "0"))
		goto cleanup;

	/* group OOM: one kill must take out every process in the child */
	if (cg_write(child, "memory.oom.group", "1"))
		goto cleanup;

	cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
	cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
	cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
	if (!cg_run(child, alloc_anon, (void *)MB(100)))
		goto cleanup;

	if (cg_test_proc_killed(child))
		goto cleanup;

	if (cg_read_key_long(child, "memory.events", "oom_kill ") <= 0)
		goto cleanup;

	parent_oom_events = cg_read_key_long(
			parent, "memory.events", "oom_kill ");
	/*
	 * If memory_localevents is not enabled (the default), the parent should
	 * count OOM events in its children groups. Otherwise, it should not
	 * have observed any events.
	 */
	if (has_localevents && parent_oom_events != 0)
		goto cleanup;
	else if (!has_localevents && parent_oom_events <= 0)
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);

	return ret;
}

/*
 * This test disables swapping and tries to allocate anonymous memory
 * up to OOM with memory.group.oom set. Then it checks that all
 * processes in the parent and leaf were killed.
 */
static int test_memcg_oom_group_parent_events(const char *root)
{
	int ret = KSFT_FAIL;
	char *parent, *child;

	parent = cg_name(root, "memcg_test_0");
	child = cg_name(root, "memcg_test_0/memcg_test_1");

	if (!parent || !child)
		goto cleanup;

	if (cg_create(parent))
		goto cleanup;

	if (cg_create(child))
		goto cleanup;

	/* the limit and oom.group are set on the PARENT this time */
	if (cg_write(parent, "memory.max", "80M"))
		goto cleanup;

	if (cg_write(parent, "memory.swap.max", "0"))
		goto cleanup;

	if (cg_write(parent, "memory.oom.group", "1"))
		goto cleanup;

	cg_run_nowait(parent, alloc_anon_noexit, (void *) MB(60));
	cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));
	cg_run_nowait(child, alloc_anon_noexit, (void *) MB(1));

	if (!cg_run(child, alloc_anon, (void *)MB(100)))
		goto cleanup;

	/* the group kill must empty both the child and the parent */
	if (cg_test_proc_killed(child))
		goto cleanup;
	if (cg_test_proc_killed(parent))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (child)
		cg_destroy(child);
	if (parent)
		cg_destroy(parent);
	free(child);
	free(parent);

	return ret;
}

/*
 * This test disables swapping and tries to allocate anonymous memory
 * up to OOM with memory.group.oom set.
 * Then it checks that all
 * processes were killed except those set with OOM_SCORE_ADJ_MIN
 */
static int test_memcg_oom_group_score_events(const char *root)
{
	int ret = KSFT_FAIL;
	char *memcg;
	int safe_pid;

	memcg = cg_name(root, "memcg_test_0");
	if (!memcg)
		goto cleanup;

	if (cg_create(memcg))
		goto cleanup;

	if (cg_write(memcg, "memory.max", "50M"))
		goto cleanup;

	if (cg_write(memcg, "memory.swap.max", "0"))
		goto cleanup;

	if (cg_write(memcg, "memory.oom.group", "1"))
		goto cleanup;

	/* this process is shielded from the group kill via OOM_SCORE_ADJ_MIN */
	safe_pid = cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1));
	if (set_oom_adj_score(safe_pid, OOM_SCORE_ADJ_MIN))
		goto cleanup;

	cg_run_nowait(memcg, alloc_anon_noexit, (void *) MB(1))
;
	if (!cg_run(memcg, alloc_anon, (void *)MB(100)))
		goto cleanup;

	if (cg_read_key_long(memcg, "memory.events", "oom_kill ") != 3)
		goto cleanup;

	/* kill(0) succeeding proves the shielded process is still alive */
	if (kill(safe_pid, SIGKILL))
		goto cleanup;

	ret = KSFT_PASS;

cleanup:
	if (memcg)
		cg_destroy(memcg);
	free(memcg);

	return ret;
}

/* pair each test function with its stringified name for the ksft report */
#define T(x) { x, #x }
struct memcg_test {
	int (*fn)(const char *root);
	const char *name;
} tests[] = {
	T(test_memcg_subtree_control),
	T(test_memcg_current_peak),
	T(test_memcg_min),
	T(test_memcg_low),
	T(test_memcg_high),
	T(test_memcg_high_sync),
	T(test_memcg_max),
	T(test_memcg_reclaim),
	T(test_memcg_oom_events),
	T(test_memcg_swap_max_peak),
	T(test_memcg_sock),
	T(test_memcg_oom_group_leaf_events),
	T(test_memcg_oom_group_parent_events),
	T(test_memcg_oom_group_score_events),
};
#undef T

int main(int argc, char **argv)
{
	char root[PATH_MAX];
	int i, proc_status, ret = EXIT_SUCCESS;

	if (cg_find_unified_root(root, sizeof(root), NULL))
		ksft_exit_skip("cgroup v2 isn't mounted\n");

	/*
	 * Check that memory controller is available:
	 * memory is listed in cgroup.controllers
	 */
	if (cg_read_strstr(root, "cgroup.controllers", "memory"))
		ksft_exit_skip("memory controller isn't available\n");

	/* enable the memory controller for children if it isn't already */
	if (cg_read_strstr(root, "cgroup.subtree_control", "memory"))
		if (cg_write(root, "cgroup.subtree_control", "+memory"))
			ksft_exit_skip("Failed to set memory controller\n");

	/* record which cgroup mount options affect event propagation */
	proc_status = proc_mount_contains("memory_recursiveprot");
	if (proc_status < 0)
		ksft_exit_skip("Failed to query cgroup mount option\n");
	has_recursiveprot = proc_status;

	proc_status = proc_mount_contains("memory_localevents");
	if (proc_status < 0)
		ksft_exit_skip("Failed to query cgroup mount option\n");
	has_localevents = proc_status;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		switch (tests[i].fn(root)) {
		case KSFT_PASS:
			ksft_test_result_pass("%s\n", tests[i].name);
			break;
		case KSFT_SKIP:
			ksft_test_result_skip("%s\n", tests[i].name);
			break;
		default:
			ret = EXIT_FAILURE;
			ksft_test_result_fail("%s\n", tests[i].name);
			break;
		}
	}

	return ret;
}
// SPDX-License-Identifier: GPL-2.0
#include "qcom-ipq8064.dtsi"
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>

/ {
	model = "Qualcomm Technologies, Inc. IPQ8064-v1.0";

	aliases {
		serial0 = &gsbi4_serial;
	};

	chosen {
		stdout-path = "serial0:115200n8";
	};

	/* reset and WPS push buttons on GPIOs 54 and 65 */
	gpio-keys {
		compatible = "gpio-keys";
		pinctrl-0 = <&buttons_pins>;
		pinctrl-names = "default";

		button-1 {
			label = "reset";
			linux,code = <KEY_RESTART>;
			gpios = <&qcom_pinmux 54 GPIO_ACTIVE_LOW>;
			linux,input-type = <1>;
			debounce-interval = <60>;
		};

		button-2 {
			label = "wps";
			linux,code = <KEY_WPS_BUTTON>;
			gpios = <&qcom_pinmux 65 GPIO_ACTIVE_LOW>;
			linux,input-type = <1>;
			debounce-interval = <60>;
		};
	};

	/* board status, USB and SATA indicator LEDs */
	leds {
		compatible = "gpio-leds";
		pinctrl-0 = <&leds_pins>;
		pinctrl-names = "default";

		led-0 {
			label = "led_usb1";
			gpios = <&qcom_pinmux 7 GPIO_ACTIVE_HIGH>;
			linux,default-trigger = "usbdev";
			default-state = "off";
		};

		led-1 {
			label = "led_usb3";
			gpios = <&qcom_pinmux 8 GPIO_ACTIVE_HIGH>;
			linux,default-trigger = "usbdev";
			default-state = "off";
		};

		led-2 {
			label = "status_led_fail";
			function = LED_FUNCTION_STATUS;
			gpios = <&qcom_pinmux 9 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		led-3 {
			label = "sata_led";
			gpios = <&qcom_pinmux 26 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};

		led-4 {
			label = "status_led_pass";
			function = LED_FUNCTION_STATUS;
			gpios = <&qcom_pinmux 53 GPIO_ACTIVE_HIGH>;
			default-state = "off";
		};
	};

	soc {
		/* GSBI4 in I2C/UART mode provides the debug console */
		gsbi@16300000 {
			qcom,mode = <GSBI_PROT_I2C_UART>;
			status = "okay";

			serial@16340000 {
				status = "okay";
			};
		};

		/* GSBI5 in SPI mode carries the boot NOR flash */
		gsbi5: gsbi@1a200000 {
			qcom,mode = <GSBI_PROT_SPI>;
			status = "okay";

			spi4: spi@1a280000 {
				status = "okay";
				pinctrl-0 = <&spi_pins>;
				pinctrl-names = "default";
				cs-gpios = <&qcom_pinmux 20 0>;

				flash: flash@0 {
					compatible = "s25fl256s1";
					#address-cells = <1>;
					#size-cells = <1>;
					spi-max-frequency = <50000000>;
					reg = <0>;

					partition@0 {
						label = "rootfs";
						reg = <0x0 0x1000000>;
					};

					partition@1 {
						label = "scratch";
						reg = <0x1000000 0x1000000>;
					};
				};
			};
		};

		sata-phy@1b400000 {
			status = "okay";
		};

		/* single-port AHCI SATA controller */
		sata@29000000 {
			ports-implemented = <0x1>;
			status = "okay";
		};
	};
};
/* SPDX-License-Identifier: GPL-2.0 */ /* * Imagination E5010 JPEG Encoder driver. * * Copyright (C) 2023 Texas Instruments Incorporated - https://www.ti.com/ * * Author: David Huang <[email protected]> * Author: Devarsh Thakkar <[email protected]> */ #ifndef _E5010_CORE_REGS_H #define _E5010_CORE_REGS_H #define JASPER_CORE_ID_OFFSET (0x0000) #define JASPER_CORE_ID_CR_GROUP_ID_MASK (0xFF000000) #define JASPER_CORE_ID_CR_GROUP_ID_SHIFT (24) #define JASPER_CORE_ID_CR_CORE_ID_MASK (0x00FF0000) #define JASPER_CORE_ID_CR_CORE_ID_SHIFT (16) #define JASPER_CORE_ID_CR_UNIQUE_NUM_MASK (0x0000FFF8) #define JASPER_CORE_ID_CR_UNIQUE_NUM_SHIFT (3) #define JASPER_CORE_ID_CR_PELS_PER_CYCLE_MASK (0x00000007) #define JASPER_CORE_ID_CR_PELS_PER_CYCLE_SHIFT (0) #define JASPER_CORE_REV_OFFSET (0x0004) #define JASPER_CORE_REV_CR_JASPER_DESIGNER_MASK (0xFF000000) #define JASPER_CORE_REV_CR_JASPER_DESIGNER_SHIFT (24) #define JASPER_CORE_REV_CR_JASPER_MAJOR_REV_MASK (0x00FF0000) #define JASPER_CORE_REV_CR_JASPER_MAJOR_REV_SHIFT (16) #define JASPER_CORE_REV_CR_JASPER_MINOR_REV_MASK (0x0000FF00) #define JASPER_CORE_REV_CR_JASPER_MINOR_REV_SHIFT (8) #define JASPER_CORE_REV_CR_JASPER_MAINT_REV_MASK (0x000000FF) #define JASPER_CORE_REV_CR_JASPER_MAINT_REV_SHIFT (0) #define JASPER_INTERRUPT_MASK_OFFSET (0x0008) #define JASPER_INTERRUPT_MASK_CR_OUTPUT_ADDRESS_ERROR_ENABLE_MASK (0x00000002) #define JASPER_INTERRUPT_MASK_CR_OUTPUT_ADDRESS_ERROR_ENABLE_SHIFT (1) #define JASPER_INTERRUPT_MASK_CR_PICTURE_DONE_ENABLE_MASK (0x00000001) #define JASPER_INTERRUPT_MASK_CR_PICTURE_DONE_ENABLE_SHIFT (0) #define JASPER_INTERRUPT_STATUS_OFFSET (0x000C) #define JASPER_INTERRUPT_STATUS_CR_OUTPUT_ADDRESS_ERROR_IRQ_MASK (0x00000002) #define JASPER_INTERRUPT_STATUS_CR_OUTPUT_ADDRESS_ERROR_IRQ_SHIFT (1) #define JASPER_INTERRUPT_STATUS_CR_PICTURE_DONE_IRQ_MASK (0x00000001) #define JASPER_INTERRUPT_STATUS_CR_PICTURE_DONE_IRQ_SHIFT (0) #define JASPER_INTERRUPT_CLEAR_OFFSET (0x0010) #define 
JASPER_INTERRUPT_CLEAR_CR_OUTPUT_ERROR_CLEAR_MASK (0x00000002) #define JASPER_INTERRUPT_CLEAR_CR_OUTPUT_ERROR_CLEAR_SHIFT (1) #define JASPER_INTERRUPT_CLEAR_CR_PICTURE_DONE_CLEAR_MASK (0x00000001) #define JASPER_INTERRUPT_CLEAR_CR_PICTURE_DONE_CLEAR_SHIFT (0) #define JASPER_CLK_CONTROL_OFFSET (0x0014) #define JASPER_CLK_CONTROL_CR_JASPER_AUTO_CLKG_ENABLE_MASK (0x00000002) #define JASPER_CLK_CONTROL_CR_JASPER_AUTO_CLKG_ENABLE_SHIFT (1) #define JASPER_CLK_CONTROL_CR_JASPER_MAN_CLKG_ENABLE_MASK (0x00000001) #define JASPER_CLK_CONTROL_CR_JASPER_MAN_CLKG_ENABLE_SHIFT (0) #define JASPER_CLK_STATUS_OFFSET (0x0018) #define JASPER_CLK_STATUS_CR_JASPER_CLKG_STATUS_MASK (0x00000001) #define JASPER_CLK_STATUS_CR_JASPER_CLKG_STATUS_SHIFT (0) #define JASPER_RESET_OFFSET (0x001C) #define JASPER_RESET_CR_SYS_RESET_MASK (0x00000002) #define JASPER_RESET_CR_SYS_RESET_SHIFT (1) #define JASPER_RESET_CR_CORE_RESET_MASK (0x00000001) #define JASPER_RESET_CR_CORE_RESET_SHIFT (0) #define JASPER_CORE_CTRL_OFFSET (0x0020) #define JASPER_CORE_CTRL_CR_JASPER_ENCODE_START_MASK (0x00000001) #define JASPER_CORE_CTRL_CR_JASPER_ENCODE_START_SHIFT (0) #define JASPER_STATUS_OFFSET (0x0024) #define JASPER_STATUS_CR_FLUSH_MODE_MASK (0x00000002) #define JASPER_STATUS_CR_FLUSH_MODE_SHIFT (1) #define JASPER_STATUS_CR_JASPER_BUSY_MASK (0x00000001) #define JASPER_STATUS_CR_JASPER_BUSY_SHIFT (0) #define JASPER_CRC_CLEAR_OFFSET (0x0028) #define JASPER_CRC_CLEAR_CR_FRONT_END_CRC_CLEAR_MASK (0x00000001) #define JASPER_CRC_CLEAR_CR_FRONT_END_CRC_CLEAR_SHIFT (0) #define JASPER_CRC_CLEAR_CR_DCT_CRC_CLEAR_MASK (0x00000002) #define JASPER_CRC_CLEAR_CR_DCT_CRC_CLEAR_SHIFT (1) #define JASPER_CRC_CLEAR_CR_ZZ_CRC_CLEAR_MASK (0x00000004) #define JASPER_CRC_CLEAR_CR_ZZ_CRC_CLEAR_SHIFT (2) #define JASPER_CRC_CLEAR_CR_QUANT_CRC_CLEAR_MASK (0x00000008) #define JASPER_CRC_CLEAR_CR_QUANT_CRC_CLEAR_SHIFT (3) #define JASPER_CRC_CLEAR_CR_ENTROPY_ENCODER_CRC_CLEAR_MASK (0x00000010) #define 
JASPER_CRC_CLEAR_CR_ENTROPY_ENCODER_CRC_CLEAR_SHIFT (4) #define JASPER_CRC_CLEAR_CR_PACKING_BUFFER_CRC_CLEAR_MASK (0x00000020) #define JASPER_CRC_CLEAR_CR_PACKING_BUFFER_CRC_CLEAR_SHIFT (5) #define JASPER_INPUT_CTRL0_OFFSET (0x002C) #define JASPER_INPUT_CTRL0_CR_INPUT_CHROMA_ORDER_MASK (0x01000000) #define JASPER_INPUT_CTRL0_CR_INPUT_CHROMA_ORDER_SHIFT (24) #define JASPER_INPUT_CTRL0_CR_INPUT_SUBSAMPLING_MASK (0x00030000) #define JASPER_INPUT_CTRL0_CR_INPUT_SUBSAMPLING_SHIFT (16) #define JASPER_INPUT_CTRL0_CR_INPUT_SOURCE_MASK (0x00000004) #define JASPER_INPUT_CTRL0_CR_INPUT_SOURCE_SHIFT (2) #define JASPER_INPUT_CTRL1_OFFSET (0x0030) #define JASPER_INPUT_CTRL1_CR_INPUT_LUMA_STRIDE_MASK (0x1FC00000) #define JASPER_INPUT_CTRL1_CR_INPUT_LUMA_STRIDE_SHIFT (22) #define JASPER_INPUT_CTRL1_CR_INPUT_CHROMA_STRIDE_MASK (0x00001FC0) #define JASPER_INPUT_CTRL1_CR_INPUT_CHROMA_STRIDE_SHIFT (6) #define JASPER_MMU_CTRL_OFFSET (0x0034) #define JASPER_MMU_CTRL_CR_JASPER_TILING_SCHEME_MASK (0x00000002) #define JASPER_MMU_CTRL_CR_JASPER_TILING_SCHEME_SHIFT (1) #define JASPER_MMU_CTRL_CR_JASPER_TILING_ENABLE_MASK (0x00000001) #define JASPER_MMU_CTRL_CR_JASPER_TILING_ENABLE_SHIFT (0) #define JASPER_IMAGE_SIZE_OFFSET (0x0038) #define JASPER_IMAGE_SIZE_CR_IMAGE_VERTICAL_SIZE_MASK (0x1FFF0000) #define JASPER_IMAGE_SIZE_CR_IMAGE_VERTICAL_SIZE_SHIFT (16) #define JASPER_IMAGE_SIZE_CR_IMAGE_HORIZONTAL_SIZE_MASK (0x00001FFF) #define JASPER_IMAGE_SIZE_CR_IMAGE_HORIZONTAL_SIZE_SHIFT (0) #define INPUT_LUMA_BASE_OFFSET (0x003C) #define INPUT_LUMA_BASE_CR_INPUT_LUMA_BASE_MASK (0xFFFFFFC0) #define INPUT_LUMA_BASE_CR_INPUT_LUMA_BASE_SHIFT (6) #define INPUT_CHROMA_BASE_OFFSET (0x0040) #define INPUT_CHROMA_BASE_CR_INPUT_CHROMA_BASE_MASK (0xFFFFFFC0) #define INPUT_CHROMA_BASE_CR_INPUT_CHROMA_BASE_SHIFT (6) #define JASPER_OUTPUT_BASE_OFFSET (0x0044) #define JASPER_OUTPUT_BASE_CR_OUTPUT_BASE_MASK (0xFFFFFFFF) #define JASPER_OUTPUT_BASE_CR_OUTPUT_BASE_SHIFT (0) #define JASPER_OUTPUT_SIZE_OFFSET (0x0048) 
#define JASPER_OUTPUT_SIZE_CR_OUTPUT_SIZE_MASK (0xFFFFFFFF) #define JASPER_OUTPUT_SIZE_CR_OUTPUT_SIZE_SHIFT (0) #define JASPER_OUTPUT_MAX_SIZE_OFFSET (0x004C) #define JASPER_OUTPUT_MAX_SIZE_CR_OUTPUT_MAX_SIZE_MASK (0xFFFFFFFF) #define JASPER_OUTPUT_MAX_SIZE_CR_OUTPUT_MAX_SIZE_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE0_OFFSET (0x0050) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_03_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_03_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_02_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_02_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_01_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_01_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_00_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE0_CR_LUMA_QUANTIZATION_TABLE_00_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE1_OFFSET (0x0054) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_07_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_07_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_06_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_06_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_05_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_05_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_04_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE1_CR_LUMA_QUANTIZATION_TABLE_04_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE2_OFFSET (0x0058) #define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_13_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_13_SHIFT (24) #define 
/*
 * Luma quantization table registers (continued): each 32-bit register
 * packs four 8-bit coefficients of the 8x8 luma quantization table,
 * byte lane 3 at bits [31:24] down to lane 0 at bits [7:0].
 */
#define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_12_MASK (0x00FF0000)
#define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_12_SHIFT (16)
#define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_11_MASK (0x0000FF00)
#define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_11_SHIFT (8)
#define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_10_MASK (0x000000FF)
#define JASPER_LUMA_QUANTIZATION_TABLE2_CR_LUMA_QUANTIZATION_TABLE_10_SHIFT (0)

#define JASPER_LUMA_QUANTIZATION_TABLE3_OFFSET (0x005C)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_17_MASK (0xFF000000)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_17_SHIFT (24)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_16_MASK (0x00FF0000)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_16_SHIFT (16)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_15_MASK (0x0000FF00)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_15_SHIFT (8)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_14_MASK (0x000000FF)
#define JASPER_LUMA_QUANTIZATION_TABLE3_CR_LUMA_QUANTIZATION_TABLE_14_SHIFT (0)

#define JASPER_LUMA_QUANTIZATION_TABLE4_OFFSET (0x0060)
/*
 * Fix: the _23 and _22 lane definitions were missing from TABLE4; every
 * other quantization table register defines all four byte lanes
 * (xx3..xx0).  Restored following the uniform pattern (lane 3 at bits
 * [31:24], lane 2 at bits [23:16]).
 */
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_23_MASK (0xFF000000)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_23_SHIFT (24)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_22_MASK (0x00FF0000)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_22_SHIFT (16)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_21_MASK (0x0000FF00)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_21_SHIFT (8)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_20_MASK (0x000000FF)
#define JASPER_LUMA_QUANTIZATION_TABLE4_CR_LUMA_QUANTIZATION_TABLE_20_SHIFT (0)

#define JASPER_LUMA_QUANTIZATION_TABLE5_OFFSET (0x0064)
#define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_27_MASK (0xFF000000)
#define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_27_SHIFT (24)
#define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_26_MASK (0x00FF0000)
JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_26_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_25_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_25_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_24_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE5_CR_LUMA_QUANTIZATION_TABLE_24_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE6_OFFSET (0x0068) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_33_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_33_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_32_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_32_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_31_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_31_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_30_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE6_CR_LUMA_QUANTIZATION_TABLE_30_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE7_OFFSET (0x006C) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_37_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_37_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_36_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_36_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_35_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_35_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_34_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE7_CR_LUMA_QUANTIZATION_TABLE_34_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE8_OFFSET (0x0070) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_43_MASK 
(0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_43_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_42_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_42_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_41_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_41_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_40_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE8_CR_LUMA_QUANTIZATION_TABLE_40_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE9_OFFSET (0x0074) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_47_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_47_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_46_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_46_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_45_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_45_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_44_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE9_CR_LUMA_QUANTIZATION_TABLE_44_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE10_OFFSET (0x0078) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_53_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_53_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_52_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_52_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_51_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_51_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_50_MASK (0x000000FF) #define 
JASPER_LUMA_QUANTIZATION_TABLE10_CR_LUMA_QUANTIZATION_TABLE_50_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE11_OFFSET (0x007C) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_57_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_57_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_56_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_56_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_55_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_55_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_54_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE11_CR_LUMA_QUANTIZATION_TABLE_54_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE12_OFFSET (0x0080) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_63_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_63_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_62_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_62_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_61_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_61_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_60_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE12_CR_LUMA_QUANTIZATION_TABLE_60_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE13_OFFSET (0x0084) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_67_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_67_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_66_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_66_SHIFT (16) #define 
JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_65_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_65_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_64_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE13_CR_LUMA_QUANTIZATION_TABLE_64_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE14_OFFSET (0x0088) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_73_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_73_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_72_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_72_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_71_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_71_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_70_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE14_CR_LUMA_QUANTIZATION_TABLE_70_SHIFT (0) #define JASPER_LUMA_QUANTIZATION_TABLE15_OFFSET (0x008C) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_77_MASK (0xFF000000) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_77_SHIFT (24) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_76_MASK (0x00FF0000) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_76_SHIFT (16) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_75_MASK (0x0000FF00) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_75_SHIFT (8) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_74_MASK (0x000000FF) #define JASPER_LUMA_QUANTIZATION_TABLE15_CR_LUMA_QUANTIZATION_TABLE_74_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE0_OFFSET (0x0090) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_03_MASK (0xFF000000) #define 
JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_03_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_02_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_02_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_01_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_01_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_00_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE0_CR_CHROMA_QUANTIZATION_TABLE_00_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE1_OFFSET (0x0094) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_07_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_07_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_06_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_06_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_05_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_05_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_04_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE1_CR_CHROMA_QUANTIZATION_TABLE_04_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE2_OFFSET (0x0098) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_13_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_13_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_12_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_12_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_11_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_11_SHIFT (8) #define 
JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_10_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE2_CR_CHROMA_QUANTIZATION_TABLE_10_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE3_OFFSET (0x009C) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_17_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_17_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_16_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_16_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_15_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_15_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_14_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE3_CR_CHROMA_QUANTIZATION_TABLE_14_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE4_OFFSET (0x00A0) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_23_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_23_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_22_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_22_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_21_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_21_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_20_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE4_CR_CHROMA_QUANTIZATION_TABLE_20_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE5_OFFSET (0x00A4) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_27_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_27_SHIFT (24) #define 
JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_26_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_26_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_25_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_25_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_24_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE5_CR_CHROMA_QUANTIZATION_TABLE_24_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE6_OFFSET (0x00A8) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_33_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_33_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_32_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_32_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_31_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_31_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_30_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE6_CR_CHROMA_QUANTIZATION_TABLE_30_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE7_OFFSET (0x00AC) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_37_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_37_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_36_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_36_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_35_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_35_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_34_MASK (0x000000FF) #define 
JASPER_CHROMA_QUANTIZATION_TABLE7_CR_CHROMA_QUANTIZATION_TABLE_34_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE8_OFFSET (0x00B0) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_43_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_43_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_42_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_42_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_41_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_41_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_40_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE8_CR_CHROMA_QUANTIZATION_TABLE_40_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE9_OFFSET (0x00B4) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_47_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_47_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_46_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_46_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_45_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_45_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_44_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE9_CR_CHROMA_QUANTIZATION_TABLE_44_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE10_OFFSET (0x00B8) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_53_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_53_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_52_MASK (0x00FF0000) #define 
JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_52_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_51_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_51_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_50_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE10_CR_CHROMA_QUANTIZATION_TABLE_50_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE11_OFFSET (0x00BC) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_57_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_57_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_56_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_56_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_55_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_55_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_54_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE11_CR_CHROMA_QUANTIZATION_TABLE_54_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE12_OFFSET (0x00C0) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_63_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_63_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_62_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_62_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_61_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_61_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_60_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE12_CR_CHROMA_QUANTIZATION_TABLE_60_SHIFT (0) #define 
JASPER_CHROMA_QUANTIZATION_TABLE13_OFFSET (0x00C4) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_67_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_67_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_66_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_66_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_65_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_65_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_64_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE13_CR_CHROMA_QUANTIZATION_TABLE_64_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE14_OFFSET (0x00C8) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_73_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_73_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_72_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_72_SHIFT (16) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_71_MASK (0x0000FF00) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_71_SHIFT (8) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_70_MASK (0x000000FF) #define JASPER_CHROMA_QUANTIZATION_TABLE14_CR_CHROMA_QUANTIZATION_TABLE_70_SHIFT (0) #define JASPER_CHROMA_QUANTIZATION_TABLE15_OFFSET (0x00CC) #define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_77_MASK (0xFF000000) #define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_77_SHIFT (24) #define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_76_MASK (0x00FF0000) #define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_76_SHIFT (16) #define 
JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_75_MASK (0x0000FF00)
#define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_75_SHIFT (8)
#define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_74_MASK (0x000000FF)
#define JASPER_CHROMA_QUANTIZATION_TABLE15_CR_CHROMA_QUANTIZATION_TABLE_74_SHIFT (0)

/* Global enable for the per-stage debug CRC accumulators below. */
#define JASPER_CRC_CTRL_OFFSET (0x00D0)
#define JASPER_CRC_CTRL_JASPER_CRC_ENABLE_MASK (0x00000001)
#define JASPER_CRC_CTRL_JASPER_CRC_ENABLE_SHIFT (0)

/* Per-pipeline-stage CRC readback registers (full 32-bit values). */
#define JASPER_FRONT_END_CRC_OFFSET (0x00D4)
#define JASPER_FRONT_END_CRC_CR_JASPER_FRONT_END_CRC_OUT_MASK (0xFFFFFFFF)
#define JASPER_FRONT_END_CRC_CR_JASPER_FRONT_END_CRC_OUT_SHIFT (0)
#define JASPER_DCT_CRC_OFFSET (0x00D8)
#define JASPER_DCT_CRC_CR_JASPER_DCT_CRC_OUT_MASK (0xFFFFFFFF)
#define JASPER_DCT_CRC_CR_JASPER_DCT_CRC_OUT_SHIFT (0)
#define JASPER_ZZ_CRC_OFFSET (0x00DC)
#define JASPER_ZZ_CRC_CR_JASPER_ZZ_CRC_OUT_MASK (0xFFFFFFFF)
#define JASPER_ZZ_CRC_CR_JASPER_ZZ_CRC_OUT_SHIFT (0)
#define JASPER_QUANT_CRC_OFFSET (0x00E0)
#define JASPER_QUANT_CRC_CR_JASPER_QUANT_CRC_OUT_MASK (0xFFFFFFFF)
#define JASPER_QUANT_CRC_CR_JASPER_QUANT_CRC_OUT_SHIFT (0)
#define JASPER_ENTROPY_ENCODER_CRC_OFFSET (0x00E4)
#define JASPER_ENTROPY_ENCODER_CRC_CR_JASPER_ENTROPY_CRC_OUT_MASK (0xFFFFFFFF)
#define JASPER_ENTROPY_ENCODER_CRC_CR_JASPER_ENTROPY_CRC_OUT_SHIFT (0)
#define JASPER_PACKING_BUFFER_DATA_CRC_OFFSET (0x00E8)
#define JASPER_PACKING_BUFFER_DATA_CRC_CR_JASPER_PACKING_DATA_CRC_OUT_MASK (0xFFFFFFFF)
#define JASPER_PACKING_BUFFER_DATA_CRC_CR_JASPER_PACKING_DATA_CRC_OUT_SHIFT (0)
#define JASPER_PACKING_BUFFER_ADDR_CRC_OFFSET (0x00EC)
#define JASPER_PACKING_BUFFER_ADDR_CRC_CR_JASPER_PACKING_ADDR_OUT_CRC_MASK (0xFFFFFFFF)
#define JASPER_PACKING_BUFFER_ADDR_CRC_CR_JASPER_PACKING_ADDR_OUT_CRC_SHIFT (0)

/* Total size in bytes of the JASPER register block. */
#define JASPER_CORE_BYTE_SIZE (0x00F0)

#endif
/*
 * drivers/net/ethernet/freescale/fec_mpc52xx.h
 *
 * Driver for the MPC5200 Fast Ethernet Controller
 *
 * Author: Dale Farnsworth <[email protected]>
 *
 * 2003-2004 (c) MontaVista, Software, Inc. This file is licensed under
 * the terms of the GNU General Public License version 2. This program
 * is licensed "as is" without any warranty of any kind, whether express
 * or implied.
 */

#ifndef __DRIVERS_NET_MPC52XX_FEC_H__
#define __DRIVERS_NET_MPC52XX_FEC_H__

#include <linux/phy.h>

/* Tunable constant */
/* FEC_RX_BUFFER_SIZE includes 4 bytes for CRC32 */
#define FEC_RX_BUFFER_SIZE	1522	/* max receive packet size */
#define FEC_RX_NUM_BD		256	/* number of receive buffer descriptors */
#define FEC_TX_NUM_BD		64	/* number of transmit buffer descriptors */

#define FEC_RESET_DELAY		50	/* uS */

#define FEC_WATCHDOG_TIMEOUT	((400*HZ)/1000)	/* 400 ms TX watchdog */

/* ======================================================================== */
/* Hardware register sets & bits                                            */
/* ======================================================================== */

/*
 * Memory-mapped register layout of the MPC5200 FEC.  Field order and the
 * reservedN padding arrays are offset-critical; each field's comment gives
 * its byte offset from the FEC base.  Do not reorder or resize anything.
 * (NOTE(review): "ivent_set" at 0x020 keeps its historical spelling.)
 */
struct mpc52xx_fec {
	u32 fec_id;			/* FEC + 0x000 */
	u32 ievent;			/* FEC + 0x004 */
	u32 imask;			/* FEC + 0x008 */

	u32 reserved0[1];		/* FEC + 0x00C */
	u32 r_des_active;		/* FEC + 0x010 */
	u32 x_des_active;		/* FEC + 0x014 */
	u32 r_des_active_cl;		/* FEC + 0x018 */
	u32 x_des_active_cl;		/* FEC + 0x01C */
	u32 ivent_set;			/* FEC + 0x020 */
	u32 ecntrl;			/* FEC + 0x024 */

	u32 reserved1[6];		/* FEC + 0x028-03C */

	/* MII management interface */
	u32 mii_data;			/* FEC + 0x040 */
	u32 mii_speed;			/* FEC + 0x044 */
	u32 mii_status;			/* FEC + 0x048 */

	u32 reserved2[5];		/* FEC + 0x04C-05C */

	/* MIB counter access */
	u32 mib_data;			/* FEC + 0x060 */
	u32 mib_control;		/* FEC + 0x064 */

	u32 reserved3[6];		/* FEC + 0x068-7C */

	/* Receive side control/test */
	u32 r_activate;			/* FEC + 0x080 */
	u32 r_cntrl;			/* FEC + 0x084 */
	u32 r_hash;			/* FEC + 0x088 */
	u32 r_data;			/* FEC + 0x08C */
	u32 ar_done;			/* FEC + 0x090 */
	u32 r_test;			/* FEC + 0x094 */
	u32 r_mib;			/* FEC + 0x098 */
	u32 r_da_low;			/* FEC + 0x09C */
	u32 r_da_high;			/* FEC + 0x0A0 */

	u32 reserved4[7];		/* FEC + 0x0A4-0BC */

	/* Transmit side control/test */
	u32 x_activate;			/* FEC + 0x0C0 */
	u32 x_cntrl;			/* FEC + 0x0C4 */
	u32 backoff;			/* FEC + 0x0C8 */
	u32 x_data;			/* FEC + 0x0CC */
	u32 x_status;			/* FEC + 0x0D0 */
	u32 x_mib;			/* FEC + 0x0D4 */
	u32 x_test;			/* FEC + 0x0D8 */
	u32 fdxfc_da1;			/* FEC + 0x0DC */
	u32 fdxfc_da2;			/* FEC + 0x0E0 */

	/* Station address and pause frames */
	u32 paddr1;			/* FEC + 0x0E4 */
	u32 paddr2;			/* FEC + 0x0E8 */
	u32 op_pause;			/* FEC + 0x0EC */

	u32 reserved5[4];		/* FEC + 0x0F0-0FC */
	u32 instr_reg;			/* FEC + 0x100 */
	u32 context_reg;		/* FEC + 0x104 */
	u32 test_cntrl;			/* FEC + 0x108 */
	u32 acc_reg;			/* FEC + 0x10C */
	u32 ones;			/* FEC + 0x110 */
	u32 zeros;			/* FEC + 0x114 */
	u32 iaddr1;			/* FEC + 0x118 */
	u32 iaddr2;			/* FEC + 0x11C */
	u32 gaddr1;			/* FEC + 0x120 */
	u32 gaddr2;			/* FEC + 0x124 */
	u32 random;			/* FEC + 0x128 */
	u32 rand1;			/* FEC + 0x12C */
	u32 tmp;			/* FEC + 0x130 */

	u32 reserved6[3];		/* FEC + 0x134-13C */

	/* FIFO management */
	u32 fifo_id;			/* FEC + 0x140 */
	u32 x_wmrk;			/* FEC + 0x144 */
	u32 fcntrl;			/* FEC + 0x148 */
	u32 r_bound;			/* FEC + 0x14C */
	u32 r_fstart;			/* FEC + 0x150 */
	u32 r_count;			/* FEC + 0x154 */
	u32 r_lag;			/* FEC + 0x158 */
	u32 r_read;			/* FEC + 0x15C */
	u32 r_write;			/* FEC + 0x160 */
	u32 x_count;			/* FEC + 0x164 */
	u32 x_lag;			/* FEC + 0x168 */
	u32 x_retry;			/* FEC + 0x16C */
	u32 x_write;			/* FEC + 0x170 */
	u32 x_read;			/* FEC + 0x174 */

	u32 reserved7[2];		/* FEC + 0x178-17C */

	u32 fm_cntrl;			/* FEC + 0x180 */
	u32 rfifo_data;			/* FEC + 0x184 */
	u32 rfifo_status;		/* FEC + 0x188 */
	u32 rfifo_cntrl;		/* FEC + 0x18C */
	u32 rfifo_lrf_ptr;		/* FEC + 0x190 */
	u32 rfifo_lwf_ptr;		/* FEC + 0x194 */
	u32 rfifo_alarm;		/* FEC + 0x198 */
	u32 rfifo_rdptr;		/* FEC + 0x19C */
	u32 rfifo_wrptr;		/* FEC + 0x1A0 */
	u32 tfifo_data;			/* FEC + 0x1A4 */
	u32 tfifo_status;		/* FEC + 0x1A8 */
	u32 tfifo_cntrl;		/* FEC + 0x1AC */
	u32 tfifo_lrf_ptr;		/* FEC + 0x1B0 */
	u32 tfifo_lwf_ptr;		/* FEC + 0x1B4 */
	u32 tfifo_alarm;		/* FEC + 0x1B8 */
	u32 tfifo_rdptr;		/* FEC + 0x1BC */
	u32 tfifo_wrptr;		/* FEC + 0x1C0 */

	u32 reset_cntrl;		/* FEC + 0x1C4 */
	u32 xmit_fsm;			/* FEC + 0x1C8 */

	u32 reserved8[3];		/* FEC + 0x1CC-1D4 */
	u32 rdes_data0;			/* FEC + 0x1D8 */
	u32 rdes_data1;			/* FEC + 0x1DC */
	u32 r_length;			/* FEC + 0x1E0 */
	u32 x_length;			/* FEC + 0x1E4 */
	u32 x_addr;			/* FEC + 0x1E8 */
	u32 cdes_data;			/* FEC + 0x1EC */
	u32 status;			/* FEC + 0x1F0 */
	u32 dma_control;		/* FEC + 0x1F4 */
	u32 des_cmnd;			/* FEC + 0x1F8 */
	u32 data;			/* FEC + 0x1FC */

	/* RMON/IEEE transmit statistics counters */
	u32 rmon_t_drop;		/* FEC + 0x200 */
	u32 rmon_t_packets;		/* FEC + 0x204 */
	u32 rmon_t_bc_pkt;		/* FEC + 0x208 */
	u32 rmon_t_mc_pkt;		/* FEC + 0x20C */
	u32 rmon_t_crc_align;		/* FEC + 0x210 */
	u32 rmon_t_undersize;		/* FEC + 0x214 */
	u32 rmon_t_oversize;		/* FEC + 0x218 */
	u32 rmon_t_frag;		/* FEC + 0x21C */
	u32 rmon_t_jab;			/* FEC + 0x220 */
	u32 rmon_t_col;			/* FEC + 0x224 */
	u32 rmon_t_p64;			/* FEC + 0x228 */
	u32 rmon_t_p65to127;		/* FEC + 0x22C */
	u32 rmon_t_p128to255;		/* FEC + 0x230 */
	u32 rmon_t_p256to511;		/* FEC + 0x234 */
	u32 rmon_t_p512to1023;		/* FEC + 0x238 */
	u32 rmon_t_p1024to2047;		/* FEC + 0x23C */
	u32 rmon_t_p_gte2048;		/* FEC + 0x240 */
	u32 rmon_t_octets;		/* FEC + 0x244 */
	u32 ieee_t_drop;		/* FEC + 0x248 */
	u32 ieee_t_frame_ok;		/* FEC + 0x24C */
	u32 ieee_t_1col;		/* FEC + 0x250 */
	u32 ieee_t_mcol;		/* FEC + 0x254 */
	u32 ieee_t_def;			/* FEC + 0x258 */
	u32 ieee_t_lcol;		/* FEC + 0x25C */
	u32 ieee_t_excol;		/* FEC + 0x260 */
	u32 ieee_t_macerr;		/* FEC + 0x264 */
	u32 ieee_t_cserr;		/* FEC + 0x268 */
	u32 ieee_t_sqe;			/* FEC + 0x26C */
	u32 t_fdxfc;			/* FEC + 0x270 */
	u32 ieee_t_octets_ok;		/* FEC + 0x274 */

	u32 reserved9[2];		/* FEC + 0x278-27C */

	/* RMON/IEEE receive statistics counters */
	u32 rmon_r_drop;		/* FEC + 0x280 */
	u32 rmon_r_packets;		/* FEC + 0x284 */
	u32 rmon_r_bc_pkt;		/* FEC + 0x288 */
	u32 rmon_r_mc_pkt;		/* FEC + 0x28C */
	u32 rmon_r_crc_align;		/* FEC + 0x290 */
	u32 rmon_r_undersize;		/* FEC + 0x294 */
	u32 rmon_r_oversize;		/* FEC + 0x298 */
	u32 rmon_r_frag;		/* FEC + 0x29C */
	u32 rmon_r_jab;			/* FEC + 0x2A0 */
	u32 rmon_r_resvd_0;		/* FEC + 0x2A4 */
	u32 rmon_r_p64;			/* FEC + 0x2A8 */
	u32 rmon_r_p65to127;		/* FEC + 0x2AC */
	u32 rmon_r_p128to255;		/* FEC + 0x2B0 */
	u32 rmon_r_p256to511;		/* FEC + 0x2B4 */
	u32 rmon_r_p512to1023;		/* FEC + 0x2B8 */
	u32 rmon_r_p1024to2047;		/* FEC + 0x2BC */
	u32 rmon_r_p_gte2048;		/* FEC + 0x2C0 */
	u32 rmon_r_octets;		/* FEC + 0x2C4 */
	u32 ieee_r_drop;		/* FEC + 0x2C8 */
	u32 ieee_r_frame_ok;		/* FEC + 0x2CC */
	u32 ieee_r_crc;			/* FEC + 0x2D0 */
	u32 ieee_r_align;		/* FEC + 0x2D4 */
	u32 r_macerr;			/* FEC + 0x2D8 */
	u32 r_fdxfc;			/* FEC + 0x2DC */
	u32 ieee_r_octets_ok;		/* FEC + 0x2E0 */

	u32 reserved10[7];		/* FEC + 0x2E4-2FC */
	u32 reserved11[64];		/* FEC + 0x300-3FF */
};

/* MIB control */
#define	FEC_MIB_DISABLE			0x80000000

/* Interrupt event (ievent) bits */
#define	FEC_IEVENT_HBERR		0x80000000
#define	FEC_IEVENT_BABR			0x40000000
#define	FEC_IEVENT_BABT			0x20000000
#define	FEC_IEVENT_GRA			0x10000000
#define	FEC_IEVENT_TFINT		0x08000000
#define	FEC_IEVENT_MII			0x00800000
#define	FEC_IEVENT_LATE_COL		0x00200000
#define	FEC_IEVENT_COL_RETRY_LIM	0x00100000
#define	FEC_IEVENT_XFIFO_UN		0x00080000
#define	FEC_IEVENT_XFIFO_ERROR		0x00040000
#define	FEC_IEVENT_RFIFO_ERROR		0x00020000

/* Interrupt mask (imask) bits; same positions as the ievent bits above */
#define	FEC_IMASK_HBERR			0x80000000
#define	FEC_IMASK_BABR			0x40000000
#define	FEC_IMASK_BABT			0x20000000
#define	FEC_IMASK_GRA			0x10000000
#define	FEC_IMASK_MII			0x00800000
#define	FEC_IMASK_LATE_COL		0x00200000
#define	FEC_IMASK_COL_RETRY_LIM		0x00100000
#define	FEC_IMASK_XFIFO_UN		0x00080000
#define	FEC_IMASK_XFIFO_ERROR		0x00040000
#define	FEC_IMASK_RFIFO_ERROR		0x00020000

/* all but MII, which is enabled separately */
#define FEC_IMASK_ENABLE	(FEC_IMASK_HBERR | FEC_IMASK_BABR | \
		FEC_IMASK_BABT | FEC_IMASK_GRA | FEC_IMASK_LATE_COL | \
		FEC_IMASK_COL_RETRY_LIM | FEC_IMASK_XFIFO_UN | \
		FEC_IMASK_XFIFO_ERROR | FEC_IMASK_RFIFO_ERROR)

/* Receive control (r_cntrl) bits */
#define	FEC_RCNTRL_MAX_FL_SHIFT		16
#define	FEC_RCNTRL_LOOP			0x01
#define	FEC_RCNTRL_DRT			0x02
#define	FEC_RCNTRL_MII_MODE		0x04
#define	FEC_RCNTRL_PROM			0x08
#define	FEC_RCNTRL_BC_REJ		0x10
#define	FEC_RCNTRL_FCE			0x20

/* Transmit control (x_cntrl) bits */
#define	FEC_TCNTRL_GTS			0x00000001
#define	FEC_TCNTRL_HBC			0x00000002
#define	FEC_TCNTRL_FDEN			0x00000004
#define	FEC_TCNTRL_TFC_PAUSE		0x00000008
#define	FEC_TCNTRL_RFC_PAUSE		0x00000010

/* Ethernet control (ecntrl) bits */
#define	FEC_ECNTRL_RESET		0x00000001
#define	FEC_ECNTRL_ETHER_EN		0x00000002

/* MII management frame (mii_data) fields */
#define	FEC_MII_DATA_ST			0x40000000	/* Start frame */
#define	FEC_MII_DATA_OP_RD		0x20000000	/* Perform read */
#define	FEC_MII_DATA_OP_WR		0x10000000	/* Perform write */
#define	FEC_MII_DATA_PA_MSK		0x0f800000	/* PHY Address mask */
#define	FEC_MII_DATA_RA_MSK		0x007c0000	/* PHY Register mask */
#define	FEC_MII_DATA_TA			0x00020000	/* Turnaround */
#define	FEC_MII_DATA_DATAMSK		0x0000ffff	/* PHY data mask */

#define	FEC_MII_READ_FRAME	(FEC_MII_DATA_ST | FEC_MII_DATA_OP_RD | FEC_MII_DATA_TA)
#define	FEC_MII_WRITE_FRAME	(FEC_MII_DATA_ST | FEC_MII_DATA_OP_WR | FEC_MII_DATA_TA)

#define	FEC_MII_DATA_RA_SHIFT	0x12		/* MII reg addr bits */
#define	FEC_MII_DATA_PA_SHIFT	0x17		/* MII PHY addr bits */

#define	FEC_PADDR2_TYPE		0x8808
#define	FEC_OP_PAUSE_OPCODE	0x00010000

#define	FEC_FIFO_WMRK_256B	0x3

/* FIFO status/control bits */
#define	FEC_FIFO_STATUS_ERR	0x00400000
#define	FEC_FIFO_STATUS_UF	0x00200000
#define	FEC_FIFO_STATUS_OF	0x00100000

#define	FEC_FIFO_CNTRL_FRAME	0x08000000
#define	FEC_FIFO_CNTRL_LTG_7	0x07000000

/* Reset control (reset_cntrl) bits */
#define	FEC_RESET_CNTRL_RESET_FIFO	0x02000000
#define	FEC_RESET_CNTRL_ENABLE_IS_RESET	0x01000000

/* Transmit FSM (xmit_fsm) bits */
#define	FEC_XMIT_FSM_APPEND_CRC	0x02000000
#define	FEC_XMIT_FSM_ENABLE_CRC	0x01000000

extern struct platform_driver mpc52xx_fec_mdio_driver;

#endif	/* __DRIVERS_NET_MPC52XX_FEC_H__ */
/* SPDX-License-Identifier: GPL-2.0-only */ /* * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved. * Copyright (c) 2022. Qualcomm Innovation Center, Inc. All rights reserved. * Copyright (c) 2023, Richard Acayan. All rights reserved. */ #ifndef _DPU_4_1_SDM670_H #define _DPU_4_1_SDM670_H static const struct dpu_mdp_cfg sdm670_mdp = { .name = "top_0", .base = 0x0, .len = 0x45c, .features = BIT(DPU_MDP_AUDIO_SELECT), .clk_ctrls = { [DPU_CLK_CTRL_VIG0] = { .reg_off = 0x2ac, .bit_off = 0 }, [DPU_CLK_CTRL_VIG1] = { .reg_off = 0x2b4, .bit_off = 0 }, [DPU_CLK_CTRL_DMA0] = { .reg_off = 0x2ac, .bit_off = 8 }, [DPU_CLK_CTRL_DMA1] = { .reg_off = 0x2b4, .bit_off = 8 }, [DPU_CLK_CTRL_DMA2] = { .reg_off = 0x2bc, .bit_off = 8 }, }, }; static const struct dpu_sspp_cfg sdm670_sspp[] = { { .name = "sspp_0", .id = SSPP_VIG0, .base = 0x4000, .len = 0x1c8, .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_3, .xin_id = 0, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG0, }, { .name = "sspp_1", .id = SSPP_VIG1, .base = 0x6000, .len = 0x1c8, .features = VIG_SDM845_MASK_SDMA, .sblk = &dpu_vig_sblk_qseed3_1_3, .xin_id = 4, .type = SSPP_TYPE_VIG, .clk_ctrl = DPU_CLK_CTRL_VIG0, }, { .name = "sspp_8", .id = SSPP_DMA0, .base = 0x24000, .len = 0x1c8, .features = DMA_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 1, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA0, }, { .name = "sspp_9", .id = SSPP_DMA1, .base = 0x26000, .len = 0x1c8, .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 5, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA1, }, { .name = "sspp_10", .id = SSPP_DMA2, .base = 0x28000, .len = 0x1c8, .features = DMA_CURSOR_SDM845_MASK_SDMA, .sblk = &dpu_dma_sblk, .xin_id = 9, .type = SSPP_TYPE_DMA, .clk_ctrl = DPU_CLK_CTRL_DMA2, }, }; static const struct dpu_dsc_cfg sdm670_dsc[] = { { .name = "dsc_0", .id = DSC_0, .base = 0x80000, .len = 0x140, }, { .name = "dsc_1", .id = DSC_1, .base = 0x80400, .len = 0x140, }, }; 
/* DPU (MDSS) core revision for SDM670: v4.1. */
static const struct dpu_mdss_version sdm670_mdss_ver = {
	.core_major_ver = 4,
	.core_minor_ver = 1,
};

/*
 * Top-level hardware catalog for the SDM670 DPU.
 *
 * Only the MDP top block, the SSPP set and the DSC set are specific to
 * SDM670; the CTL, layer-mixer, pingpong, interface, VBIF, caps and
 * perf tables are shared with the SDM845 catalog (sdm845_* symbols
 * defined elsewhere).
 */
const struct dpu_mdss_cfg dpu_sdm670_cfg = {
	.mdss_ver = &sdm670_mdss_ver,
	.caps = &sdm845_dpu_caps,
	.mdp = &sdm670_mdp,
	.ctl_count = ARRAY_SIZE(sdm845_ctl),
	.ctl = sdm845_ctl,
	.sspp_count = ARRAY_SIZE(sdm670_sspp),
	.sspp = sdm670_sspp,
	.mixer_count = ARRAY_SIZE(sdm845_lm),
	.mixer = sdm845_lm,
	.pingpong_count = ARRAY_SIZE(sdm845_pp),
	.pingpong = sdm845_pp,
	.dsc_count = ARRAY_SIZE(sdm670_dsc),
	.dsc = sdm670_dsc,
	.intf_count = ARRAY_SIZE(sdm845_intf),
	.intf = sdm845_intf,
	.vbif_count = ARRAY_SIZE(sdm845_vbif),
	.vbif = sdm845_vbif,
	.perf = &sdm845_perf_data,
};

#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * Panel driver for the Samsung S6D27A1 480x800 DPI RGB panel.
 * Found in the Samsung Galaxy Ace 2 GT-I8160 mobile phone.
 *
 * The panel is configured over a MIPI DBI command link (here carried
 * over SPI, see mipi_dbi_spi_init() in probe) while pixel data is
 * scanned out over a parallel DPI RGB bus.
 */

#include <drm/drm_mipi_dbi.h>
#include <drm/drm_modes.h>
#include <drm/drm_panel.h>

#include <linux/delay.h>
#include <linux/gpio/consumer.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/media-bus-format.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>

#include <video/mipi_display.h>

/* Manufacturer command set (beyond the standard MIPI DCS commands) */
#define S6D27A1_PASSWD_L2	0xF0	/* Password Command for Level 2 Control */
#define S6D27A1_RESCTL		0xB3	/* Resolution Select Control */
#define S6D27A1_PANELCTL2	0xB4	/* ASG Signal Control */
#define S6D27A1_READID1		0xDA	/* Read panel ID 1 */
#define S6D27A1_READID2		0xDB	/* Read panel ID 2 */
#define S6D27A1_READID3		0xDC	/* Read panel ID 3 */
#define S6D27A1_DISPCTL		0xF2	/* Display Control */
#define S6D27A1_MANPWR		0xF3	/* Manual Control */
#define S6D27A1_PWRCTL1		0xF4	/* Power Control */
#define S6D27A1_SRCCTL		0xF6	/* Source Control */
#define S6D27A1_PANELCTL	0xF7	/* Panel Control */

/* Commands the DBI core is allowed to issue reads for (0-terminated) */
static const u8 s6d27a1_dbi_read_commands[] = {
	S6D27A1_READID1,
	S6D27A1_READID2,
	S6D27A1_READID3,
	0, /* sentinel */
};

/*
 * struct s6d27a1 - per-panel driver state
 * @dev: the SPI client device
 * @dbi: MIPI DBI interface used to send commands to the panel
 * @panel: DRM panel instance
 * @reset: reset GPIO; driven to 1 (asserted) to hold the panel in reset
 * @regulators: VCI (analog) and VCCIO (digital I/O) supplies
 */
struct s6d27a1 {
	struct device *dev;
	struct mipi_dbi dbi;
	struct drm_panel panel;
	struct gpio_desc *reset;
	struct regulator_bulk_data regulators[2];
};

static const struct drm_display_mode s6d27a1_480_800_mode = {
	/*
	 * The vendor driver states that the S6D27A1 panel
	 * has a pixel clock frequency of 49920000 Hz / 2 = 24960000 Hz.
	 */
	.clock = 24960,
	.hdisplay = 480,
	.hsync_start = 480 + 63,
	.hsync_end = 480 + 63 + 2,
	.htotal = 480 + 63 + 2 + 63,
	.vdisplay = 800,
	.vsync_start = 800 + 11,
	.vsync_end = 800 + 11 + 2,
	.vtotal = 800 + 11 + 2 + 10,
	.width_mm = 50,
	.height_mm = 84,
	.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};

/* Convert a drm_panel back to our embedding driver state. */
static inline struct s6d27a1 *to_s6d27a1(struct drm_panel *panel)
{
	return container_of(panel, struct s6d27a1, panel);
}

/*
 * Read and log the three MTP ID bytes. Purely informational: failures
 * are logged and otherwise ignored.
 */
static void s6d27a1_read_mtp_id(struct s6d27a1 *ctx)
{
	struct mipi_dbi *dbi = &ctx->dbi;
	u8 id1, id2, id3;
	int ret;

	ret = mipi_dbi_command_read(dbi, S6D27A1_READID1, &id1);
	if (ret) {
		dev_err(ctx->dev, "unable to read MTP ID 1\n");
		return;
	}
	ret = mipi_dbi_command_read(dbi, S6D27A1_READID2, &id2);
	if (ret) {
		dev_err(ctx->dev, "unable to read MTP ID 2\n");
		return;
	}
	ret = mipi_dbi_command_read(dbi, S6D27A1_READID3, &id3);
	if (ret) {
		dev_err(ctx->dev, "unable to read MTP ID 3\n");
		return;
	}
	dev_info(ctx->dev, "MTP ID: %02x %02x %02x\n", id1, id2, id3);
}

/*
 * Power-up and initialization sequence: enable supplies, pulse the
 * reset line, wake the panel from sleep and program the vendor init
 * registers. The register payloads are magic values taken from the
 * vendor init sequence.
 *
 * Returns 0 on success or a negative errno if enabling the supplies
 * fails (command transfer errors are not propagated by
 * mipi_dbi_command()).
 */
static int s6d27a1_power_on(struct s6d27a1 *ctx)
{
	struct mipi_dbi *dbi = &ctx->dbi;
	int ret;

	/* Power up */
	ret = regulator_bulk_enable(ARRAY_SIZE(ctx->regulators),
				    ctx->regulators);
	if (ret) {
		dev_err(ctx->dev, "failed to enable regulators: %d\n", ret);
		return ret;
	}

	msleep(20);

	/* Assert reset >=1 ms */
	gpiod_set_value_cansleep(ctx->reset, 1);
	usleep_range(1000, 5000);
	/* De-assert reset */
	gpiod_set_value_cansleep(ctx->reset, 0);
	/* Wait >= 10 ms */
	msleep(20);

	/*
	 * Exit sleep mode and initialize display - some hammering is
	 * necessary.
	 */
	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
	msleep(120);

	/* Magic to unlock level 2 control of the display */
	mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0x5A, 0x5A);

	/* Configure resolution to 480RGBx800 */
	mipi_dbi_command(dbi, S6D27A1_RESCTL, 0x22);

	mipi_dbi_command(dbi, S6D27A1_PANELCTL2, 0x00, 0x02, 0x03, 0x04, 0x05,
			 0x08, 0x00, 0x0c);

	mipi_dbi_command(dbi, S6D27A1_MANPWR, 0x01, 0x00, 0x00, 0x08, 0x08,
			 0x02, 0x00);

	mipi_dbi_command(dbi, S6D27A1_DISPCTL, 0x19, 0x00, 0x08, 0x0D, 0x03,
			 0x41, 0x3F);

	mipi_dbi_command(dbi, S6D27A1_PWRCTL1, 0x00, 0x00, 0x00, 0x00, 0x55,
			 0x44, 0x05, 0x88, 0x4B, 0x50);

	mipi_dbi_command(dbi, S6D27A1_SRCCTL, 0x03, 0x09, 0x8A, 0x00, 0x01,
			 0x16);

	mipi_dbi_command(dbi, S6D27A1_PANELCTL,
			 0x00, 0x05, 0x06, 0x07, 0x08,
			 0x01, 0x09, 0x0D, 0x0A, 0x0E,
			 0x0B, 0x0F, 0x0C, 0x10, 0x01,
			 0x11, 0x12, 0x13, 0x14, 0x05,
			 0x06, 0x07, 0x08, 0x01, 0x09,
			 0x0D, 0x0A, 0x0E, 0x0B, 0x0F,
			 0x0C, 0x10, 0x01, 0x11, 0x12,
			 0x13, 0x14);

	/* lock the level 2 control */
	mipi_dbi_command(dbi, S6D27A1_PASSWD_L2, 0xA5, 0xA5);

	s6d27a1_read_mtp_id(ctx);

	return 0;
}

/* Put the panel back in reset and cut its supplies. */
static int s6d27a1_power_off(struct s6d27a1 *ctx)
{
	/* Go into RESET and disable regulators */
	gpiod_set_value_cansleep(ctx->reset, 1);
	return regulator_bulk_disable(ARRAY_SIZE(ctx->regulators),
				      ctx->regulators);
}

/* drm_panel .unprepare: enter sleep mode, then power off. */
static int s6d27a1_unprepare(struct drm_panel *panel)
{
	struct s6d27a1 *ctx = to_s6d27a1(panel);
	struct mipi_dbi *dbi = &ctx->dbi;

	mipi_dbi_command(dbi, MIPI_DCS_ENTER_SLEEP_MODE);
	msleep(120);
	return s6d27a1_power_off(to_s6d27a1(panel));
}

/* drm_panel .disable: blank the display output. */
static int s6d27a1_disable(struct drm_panel *panel)
{
	struct s6d27a1 *ctx = to_s6d27a1(panel);
	struct mipi_dbi *dbi = &ctx->dbi;

	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_OFF);
	msleep(25);

	return 0;
}

/* drm_panel .prepare: run the full power-on/init sequence. */
static int s6d27a1_prepare(struct drm_panel *panel)
{
	return s6d27a1_power_on(to_s6d27a1(panel));
}

/* drm_panel .enable: un-blank the display output. */
static int s6d27a1_enable(struct drm_panel *panel)
{
	struct s6d27a1 *ctx = to_s6d27a1(panel);
	struct mipi_dbi *dbi = &ctx->dbi;

	mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);

	return 0;
}

/*
 * drm_panel .get_modes: report the single fixed 480x800 mode plus the
 * RGB888 bus format and negative-edge pixel-data sampling.
 */
static int s6d27a1_get_modes(struct drm_panel *panel,
			     struct drm_connector *connector)
{
	struct s6d27a1 *ctx = to_s6d27a1(panel);
	struct drm_display_mode *mode;
	static const u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;

	mode = drm_mode_duplicate(connector->dev, &s6d27a1_480_800_mode);
	if (!mode) {
		dev_err(ctx->dev, "failed to add mode\n");
		return -ENOMEM;
	}

	connector->display_info.bpc = 8;
	connector->display_info.width_mm = mode->width_mm;
	connector->display_info.height_mm = mode->height_mm;
	connector->display_info.bus_flags =
		DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;
	drm_display_info_set_bus_formats(&connector->display_info,
					 &bus_format, 1);

	drm_mode_set_name(mode);
	mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;

	drm_mode_probed_add(connector, mode);

	return 1; /* number of modes added */
}

static const struct drm_panel_funcs s6d27a1_drm_funcs = {
	.disable = s6d27a1_disable,
	.unprepare = s6d27a1_unprepare,
	.prepare = s6d27a1_prepare,
	.enable = s6d27a1_enable,
	.get_modes = s6d27a1_get_modes,
};

/*
 * Bind the SPI client: fetch supplies and the reset GPIO (requested
 * asserted, i.e. panel held in reset), set up the DBI-over-SPI link,
 * attach the backlight and register the DRM panel.
 */
static int s6d27a1_probe(struct spi_device *spi)
{
	struct device *dev = &spi->dev;
	struct s6d27a1 *ctx;
	int ret;

	ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->dev = dev;

	/*
	 * VCI is the analog voltage supply
	 * VCCIO is the digital I/O voltage supply
	 */
	ctx->regulators[0].supply = "vci";
	ctx->regulators[1].supply = "vccio";
	ret = devm_regulator_bulk_get(dev,
				      ARRAY_SIZE(ctx->regulators),
				      ctx->regulators);
	if (ret)
		return dev_err_probe(dev, ret, "failed to get regulators\n");

	ctx->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
	if (IS_ERR(ctx->reset)) {
		ret = PTR_ERR(ctx->reset);
		return dev_err_probe(dev, ret, "no RESET GPIO\n");
	}

	ret = mipi_dbi_spi_init(spi, &ctx->dbi, NULL);
	if (ret)
		return dev_err_probe(dev, ret, "MIPI DBI init failed\n");

	ctx->dbi.read_commands = s6d27a1_dbi_read_commands;

	drm_panel_init(&ctx->panel, dev, &s6d27a1_drm_funcs,
		       DRM_MODE_CONNECTOR_DPI);

	ret = drm_panel_of_backlight(&ctx->panel);
	if (ret)
		return dev_err_probe(dev, ret, "failed to add backlight\n");

	spi_set_drvdata(spi, ctx);

	drm_panel_add(&ctx->panel);

	return 0;
}

/* Unbind: unregister the panel (devm handles the rest). */
static void s6d27a1_remove(struct spi_device *spi)
{
	struct s6d27a1 *ctx = spi_get_drvdata(spi);

	drm_panel_remove(&ctx->panel);
}

static const struct of_device_id s6d27a1_match[] = {
	{ .compatible = "samsung,s6d27a1", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, s6d27a1_match);

static struct spi_driver s6d27a1_driver = {
	.probe		= s6d27a1_probe,
	.remove		= s6d27a1_remove,
	.driver		= {
		.name	= "s6d27a1-panel",
		.of_match_table = s6d27a1_match,
	},
};
module_spi_driver(s6d27a1_driver);

MODULE_AUTHOR("Markuss Broks <[email protected]>");
MODULE_DESCRIPTION("Samsung S6D27A1 panel driver");
MODULE_LICENSE("GPL v2");
/* * JFFS2 -- Journalling Flash File System, Version 2. * * Copyright © 2001-2007 Red Hat, Inc. * Copyright © 2004 Thomas Gleixner <[email protected]> * * Created by David Woodhouse <[email protected]> * Modified debugged and enhanced by Thomas Gleixner <[email protected]> * * For licensing information, see the file 'LICENCE' in this directory. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/crc32.h> #include <linux/mtd/rawnand.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/writeback.h> #include "nodelist.h" /* For testing write failures */ #undef BREAKME #undef BREAKMEHEADER #ifdef BREAKME static unsigned char *brokenbuf; #endif #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) ) #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) ) /* max. erase failures before we mark a block bad */ #define MAX_ERASE_FAILURES 2 struct jffs2_inodirty { uint32_t ino; struct jffs2_inodirty *next; }; static struct jffs2_inodirty inodirty_nomem; static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inodirty *this = c->wbuf_inodes; /* If a malloc failed, consider _everything_ dirty */ if (this == &inodirty_nomem) return 1; /* If ino == 0, _any_ non-GC writes mean 'yes' */ if (this && !ino) return 1; /* Look to see if the inode in question is pending in the wbuf */ while (this) { if (this->ino == ino) return 1; this = this->next; } return 0; } static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c) { struct jffs2_inodirty *this; this = c->wbuf_inodes; if (this != &inodirty_nomem) { while (this) { struct jffs2_inodirty *next = this->next; kfree(this); this = next; } } c->wbuf_inodes = NULL; } static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) { struct jffs2_inodirty *new; /* Schedule delayed write-buffer write-out 
*/ jffs2_dirty_trigger(c); if (jffs2_wbuf_pending_for_ino(c, ino)) return; new = kmalloc(sizeof(*new), GFP_KERNEL); if (!new) { jffs2_dbg(1, "No memory to allocate inodirty. Fallback to all considered dirty\n"); jffs2_clear_wbuf_ino_list(c); c->wbuf_inodes = &inodirty_nomem; return; } new->ino = ino; new->next = c->wbuf_inodes; c->wbuf_inodes = new; return; } static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) { struct list_head *this, *next; static int n; if (list_empty(&c->erasable_pending_wbuf_list)) return; list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { struct jffs2_eraseblock *jeb = list_entry(this, struct jffs2_eraseblock, list); jffs2_dbg(1, "Removing eraseblock at 0x%08x from erasable_pending_wbuf_list...\n", jeb->offset); list_del(this); if ((jiffies + (n++)) & 127) { /* Most of the time, we just erase it immediately. Otherwise we spend ages scanning it on mount, etc. */ jffs2_dbg(1, "...and adding to erase_pending_list\n"); list_add_tail(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } else { /* Sometimes, however, we leave it elsewhere so it doesn't get immediately reused, and we spread the load a bit. */ jffs2_dbg(1, "...and adding to erasable_list\n"); list_add_tail(&jeb->list, &c->erasable_list); } } } #define REFILE_NOTEMPTY 0 #define REFILE_ANYWAY 1 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) { jffs2_dbg(1, "About to refile bad block at %08x\n", jeb->offset); /* File the existing block on the bad_used_list.... */ if (c->nextblock == jeb) c->nextblock = NULL; else /* Not sure this should ever happen... 
need more coffee */ list_del(&jeb->list); if (jeb->first_node) { jffs2_dbg(1, "Refiling block at %08x to bad_used_list\n", jeb->offset); list_add(&jeb->list, &c->bad_used_list); } else { BUG_ON(allow_empty == REFILE_NOTEMPTY); /* It has to have had some nodes or we couldn't be here */ jffs2_dbg(1, "Refiling block at %08x to erase_pending_list\n", jeb->offset); list_add(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { uint32_t oldfree = jeb->free_size; jffs2_link_node_ref(c, jeb, (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE, oldfree, NULL); /* convert to wasted */ c->wasted_size += oldfree; jeb->wasted_size += oldfree; c->dirty_size -= oldfree; jeb->dirty_size -= oldfree; } jffs2_dbg_dump_block_lists_nolock(c); jffs2_dbg_acct_sanity_check_nolock(c,jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); } static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c, struct jffs2_inode_info *f, struct jffs2_raw_node_ref *raw, union jffs2_node_union *node) { struct jffs2_node_frag *frag; struct jffs2_full_dirent *fd; dbg_noderef("incore_replace_raw: node at %p is {%04x,%04x}\n", node, je16_to_cpu(node->u.magic), je16_to_cpu(node->u.nodetype)); BUG_ON(je16_to_cpu(node->u.magic) != 0x1985 && je16_to_cpu(node->u.magic) != 0); switch (je16_to_cpu(node->u.nodetype)) { case JFFS2_NODETYPE_INODE: if (f->metadata && f->metadata->raw == raw) { dbg_noderef("Will replace ->raw in f->metadata at %p\n", f->metadata); return &f->metadata->raw; } frag = jffs2_lookup_node_frag(&f->fragtree, je32_to_cpu(node->i.offset)); BUG_ON(!frag); /* Find a frag which refers to the full_dnode we want to modify */ while (!frag->node || frag->node->raw != raw) { frag = frag_next(frag); BUG_ON(!frag); } dbg_noderef("Will replace ->raw in full_dnode at %p\n", frag->node); return &frag->node->raw; case JFFS2_NODETYPE_DIRENT: for (fd = f->dents; fd; fd = fd->next) { if (fd->raw == 
raw) { dbg_noderef("Will replace ->raw in full_dirent at %p\n", fd); return &fd->raw; } } BUG(); default: dbg_noderef("Don't care about replacing raw for nodetype %x\n", je16_to_cpu(node->u.nodetype)); break; } return NULL; } #ifdef CONFIG_JFFS2_FS_WBUF_VERIFY static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, uint32_t ofs) { int ret; size_t retlen; char *eccstr; ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); if (ret && ret != -EUCLEAN && ret != -EBADMSG) { pr_warn("%s(): Read back of page at %08x failed: %d\n", __func__, c->wbuf_ofs, ret); return ret; } else if (retlen != c->wbuf_pagesize) { pr_warn("%s(): Read back of page at %08x gave short read: %zd not %d\n", __func__, ofs, retlen, c->wbuf_pagesize); return -EIO; } if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) return 0; if (ret == -EUCLEAN) eccstr = "corrected"; else if (ret == -EBADMSG) eccstr = "correction failed"; else eccstr = "OK or unused"; pr_warn("Write verify error (ECC %s) at %08x. Wrote:\n", eccstr, c->wbuf_ofs); print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, c->wbuf, c->wbuf_pagesize, 0); pr_warn("Read back:\n"); print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, c->wbuf_verify, c->wbuf_pagesize, 0); return -EIO; } #else #define jffs2_verify_write(c,b,o) (0) #endif /* Recover from failure to write wbuf. Recover the nodes up to the * wbuf, not the one which we were starting to try to write. 
*/ static void jffs2_wbuf_recover(struct jffs2_sb_info *c) { struct jffs2_eraseblock *jeb, *new_jeb; struct jffs2_raw_node_ref *raw, *next, *first_raw = NULL; size_t retlen; int ret; int nr_refile = 0; unsigned char *buf; uint32_t start, end, ofs, len; jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; spin_lock(&c->erase_completion_lock); if (c->wbuf_ofs % c->mtd->erasesize) jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); else jffs2_block_refile(c, jeb, REFILE_ANYWAY); spin_unlock(&c->erase_completion_lock); BUG_ON(!ref_obsolete(jeb->last_node)); /* Find the first node to be recovered, by skipping over every node which ends before the wbuf starts, or which is obsolete. */ for (next = raw = jeb->first_node; next; raw = next) { next = ref_next(raw); if (ref_obsolete(raw) || (next && ref_offset(next) <= c->wbuf_ofs)) { dbg_noderef("Skipping node at 0x%08x(%d)-0x%08x which is either before 0x%08x or obsolete\n", ref_offset(raw), ref_flags(raw), (ref_offset(raw) + ref_totlen(c, jeb, raw)), c->wbuf_ofs); continue; } dbg_noderef("First node to be recovered is at 0x%08x(%d)-0x%08x\n", ref_offset(raw), ref_flags(raw), (ref_offset(raw) + ref_totlen(c, jeb, raw))); first_raw = raw; break; } if (!first_raw) { /* All nodes were obsolete. Nothing to recover. */ jffs2_dbg(1, "No non-obsolete nodes to be recovered. Just filing block bad\n"); c->wbuf_len = 0; return; } start = ref_offset(first_raw); end = ref_offset(jeb->last_node); nr_refile = 1; /* Count the number of refs which need to be copied */ while ((raw = ref_next(raw)) != jeb->last_node) nr_refile++; dbg_noderef("wbuf recover %08x-%08x (%d bytes in %d nodes)\n", start, end, end - start, nr_refile); buf = NULL; if (start < c->wbuf_ofs) { /* First affected node was already partially written. * Attempt to reread the old data into our buffer. */ buf = kmalloc(end - start, GFP_KERNEL); if (!buf) { pr_crit("Malloc failure in wbuf recovery. Data loss ensues.\n"); goto read_failed; } /* Do the read... 
*/ ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, buf); /* ECC recovered ? */ if ((ret == -EUCLEAN || ret == -EBADMSG) && (retlen == c->wbuf_ofs - start)) ret = 0; if (ret || retlen != c->wbuf_ofs - start) { pr_crit("Old data are already lost in wbuf recovery. Data loss ensues.\n"); kfree(buf); buf = NULL; read_failed: first_raw = ref_next(first_raw); nr_refile--; while (first_raw && ref_obsolete(first_raw)) { first_raw = ref_next(first_raw); nr_refile--; } /* If this was the only node to be recovered, give up */ if (!first_raw) { c->wbuf_len = 0; return; } /* It wasn't. Go on and try to recover nodes complete in the wbuf */ start = ref_offset(first_raw); dbg_noderef("wbuf now recover %08x-%08x (%d bytes in %d nodes)\n", start, end, end - start, nr_refile); } else { /* Read succeeded. Copy the remaining data from the wbuf */ memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); } } /* OK... we're to rewrite (end-start) bytes of data from first_raw onwards. Either 'buf' contains the data, or we find it in the wbuf */ /* ... and get an allocation of space from a shiny new block instead */ ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); if (ret) { pr_warn("Failed to allocate space for wbuf recovery. Data loss ensues.\n"); kfree(buf); return; } /* The summary is not recovered, so it must be disabled for this erase block */ jffs2_sum_disable_collecting(c->summary); ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); if (ret) { pr_warn("Failed to allocate node refs for wbuf recovery. Data loss ensues.\n"); kfree(buf); return; } ofs = write_ofs(c); if (end-start >= c->wbuf_pagesize) { /* Need to do another write immediately, but it's possible that this is just because the wbuf itself is completely full, and there's nothing earlier read back from the flash. Hence 'buf' isn't necessarily what we're writing from. 
*/ unsigned char *rewrite_buf = buf?:c->wbuf; uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); jffs2_dbg(1, "Write 0x%x bytes at 0x%08x in wbuf recover\n", towrite, ofs); #ifdef BREAKMEHEADER static int breakme; if (breakme++ == 20) { pr_notice("Faking write error at 0x%08x\n", ofs); breakme = 0; mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); ret = -EIO; } else #endif ret = mtd_write(c->mtd, ofs, towrite, &retlen, rewrite_buf); if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { /* Argh. We tried. Really we did. */ pr_crit("Recovery of wbuf failed due to a second write error\n"); kfree(buf); if (retlen) jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL); return; } pr_notice("Recovery of wbuf succeeded to %08x\n", ofs); c->wbuf_len = (end - start) - towrite; c->wbuf_ofs = ofs + towrite; memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ } else { /* OK, now we're left with the dregs in whichever buffer we're using */ if (buf) { memcpy(c->wbuf, buf, end-start); } else { memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); } c->wbuf_ofs = ofs; c->wbuf_len = end - start; } /* Now sort out the jffs2_raw_node_refs, moving them from the old to the next block */ new_jeb = &c->blocks[ofs / c->sector_size]; spin_lock(&c->erase_completion_lock); for (raw = first_raw; raw != jeb->last_node; raw = ref_next(raw)) { uint32_t rawlen = ref_totlen(c, jeb, raw); struct jffs2_inode_cache *ic; struct jffs2_raw_node_ref *new_ref; struct jffs2_raw_node_ref **adjust_ref = NULL; struct jffs2_inode_info *f = NULL; jffs2_dbg(1, "Refiling block of %08x at %08x(%d) to %08x\n", rawlen, ref_offset(raw), ref_flags(raw), ofs); ic = jffs2_raw_ref_to_ic(raw); /* Ick. This XATTR mess should be fixed shortly... 
*/ if (ic && ic->class == RAWNODE_CLASS_XATTR_DATUM) { struct jffs2_xattr_datum *xd = (void *)ic; BUG_ON(xd->node != raw); adjust_ref = &xd->node; raw->next_in_ino = NULL; ic = NULL; } else if (ic && ic->class == RAWNODE_CLASS_XATTR_REF) { struct jffs2_xattr_datum *xr = (void *)ic; BUG_ON(xr->node != raw); adjust_ref = &xr->node; raw->next_in_ino = NULL; ic = NULL; } else if (ic && ic->class == RAWNODE_CLASS_INODE_CACHE) { struct jffs2_raw_node_ref **p = &ic->nodes; /* Remove the old node from the per-inode list */ while (*p && *p != (void *)ic) { if (*p == raw) { (*p) = (raw->next_in_ino); raw->next_in_ino = NULL; break; } p = &((*p)->next_in_ino); } if (ic->state == INO_STATE_PRESENT && !ref_obsolete(raw)) { /* If it's an in-core inode, then we have to adjust any full_dirent or full_dnode structure to point to the new version instead of the old */ f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); if (IS_ERR(f)) { /* Should never happen; it _must_ be present */ JFFS2_ERROR("Failed to iget() ino #%u, err %ld\n", ic->ino, PTR_ERR(f)); BUG(); } /* We don't lock f->sem. There's a number of ways we could end up in here with it already being locked, and nobody's going to modify it on us anyway because we hold the alloc_sem. We're only changing one ->raw pointer too, which we can get away with without upsetting readers. 
*/ adjust_ref = jffs2_incore_replace_raw(c, f, raw, (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); } else if (unlikely(ic->state != INO_STATE_PRESENT && ic->state != INO_STATE_CHECKEDABSENT && ic->state != INO_STATE_GC)) { JFFS2_ERROR("Inode #%u is in strange state %d!\n", ic->ino, ic->state); BUG(); } } new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); if (adjust_ref) { BUG_ON(*adjust_ref != raw); *adjust_ref = new_ref; } if (f) jffs2_gc_release_inode(c, f); if (!ref_obsolete(raw)) { jeb->dirty_size += rawlen; jeb->used_size -= rawlen; c->dirty_size += rawlen; c->used_size -= rawlen; raw->flash_offset = ref_offset(raw) | REF_OBSOLETE; BUG_ON(raw->next_in_ino); } ofs += rawlen; } kfree(buf); /* Fix up the original jeb now it's on the bad_list */ if (first_raw == jeb->first_node) { jffs2_dbg(1, "Failing block at %08x is now empty. Moving to erase_pending_list\n", jeb->offset); list_move(&jeb->list, &c->erase_pending_list); c->nr_erasing_blocks++; jffs2_garbage_collect_trigger(c); } jffs2_dbg_acct_sanity_check_nolock(c, jeb); jffs2_dbg_acct_paranoia_check_nolock(c, jeb); jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); spin_unlock(&c->erase_completion_lock); jffs2_dbg(1, "wbuf recovery completed OK. wbuf_ofs 0x%08x, len 0x%x\n", c->wbuf_ofs, c->wbuf_len); } /* Meaning of pad argument: 0: Do not pad. Probably pointless - we only ever use this when we can't pad anyway. 1: Pad, do not adjust nextblock free_size 2: Pad, adjust nextblock free_size */ #define NOPAD 0 #define PAD_NOACCOUNT 1 #define PAD_ACCOUNTING 2 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) { struct jffs2_eraseblock *wbuf_jeb; int ret; size_t retlen; /* Nothing to do if not write-buffering the flash. In particular, we shouldn't del_timer() the timer we never initialised. 
*/ if (!jffs2_is_writebuffered(c)) return 0; if (!mutex_is_locked(&c->alloc_sem)) { pr_crit("jffs2_flush_wbuf() called with alloc_sem not locked!\n"); BUG(); } if (!c->wbuf_len) /* already checked c->wbuf above */ return 0; wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1)) return -ENOMEM; /* claim remaining space on the page this happens, if we have a change to a new block, or if fsync forces us to flush the writebuffer. if we have a switch to next page, we will not have enough remaining space for this. */ if (pad ) { c->wbuf_len = PAD(c->wbuf_len); /* Pad with JFFS2_DIRTY_BITMASK initially. this helps out ECC'd NOR with 8 byte page size */ memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); padnode->magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); padnode->nodetype = cpu_to_je16(JFFS2_NODETYPE_PADDING); padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len); padnode->hdr_crc = cpu_to_je32(crc32(0, padnode, sizeof(*padnode)-4)); } } /* else jffs2_flash_writev has actually filled in the rest of the buffer for us, and will deal with the node refs etc. later. 
*/ #ifdef BREAKME static int breakme; if (breakme++ == 20) { pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); breakme = 0; mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, brokenbuf); ret = -EIO; } else #endif ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, c->wbuf); if (ret) { pr_warn("jffs2_flush_wbuf(): Write failed with %d\n", ret); goto wfail; } else if (retlen != c->wbuf_pagesize) { pr_warn("jffs2_flush_wbuf(): Write was short: %zd instead of %d\n", retlen, c->wbuf_pagesize); ret = -EIO; goto wfail; } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { wfail: jffs2_wbuf_recover(c); return ret; } /* Adjust free size of the block if we padded. */ if (pad) { uint32_t waste = c->wbuf_pagesize - c->wbuf_len; jffs2_dbg(1, "jffs2_flush_wbuf() adjusting free_size of %sblock at %08x\n", (wbuf_jeb == c->nextblock) ? "next" : "", wbuf_jeb->offset); /* wbuf_pagesize - wbuf_len is the amount of space that's to be padded. If there is less free space in the block than that, something screwed up */ if (wbuf_jeb->free_size < waste) { pr_crit("jffs2_flush_wbuf(): Accounting error. wbuf at 0x%08x has 0x%03x bytes, 0x%03x left.\n", c->wbuf_ofs, c->wbuf_len, waste); pr_crit("jffs2_flush_wbuf(): But free_size for block at 0x%08x is only 0x%08x\n", wbuf_jeb->offset, wbuf_jeb->free_size); BUG(); } spin_lock(&c->erase_completion_lock); jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL); /* FIXME: that made it count as dirty. 
	   Convert to wasted */
		wbuf_jeb->dirty_size -= waste;
		c->dirty_size -= waste;
		wbuf_jeb->wasted_size += waste;
		c->wasted_size += waste;
	} else
		spin_lock(&c->erase_completion_lock);

	/* Stick any now-obsoleted blocks on the erase_pending_list */
	jffs2_refile_wbuf_blocks(c);
	jffs2_clear_wbuf_ino_list(c);
	spin_unlock(&c->erase_completion_lock);

	/* Reset the buffer to the NAND erased state (all 0xff) for reuse */
	memset(c->wbuf,0xff,c->wbuf_pagesize);
	/* adjust write buffer offset, else we get a non contiguous write bug */
	c->wbuf_ofs += c->wbuf_pagesize;
	c->wbuf_len = 0;
	return 0;
}

/* Trigger garbage collection to flush the write-buffer.
   If ino arg is zero, do it if _any_ real (i.e. not GC) writes are
   outstanding. If ino arg non-zero, do it only if a write for the given
   inode is outstanding. */
int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino)
{
	uint32_t old_wbuf_ofs;
	uint32_t old_wbuf_len;
	int ret = 0;

	jffs2_dbg(1, "jffs2_flush_wbuf_gc() called for ino #%u...\n", ino);

	/* No write-buffer allocated: nothing to flush */
	if (!c->wbuf)
		return 0;

	mutex_lock(&c->alloc_sem);
	if (!jffs2_wbuf_pending_for_ino(c, ino)) {
		jffs2_dbg(1, "Ino #%d not pending in wbuf. Returning\n", ino);
		mutex_unlock(&c->alloc_sem);
		return 0;
	}

	/* Remember where the wbuf was; GC progress is detected below by
	 * watching wbuf_ofs move away from this snapshot */
	old_wbuf_ofs = c->wbuf_ofs;
	old_wbuf_len = c->wbuf_len;

	if (c->unchecked_size) {
		/* GC won't make any progress for a while */
		jffs2_dbg(1, "%s(): padding. Not finished checking\n",
			  __func__);
		down_write(&c->wbuf_sem);
		ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		/* retry flushing wbuf in case jffs2_wbuf_recover
		   left some data in the wbuf */
		if (ret)
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
		up_write(&c->wbuf_sem);
	} else while (old_wbuf_len &&
		      old_wbuf_ofs == c->wbuf_ofs) {
		/* Run GC passes until the wbuf has been written out
		 * (its offset changes) or GC fails */
		mutex_unlock(&c->alloc_sem);

		jffs2_dbg(1, "%s(): calls gc pass\n", __func__);

		ret = jffs2_garbage_collect_pass(c);
		if (ret) {
			/* GC failed. Flush it with padding instead */
			mutex_lock(&c->alloc_sem);
			down_write(&c->wbuf_sem);
			ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			/* retry flushing wbuf in case jffs2_wbuf_recover
			   left some data in the wbuf */
			if (ret)
				ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING);
			up_write(&c->wbuf_sem);
			break;
		}
		mutex_lock(&c->alloc_sem);
	}

	jffs2_dbg(1, "%s(): ends...\n", __func__);

	mutex_unlock(&c->alloc_sem);
	return ret;
}

/* Pad write-buffer to end and write it, wasting space. */
int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c)
{
	int ret;

	if (!c->wbuf)
		return 0;

	down_write(&c->wbuf_sem);
	ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	/* retry - maybe wbuf recover left some data in wbuf. */
	if (ret)
		ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT);
	up_write(&c->wbuf_sem);

	return ret;
}

/* Copy as much of @buf as fits into the write-buffer and return the
 * number of bytes consumed. Returns 0 when the wbuf is empty and the
 * request is at least a whole page (the caller then writes directly). */
static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf,
			      size_t len)
{
	if (len && !c->wbuf_len && (len >= c->wbuf_pagesize))
		return 0;

	if (len > (c->wbuf_pagesize - c->wbuf_len))
		len = c->wbuf_pagesize - c->wbuf_len;

	memcpy(c->wbuf + c->wbuf_len, buf, len);

	c->wbuf_len += (uint32_t) len;
	return len;
}

int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs,
		       unsigned long count, loff_t to, size_t *retlen,
		       uint32_t ino)
{
	struct jffs2_eraseblock *jeb;
	size_t wbuf_retlen, donelen = 0;
	uint32_t outvec_to = to;
	int ret, invec;

	/* If not writebuffered flash, don't bother */
	if (!jffs2_is_writebuffered(c))
		return jffs2_flash_direct_writev(c, invecs, count, to, retlen);

	down_write(&c->wbuf_sem);

	/* If wbuf_ofs is not initialized, set it to target address */
	if (c->wbuf_ofs == 0xFFFFFFFF) {
		c->wbuf_ofs = PAGE_DIV(to);
		c->wbuf_len = PAGE_MOD(to);
		memset(c->wbuf,0xff,c->wbuf_pagesize);
	}

	/*
	 * Sanity checks on target address.  It's permitted to write
	 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to
	 * write at the beginning of a new erase block. Anything else,
	 * and you die.
New block starts at xxx000c (0-b = block * header) */ if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { /* It's a write to a new block */ if (c->wbuf_len) { jffs2_dbg(1, "%s(): to 0x%lx causes flush of wbuf at 0x%08x\n", __func__, (unsigned long)to, c->wbuf_ofs); ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); if (ret) goto outerr; } /* set pointer to new block */ c->wbuf_ofs = PAGE_DIV(to); c->wbuf_len = PAGE_MOD(to); } if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { /* We're not writing immediately after the writebuffer. Bad. */ pr_crit("%s(): Non-contiguous write to %08lx\n", __func__, (unsigned long)to); if (c->wbuf_len) pr_crit("wbuf was previously %08x-%08x\n", c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); BUG(); } /* adjust alignment offset */ if (c->wbuf_len != PAGE_MOD(to)) { c->wbuf_len = PAGE_MOD(to); /* take care of alignment to next page */ if (!c->wbuf_len) { c->wbuf_len = c->wbuf_pagesize; ret = __jffs2_flush_wbuf(c, NOPAD); if (ret) goto outerr; } } for (invec = 0; invec < count; invec++) { int vlen = invecs[invec].iov_len; uint8_t *v = invecs[invec].iov_base; wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); if (c->wbuf_len == c->wbuf_pagesize) { ret = __jffs2_flush_wbuf(c, NOPAD); if (ret) goto outerr; } vlen -= wbuf_retlen; outvec_to += wbuf_retlen; donelen += wbuf_retlen; v += wbuf_retlen; if (vlen >= c->wbuf_pagesize) { ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), &wbuf_retlen, v); if (ret < 0 || wbuf_retlen != PAGE_DIV(vlen)) goto outfile; vlen -= wbuf_retlen; outvec_to += wbuf_retlen; c->wbuf_ofs = outvec_to; donelen += wbuf_retlen; v += wbuf_retlen; } wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); if (c->wbuf_len == c->wbuf_pagesize) { ret = __jffs2_flush_wbuf(c, NOPAD); if (ret) goto outerr; } outvec_to += wbuf_retlen; donelen += wbuf_retlen; } /* * If there's a remainder in the wbuf and it's a non-GC write, * remember that the wbuf affects this ino */ *retlen = donelen; if (jffs2_sum_active()) { int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) 
to); if (res) return res; } if (c->wbuf_len && ino) jffs2_wbuf_dirties_inode(c, ino); ret = 0; up_write(&c->wbuf_sem); return ret; outfile: /* * At this point we have no problem, c->wbuf is empty. However * refile nextblock to avoid writing again to same address. */ spin_lock(&c->erase_completion_lock); jeb = &c->blocks[outvec_to / c->sector_size]; jffs2_block_refile(c, jeb, REFILE_ANYWAY); spin_unlock(&c->erase_completion_lock); outerr: *retlen = 0; up_write(&c->wbuf_sem); return ret; } /* * This is the entry for flash write. * Check, if we work on NAND FLASH, if so build an kvec and write it via vritev */ int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, const u_char *buf) { struct kvec vecs[1]; if (!jffs2_is_writebuffered(c)) return jffs2_flash_direct_write(c, ofs, len, retlen, buf); vecs[0].iov_base = (unsigned char *) buf; vecs[0].iov_len = len; return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0); } /* Handle readback from writebuffer and ECC failure return */ int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf) { loff_t orbf = 0, owbf = 0, lwbf = 0; int ret; if (!jffs2_is_writebuffered(c)) return mtd_read(c->mtd, ofs, len, retlen, buf); /* Read flash */ down_read(&c->wbuf_sem); ret = mtd_read(c->mtd, ofs, len, retlen, buf); if ( (ret == -EBADMSG || ret == -EUCLEAN) && (*retlen == len) ) { if (ret == -EBADMSG) pr_warn("mtd->read(0x%zx bytes from 0x%llx) returned ECC error\n", len, ofs); /* * We have the raw data without ECC correction in the buffer, * maybe we are lucky and all data or parts are correct. We * check the node. If data are corrupted node check will sort * it out. We keep this block, it will fail on write or erase * and the we mark it bad. Or should we do that now? But we * should give him a chance. Maybe we had a system crash or * power loss before the ecc write or a erase was completed. * So we return success. 
	 :) */
		ret = 0;
	}

	/* if no writebuffer available or write buffer empty, return */
	if (!c->wbuf_pagesize || !c->wbuf_len)
		goto exit;

	/* if we read in a different block, return */
	if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs))
		goto exit;

	/* The read overlaps the erase block currently held in the wbuf;
	 * overlay the not-yet-flushed wbuf contents onto the result. */
	if (ofs >= c->wbuf_ofs) {
		owbf = (ofs - c->wbuf_ofs);	/* offset in write buffer */
		if (owbf > c->wbuf_len)		/* is read beyond write buffer ? */
			goto exit;
		lwbf = c->wbuf_len - owbf;	/* number of bytes to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = (c->wbuf_ofs - ofs);	/* offset in read buffer */
		if (orbf > len)			/* is write beyond write buffer ? */
			goto exit;
		lwbf = len - orbf;		/* number of bytes to copy */
		if (lwbf > c->wbuf_len)
			lwbf = c->wbuf_len;
	}
	if (lwbf > 0)
		memcpy(buf+orbf,c->wbuf+owbf,lwbf);

exit:
	up_read(&c->wbuf_sem);
	return ret;
}

/* Number of NAND pages whose OOB areas are scanned at once */
#define NR_OOB_SCAN_PAGES 4

/* For historical reasons we use only 8 bytes for OOB clean marker */
#define OOB_CM_SIZE 8

/* The cleanmarker node image written to / compared against the OOB area */
static const struct jffs2_unknown_node oob_cleanmarker =
{
	.magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
	.nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
	.totlen = constant_cpu_to_je32(8)
};

/*
 * Check, if the out of band area is empty. This function knows about the clean
 * marker and if it is present in OOB, treats the OOB as empty anyway.
 *
 * Returns 0 if the OOB area is empty, 1 if it is not, or a negative
 * error code if the OOB read itself failed.
 */
int jffs2_check_oob_empty(struct jffs2_sb_info *c,
			  struct jffs2_eraseblock *jeb, int mode)
{
	int i, ret;
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);
	struct mtd_oob_ops ops = { };

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		/* A short read with no (or only bitflip) error still
		 * counts as an I/O failure */
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	for(i = 0; i < ops.ooblen; i++) {
		/* When mode != 0, skip the leading cleanmarker bytes */
		if (mode && i < cmlen)
			/* Yeah, we know about the cleanmarker */
			continue;

		if (ops.oobbuf[i] != 0xFF) {
			jffs2_dbg(2, "Found %02x at %x in OOB for "
				  "%08x\n", ops.oobbuf[i], i, jeb->offset);
			return 1;
		}
	}

	return 0;
}

/*
 * Check for a valid cleanmarker.
 * Returns: 0 if a valid cleanmarker was found
 *	    1 if no cleanmarker was found
 *	    negative error code if an error occurred
 */
int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	struct mtd_oob_ops ops = { };
	int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = c->oobbuf;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_read_oob(c->mtd, jeb->offset, &ops);
	if ((ret && !mtd_is_bitflip(ret)) || ops.oobretlen != ops.ooblen) {
		pr_err("cannot read OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret || mtd_is_bitflip(ret))
			ret = -EIO;
		return ret;
	}

	/* memcmp() == 0 means a valid cleanmarker -> return 0 */
	return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen);
}

/* Write the cleanmarker node into the OOB area of an erase block */
int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c,
				 struct jffs2_eraseblock *jeb)
{
	int ret;
	struct mtd_oob_ops ops = { };
	int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE);

	ops.mode = MTD_OPS_AUTO_OOB;
	ops.ooblen = cmlen;
	ops.oobbuf = (uint8_t *)&oob_cleanmarker;
	ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
	ops.datbuf = NULL;

	ret = mtd_write_oob(c->mtd, jeb->offset, &ops);
	if (ret || ops.oobretlen != ops.ooblen) {
		pr_err("cannot write OOB for EB at %08x, requested %zd bytes, read %zd bytes, error %d\n",
		       jeb->offset, ops.ooblen, ops.oobretlen, ret);
		if (!ret)
			ret = -EIO;
		return ret;
	}

	return 0;
}

/*
 * On NAND we try to mark this block bad. If the block was erased more
 * than MAX_ERASE_FAILURES we mark it finally bad.
 * Don't care about failures. This block remains on the erase-pending
 * or badblock list as long as nobody manipulates the flash with
 * a bootloader or something like that.
 *
 * Returns 0 while the failure count is below the threshold, 1 once the
 * block has been marked bad, or a negative error from the MTD layer.
 */
int jffs2_write_nand_badblock(struct jffs2_sb_info *c,
			      struct jffs2_eraseblock *jeb, uint32_t bad_offset)
{
	int ret;

	/* if the count is < max, we try to write the counter to the 2nd page oob area */
	if( ++jeb->bad_count < MAX_ERASE_FAILURES)
		return 0;

	pr_warn("marking eraseblock at %08x as bad\n", bad_offset);
	ret = mtd_block_markbad(c->mtd, bad_offset);

	if (ret) {
		jffs2_dbg(1, "%s(): Write failed for block at %08x: error %d\n",
			  __func__, jeb->offset, ret);
		return ret;
	}
	return 1;
}

/* Map a delayed_work back to the jffs2_sb_info that embeds it */
static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
{
	struct delayed_work *dwork;

	dwork = to_delayed_work(work);
	return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
}

/* Deferred write-buffer flush, scheduled by jffs2_dirty_trigger() */
static void delayed_wbuf_sync(struct work_struct *work)
{
	struct jffs2_sb_info *c = work_to_sb(work);
	struct super_block *sb = OFNI_BS_2SFFJ(c);

	/* Skip the flush entirely on a read-only mount */
	if (!sb_rdonly(sb)) {
		jffs2_dbg(1, "%s()\n", __func__);
		jffs2_flush_wbuf_gc(c, 0);
	}
}

/* Schedule a delayed flush of the write-buffer on a writable mount */
void jffs2_dirty_trigger(struct jffs2_sb_info *c)
{
	struct super_block *sb = OFNI_BS_2SFFJ(c);
	unsigned long delay;

	if (sb_rdonly(sb))
		return;

	delay = msecs_to_jiffies(dirty_writeback_interval * 10);
	if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay))
		jffs2_dbg(1, "%s()\n", __func__);
}

/* Per-flash-type setup: NAND with out-of-band cleanmarkers */
int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
{
	if (!c->mtd->oobsize)
		return 0;

	/*
Cleanmarker is out-of-band, so inline size zero */
	c->cleanmarker_size = 0;

	if (c->mtd->oobavail == 0) {
		pr_err("inconsistent device description\n");
		return -EINVAL;
	}

	jffs2_dbg(1, "using OOB on NAND\n");

	c->oobavail = c->mtd->oobavail;

	/* Initialise write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->writesize;
	/* 0xFFFFFFFF marks the wbuf offset as uninitialised; see
	 * jffs2_flash_writev() */
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL);
	if (!c->oobbuf) {
		kfree(c->wbuf);
		return -ENOMEM;
	}

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->oobbuf);
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c)
{
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
	kfree(c->oobbuf);
}

/* Per-flash-type setup: DataFlash (erase-size granular write buffer) */
int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;		/* No cleanmarkers needed */

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->erasesize;

	/* Find a suitable c->sector_size
	 * - Not too much sectors
	 * - Sectors have to be at least 4 K + some bytes
	 * - All known dataflashes have erase sizes of 528 or 1056
	 * - we take at least 8 eraseblocks and want to have at least 8K size
	 * - The concatenation should be a power of 2
	 */

	c->sector_size = 8 * c->mtd->erasesize;

	while (c->sector_size < 8192) {
		c->sector_size *= 2;
	}

	/* It may be necessary to adjust the flash size */
	c->flash_size = c->mtd->size;

	if ((c->flash_size % c->sector_size) != 0) {
		c->flash_size = (c->flash_size / c->sector_size) * c->sector_size;
		/* NOTE(review): the value printed here is c->flash_size in
		 * bytes although the message says KiB -- confirm intent. */
		pr_warn("flash size adjusted to %dKiB\n", c->flash_size);
	}

	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify =
		kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

/* Per-flash-type setup: NOR flash that needs write-buffering */
int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
	/* Cleanmarker currently occupies whole programming regions,
	 * either one or 2 for 8Byte STMicro flashes. */
	c->cleanmarker_size = max(16u, c->mtd->writesize);

	/* Initialize write buffer */
	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
	c->wbuf_pagesize = c->mtd->writesize;
	/* 0xFFFFFFFF marks the wbuf offset as uninitialised */
	c->wbuf_ofs = 0xFFFFFFFF;

	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf_verify) {
		kfree(c->wbuf);
		return -ENOMEM;
	}
#endif
	return 0;
}

void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) {
#ifdef CONFIG_JFFS2_FS_WBUF_VERIFY
	kfree(c->wbuf_verify);
#endif
	kfree(c->wbuf);
}

/* Per-flash-type setup: UBI volume; wbuf only needed if writesize > 1 */
int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
	c->cleanmarker_size = 0;

	if (c->mtd->writesize == 1)
		/* We do not need write-buffer */
		return 0;

	init_rwsem(&c->wbuf_sem);
	INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);

	c->wbuf_pagesize = c->mtd->writesize;
	c->wbuf_ofs = 0xFFFFFFFF;
	c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL);
	if (!c->wbuf)
		return -ENOMEM;

	pr_info("write-buffering enabled buffer (%d) erasesize (%d)\n",
		c->wbuf_pagesize, c->sector_size);

	return 0;
}

void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) {
	kfree(c->wbuf);
}
// SPDX-License-Identifier: GPL-2.0+ /* * Application UART driver for: * Freescale STMP37XX/STMP378X * Alphascale ASM9260 * * Author: dmitry pervushin <[email protected]> * * Copyright 2014 Oleksij Rempel <[email protected]> * Provide Alphascale ASM9260 support. * Copyright 2008-2010 Freescale Semiconductor, Inc. * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/console.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/tty.h> #include <linux/tty_driver.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/of.h> #include <linux/dma-mapping.h> #include <linux/dmaengine.h> #include <linux/gpio/consumer.h> #include <linux/err.h> #include <linux/irq.h> #include "serial_mctrl_gpio.h" #define MXS_AUART_PORTS 5 #define MXS_AUART_FIFO_SIZE 16 #define SET_REG 0x4 #define CLR_REG 0x8 #define TOG_REG 0xc #define AUART_CTRL0 0x00000000 #define AUART_CTRL1 0x00000010 #define AUART_CTRL2 0x00000020 #define AUART_LINECTRL 0x00000030 #define AUART_LINECTRL2 0x00000040 #define AUART_INTR 0x00000050 #define AUART_DATA 0x00000060 #define AUART_STAT 0x00000070 #define AUART_DEBUG 0x00000080 #define AUART_VERSION 0x00000090 #define AUART_AUTOBAUD 0x000000a0 #define AUART_CTRL0_SFTRST (1 << 31) #define AUART_CTRL0_CLKGATE (1 << 30) #define AUART_CTRL0_RXTO_ENABLE (1 << 27) #define AUART_CTRL0_RXTIMEOUT(v) (((v) & 0x7ff) << 16) #define AUART_CTRL0_XFER_COUNT(v) ((v) & 0xffff) #define AUART_CTRL1_XFER_COUNT(v) ((v) & 0xffff) #define AUART_CTRL2_DMAONERR (1 << 26) #define AUART_CTRL2_TXDMAE (1 << 25) #define AUART_CTRL2_RXDMAE (1 << 24) #define AUART_CTRL2_CTSEN (1 << 15) #define AUART_CTRL2_RTSEN (1 << 14) #define AUART_CTRL2_RTS 
(1 << 11) #define AUART_CTRL2_RXE (1 << 9) #define AUART_CTRL2_TXE (1 << 8) #define AUART_CTRL2_UARTEN (1 << 0) #define AUART_LINECTRL_BAUD_DIV_MAX 0x003fffc0 #define AUART_LINECTRL_BAUD_DIV_MIN 0x000000ec #define AUART_LINECTRL_BAUD_DIVINT_SHIFT 16 #define AUART_LINECTRL_BAUD_DIVINT_MASK 0xffff0000 #define AUART_LINECTRL_BAUD_DIVINT(v) (((v) & 0xffff) << 16) #define AUART_LINECTRL_BAUD_DIVFRAC_SHIFT 8 #define AUART_LINECTRL_BAUD_DIVFRAC_MASK 0x00003f00 #define AUART_LINECTRL_BAUD_DIVFRAC(v) (((v) & 0x3f) << 8) #define AUART_LINECTRL_SPS (1 << 7) #define AUART_LINECTRL_WLEN_MASK 0x00000060 #define AUART_LINECTRL_WLEN(v) ((((v) - 5) & 0x3) << 5) #define AUART_LINECTRL_FEN (1 << 4) #define AUART_LINECTRL_STP2 (1 << 3) #define AUART_LINECTRL_EPS (1 << 2) #define AUART_LINECTRL_PEN (1 << 1) #define AUART_LINECTRL_BRK (1 << 0) #define AUART_INTR_RTIEN (1 << 22) #define AUART_INTR_TXIEN (1 << 21) #define AUART_INTR_RXIEN (1 << 20) #define AUART_INTR_CTSMIEN (1 << 17) #define AUART_INTR_RTIS (1 << 6) #define AUART_INTR_TXIS (1 << 5) #define AUART_INTR_RXIS (1 << 4) #define AUART_INTR_CTSMIS (1 << 1) #define AUART_STAT_BUSY (1 << 29) #define AUART_STAT_CTS (1 << 28) #define AUART_STAT_TXFE (1 << 27) #define AUART_STAT_TXFF (1 << 25) #define AUART_STAT_RXFE (1 << 24) #define AUART_STAT_OERR (1 << 19) #define AUART_STAT_BERR (1 << 18) #define AUART_STAT_PERR (1 << 17) #define AUART_STAT_FERR (1 << 16) #define AUART_STAT_RXCOUNT_MASK 0xffff /* * Start of Alphascale asm9260 defines * This list contains only differences of existing bits * between imx2x and asm9260 */ #define ASM9260_HW_CTRL0 0x0000 /* * RW. Tell the UART to execute the RX DMA Command. The * UART will clear this bit at the end of receive execution. */ #define ASM9260_BM_CTRL0_RXDMA_RUN BIT(28) /* RW. 0 use FIFO for status register; 1 use DMA */ #define ASM9260_BM_CTRL0_RXTO_SOURCE_STATUS BIT(25) /* * RW. RX TIMEOUT Enable. Valid for FIFO and DMA. 
* Warning: If this bit is set to 0, the RX timeout will not affect receive DMA * operation. If this bit is set to 1, a receive timeout will cause the receive * DMA logic to terminate by filling the remaining DMA bytes with garbage data. */ #define ASM9260_BM_CTRL0_RXTO_ENABLE BIT(24) /* * RW. Receive Timeout Counter Value: number of 8-bit-time to wait before * asserting timeout on the RX input. If the RXFIFO is not empty and the RX * input is idle, then the watchdog counter will decrement each bit-time. Note * 7-bit-time is added to the programmed value, so a value of zero will set * the counter to 7-bit-time, a value of 0x1 gives 15-bit-time and so on. Also * note that the counter is reloaded at the end of each frame, so if the frame * is 10 bits long and the timeout counter value is zero, then timeout will * occur (when FIFO is not empty) even if the RX input is not idle. The default * value is 0x3 (31 bit-time). */ #define ASM9260_BM_CTRL0_RXTO_MASK (0xff << 16) /* TIMEOUT = (100*7+1)*(1/BAUD) */ #define ASM9260_BM_CTRL0_DEFAULT_RXTIMEOUT (20 << 16) /* TX ctrl register */ #define ASM9260_HW_CTRL1 0x0010 /* * RW. Tell the UART to execute the TX DMA Command. The * UART will clear this bit at the end of transmit execution. */ #define ASM9260_BM_CTRL1_TXDMA_RUN BIT(28) #define ASM9260_HW_CTRL2 0x0020 /* * RW. Receive Interrupt FIFO Level Select. * The trigger points for the receive interrupt are as follows: * ONE_EIGHTHS = 0x0 Trigger on FIFO full to at least 2 of 16 entries. * ONE_QUARTER = 0x1 Trigger on FIFO full to at least 4 of 16 entries. * ONE_HALF = 0x2 Trigger on FIFO full to at least 8 of 16 entries. * THREE_QUARTERS = 0x3 Trigger on FIFO full to at least 12 of 16 entries. * SEVEN_EIGHTHS = 0x4 Trigger on FIFO full to at least 14 of 16 entries. */ #define ASM9260_BM_CTRL2_RXIFLSEL (7 << 20) #define ASM9260_BM_CTRL2_DEFAULT_RXIFLSEL (3 << 20) /* RW. 
Same as RXIFLSEL */ #define ASM9260_BM_CTRL2_TXIFLSEL (7 << 16) #define ASM9260_BM_CTRL2_DEFAULT_TXIFLSEL (2 << 16) /* RW. Set DTR. When this bit is 1, the output is 0. */ #define ASM9260_BM_CTRL2_DTR BIT(10) /* RW. Loop Back Enable */ #define ASM9260_BM_CTRL2_LBE BIT(7) #define ASM9260_BM_CTRL2_PORT_ENABLE BIT(0) #define ASM9260_HW_LINECTRL 0x0030 /* * RW. Stick Parity Select. When bits 1, 2, and 7 of this register are set, the * parity bit is transmitted and checked as a 0. When bits 1 and 7 are set, * and bit 2 is 0, the parity bit is transmitted and checked as a 1. When this * bit is cleared stick parity is disabled. */ #define ASM9260_BM_LCTRL_SPS BIT(7) /* RW. Word length */ #define ASM9260_BM_LCTRL_WLEN (3 << 5) #define ASM9260_BM_LCTRL_CHRL_5 (0 << 5) #define ASM9260_BM_LCTRL_CHRL_6 (1 << 5) #define ASM9260_BM_LCTRL_CHRL_7 (2 << 5) #define ASM9260_BM_LCTRL_CHRL_8 (3 << 5) /* * Interrupt register. * contains the interrupt enables and the interrupt status bits */ #define ASM9260_HW_INTR 0x0040 /* Tx FIFO EMPTY Raw Interrupt enable */ #define ASM9260_BM_INTR_TFEIEN BIT(27) /* Overrun Error Interrupt Enable. */ #define ASM9260_BM_INTR_OEIEN BIT(26) /* Break Error Interrupt Enable. */ #define ASM9260_BM_INTR_BEIEN BIT(25) /* Parity Error Interrupt Enable. */ #define ASM9260_BM_INTR_PEIEN BIT(24) /* Framing Error Interrupt Enable. */ #define ASM9260_BM_INTR_FEIEN BIT(23) /* nUARTDSR Modem Interrupt Enable. */ #define ASM9260_BM_INTR_DSRMIEN BIT(19) /* nUARTDCD Modem Interrupt Enable. */ #define ASM9260_BM_INTR_DCDMIEN BIT(18) /* nUARTRI Modem Interrupt Enable. 
*/ #define ASM9260_BM_INTR_RIMIEN BIT(16) /* Auto-Boud Timeout */ #define ASM9260_BM_INTR_ABTO BIT(13) #define ASM9260_BM_INTR_ABEO BIT(12) /* Tx FIFO EMPTY Raw Interrupt state */ #define ASM9260_BM_INTR_TFEIS BIT(11) /* Overrun Error */ #define ASM9260_BM_INTR_OEIS BIT(10) /* Break Error */ #define ASM9260_BM_INTR_BEIS BIT(9) /* Parity Error */ #define ASM9260_BM_INTR_PEIS BIT(8) /* Framing Error */ #define ASM9260_BM_INTR_FEIS BIT(7) #define ASM9260_BM_INTR_DSRMIS BIT(3) #define ASM9260_BM_INTR_DCDMIS BIT(2) #define ASM9260_BM_INTR_RIMIS BIT(0) /* * RW. In DMA mode, up to 4 Received/Transmit characters can be accessed at a * time. In PIO mode, only one character can be accessed at a time. The status * register contains the receive data flags and valid bits. */ #define ASM9260_HW_DATA 0x0050 #define ASM9260_HW_STAT 0x0060 /* RO. If 1, UARTAPP is present in this product. */ #define ASM9260_BM_STAT_PRESENT BIT(31) /* RO. If 1, HISPEED is present in this product. */ #define ASM9260_BM_STAT_HISPEED BIT(30) /* RO. Receive FIFO Full. */ #define ASM9260_BM_STAT_RXFULL BIT(26) /* RO. The UART Debug Register contains the state of the DMA signals. */ #define ASM9260_HW_DEBUG 0x0070 /* DMA Command Run Status */ #define ASM9260_BM_DEBUG_TXDMARUN BIT(5) #define ASM9260_BM_DEBUG_RXDMARUN BIT(4) /* DMA Command End Status */ #define ASM9260_BM_DEBUG_TXCMDEND BIT(3) #define ASM9260_BM_DEBUG_RXCMDEND BIT(2) /* DMA Request Status */ #define ASM9260_BM_DEBUG_TXDMARQ BIT(1) #define ASM9260_BM_DEBUG_RXDMARQ BIT(0) #define ASM9260_HW_ILPR 0x0080 #define ASM9260_HW_RS485CTRL 0x0090 /* * RW. This bit reverses the polarity of the direction control signal on the RTS * (or DTR) pin. * If 0, The direction control pin will be driven to logic ‘0’ when the * transmitter has data to be sent. It will be driven to logic ‘1’ after the * last bit of data has been transmitted. */ #define ASM9260_BM_RS485CTRL_ONIV BIT(5) /* RW. Enable Auto Direction Control. 
*/ #define ASM9260_BM_RS485CTRL_DIR_CTRL BIT(4) /* * RW. If 0 and DIR_CTRL = 1, pin RTS is used for direction control. * If 1 and DIR_CTRL = 1, pin DTR is used for direction control. */ #define ASM9260_BM_RS485CTRL_PINSEL BIT(3) /* RW. Enable Auto Address Detect (AAD). */ #define ASM9260_BM_RS485CTRL_AADEN BIT(2) /* RW. Disable receiver. */ #define ASM9260_BM_RS485CTRL_RXDIS BIT(1) /* RW. Enable RS-485/EIA-485 Normal Multidrop Mode (NMM) */ #define ASM9260_BM_RS485CTRL_RS485EN BIT(0) #define ASM9260_HW_RS485ADRMATCH 0x00a0 /* Contains the address match value. */ #define ASM9260_BM_RS485ADRMATCH_MASK (0xff << 0) #define ASM9260_HW_RS485DLY 0x00b0 /* * RW. Contains the direction control (RTS or DTR) delay value. This delay time * is in periods of the baud clock. */ #define ASM9260_BM_RS485DLY_MASK (0xff << 0) #define ASM9260_HW_AUTOBAUD 0x00c0 /* WO. Auto-baud time-out interrupt clear bit. */ #define ASM9260_BM_AUTOBAUD_TO_INT_CLR BIT(9) /* WO. End of auto-baud interrupt clear bit. */ #define ASM9260_BM_AUTOBAUD_EO_INT_CLR BIT(8) /* Restart in case of timeout (counter restarts at next UART Rx falling edge) */ #define ASM9260_BM_AUTOBAUD_AUTORESTART BIT(2) /* Auto-baud mode select bit. 0 - Mode 0, 1 - Mode 1. */ #define ASM9260_BM_AUTOBAUD_MODE BIT(1) /* * Auto-baud start (auto-baud is running). Auto-baud run bit. This bit is * automatically cleared after auto-baud completion. */ #define ASM9260_BM_AUTOBAUD_START BIT(0) #define ASM9260_HW_CTRL3 0x00d0 #define ASM9260_BM_CTRL3_OUTCLK_DIV_MASK (0xffff << 16) /* * RW. Provide clk over OUTCLK pin. In case of asm9260 it can be configured on * pins 137 and 144. */ #define ASM9260_BM_CTRL3_MASTERMODE BIT(6) /* RW. Baud Rate Mode: 1 - Enable sync mode. 0 - async mode. */ #define ASM9260_BM_CTRL3_SYNCMODE BIT(4) /* RW. 1 - MSB bit send frist; 0 - LSB bit frist. */ #define ASM9260_BM_CTRL3_MSBF BIT(2) /* RW. 1 - sample rate = 8 x Baudrate; 0 - sample rate = 16 x Baudrate. */ #define ASM9260_BM_CTRL3_BAUD8 BIT(1) /* RW. 
1 - Set word length to 9bit. 0 - use ASM9260_BM_LCTRL_WLEN */ #define ASM9260_BM_CTRL3_9BIT BIT(0) #define ASM9260_HW_ISO7816_CTRL 0x00e0 /* RW. Enable High Speed mode. */ #define ASM9260_BM_ISO7816CTRL_HS BIT(12) /* Disable Successive Receive NACK */ #define ASM9260_BM_ISO7816CTRL_DS_NACK BIT(8) #define ASM9260_BM_ISO7816CTRL_MAX_ITER_MASK (0xff << 4) /* Receive NACK Inhibit */ #define ASM9260_BM_ISO7816CTRL_INACK BIT(3) #define ASM9260_BM_ISO7816CTRL_NEG_DATA BIT(2) /* RW. 1 - ISO7816 mode; 0 - USART mode */ #define ASM9260_BM_ISO7816CTRL_ENABLE BIT(0) #define ASM9260_HW_ISO7816_ERRCNT 0x00f0 /* Parity error counter. Will be cleared after reading */ #define ASM9260_BM_ISO7816_NB_ERRORS_MASK (0xff << 0) #define ASM9260_HW_ISO7816_STATUS 0x0100 /* Max number of Repetitions Reached */ #define ASM9260_BM_ISO7816_STAT_ITERATION BIT(0) /* End of Alphascale asm9260 defines */ static struct uart_driver auart_driver; enum mxs_auart_type { IMX23_AUART, IMX28_AUART, ASM9260_AUART, }; struct vendor_data { const u16 *reg_offset; }; enum { REG_CTRL0, REG_CTRL1, REG_CTRL2, REG_LINECTRL, REG_LINECTRL2, REG_INTR, REG_DATA, REG_STAT, REG_DEBUG, REG_VERSION, REG_AUTOBAUD, /* The size of the array - must be last */ REG_ARRAY_SIZE, }; static const u16 mxs_asm9260_offsets[REG_ARRAY_SIZE] = { [REG_CTRL0] = ASM9260_HW_CTRL0, [REG_CTRL1] = ASM9260_HW_CTRL1, [REG_CTRL2] = ASM9260_HW_CTRL2, [REG_LINECTRL] = ASM9260_HW_LINECTRL, [REG_INTR] = ASM9260_HW_INTR, [REG_DATA] = ASM9260_HW_DATA, [REG_STAT] = ASM9260_HW_STAT, [REG_DEBUG] = ASM9260_HW_DEBUG, [REG_AUTOBAUD] = ASM9260_HW_AUTOBAUD, }; static const u16 mxs_stmp37xx_offsets[REG_ARRAY_SIZE] = { [REG_CTRL0] = AUART_CTRL0, [REG_CTRL1] = AUART_CTRL1, [REG_CTRL2] = AUART_CTRL2, [REG_LINECTRL] = AUART_LINECTRL, [REG_LINECTRL2] = AUART_LINECTRL2, [REG_INTR] = AUART_INTR, [REG_DATA] = AUART_DATA, [REG_STAT] = AUART_STAT, [REG_DEBUG] = AUART_DEBUG, [REG_VERSION] = AUART_VERSION, [REG_AUTOBAUD] = AUART_AUTOBAUD, }; static const struct vendor_data 
vendor_alphascale_asm9260 = { .reg_offset = mxs_asm9260_offsets, }; static const struct vendor_data vendor_freescale_stmp37xx = { .reg_offset = mxs_stmp37xx_offsets, }; struct mxs_auart_port { struct uart_port port; #define MXS_AUART_DMA_ENABLED 0x2 #define MXS_AUART_DMA_TX_SYNC 2 /* bit 2 */ #define MXS_AUART_DMA_RX_READY 3 /* bit 3 */ #define MXS_AUART_RTSCTS 4 /* bit 4 */ unsigned long flags; unsigned int mctrl_prev; enum mxs_auart_type devtype; const struct vendor_data *vendor; struct clk *clk; struct clk *clk_ahb; struct device *dev; /* for DMA */ struct scatterlist tx_sgl; struct dma_chan *tx_dma_chan; void *tx_dma_buf; struct scatterlist rx_sgl; struct dma_chan *rx_dma_chan; void *rx_dma_buf; struct mctrl_gpios *gpios; int gpio_irq[UART_GPIO_MAX]; bool ms_irq_enabled; }; static const struct of_device_id mxs_auart_dt_ids[] = { { .compatible = "fsl,imx28-auart", .data = (const void *)IMX28_AUART }, { .compatible = "fsl,imx23-auart", .data = (const void *)IMX23_AUART }, { .compatible = "alphascale,asm9260-auart", .data = (const void *)ASM9260_AUART }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, mxs_auart_dt_ids); static inline int is_imx28_auart(struct mxs_auart_port *s) { return s->devtype == IMX28_AUART; } static inline int is_asm9260_auart(struct mxs_auart_port *s) { return s->devtype == ASM9260_AUART; } static inline bool auart_dma_enabled(struct mxs_auart_port *s) { return s->flags & MXS_AUART_DMA_ENABLED; } static unsigned int mxs_reg_to_offset(const struct mxs_auart_port *uap, unsigned int reg) { return uap->vendor->reg_offset[reg]; } static unsigned int mxs_read(const struct mxs_auart_port *uap, unsigned int reg) { void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg); return readl_relaxed(addr); } static void mxs_write(unsigned int val, struct mxs_auart_port *uap, unsigned int reg) { void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg); writel_relaxed(val, addr); } static void mxs_set(unsigned int val, struct 
mxs_auart_port *uap, unsigned int reg)
{
	void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg);

	/* SET_REG offset: write sets the given bits without a read */
	writel_relaxed(val, addr + SET_REG);
}

/* Clear bits in a register via the hardware CLR_REG alias */
static void mxs_clr(unsigned int val, struct mxs_auart_port *uap,
		    unsigned int reg)
{
	void __iomem *addr = uap->port.membase + mxs_reg_to_offset(uap, reg);

	writel_relaxed(val, addr + CLR_REG);
}

static void mxs_auart_stop_tx(struct uart_port *u);

#define to_auart_port(u) container_of(u, struct mxs_auart_port, port)

static void mxs_auart_tx_chars(struct mxs_auart_port *s);

/* DMA-engine completion callback for a finished TX transfer */
static void dma_tx_callback(void *param)
{
	struct mxs_auart_port *s = param;
	struct tty_port *tport = &s->port.state->port;

	dma_unmap_sg(s->dev, &s->tx_sgl, 1, DMA_TO_DEVICE);

	/* clear the bit used to serialize the DMA tx. */
	clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
	smp_mb__after_atomic();

	/* wake up the possible processes. */
	if (kfifo_len(&tport->xmit_fifo) < WAKEUP_CHARS)
		uart_write_wakeup(&s->port);

	/* Kick off the next chunk, if any */
	mxs_auart_tx_chars(s);
}

/* Queue @size bytes from s->tx_dma_buf for DMA transmission */
static int mxs_auart_dma_tx(struct mxs_auart_port *s, int size)
{
	struct dma_async_tx_descriptor *desc;
	struct scatterlist *sgl = &s->tx_sgl;
	struct dma_chan *channel = s->tx_dma_chan;
	u32 pio;

	/* [1] : send PIO. Note, the first pio word is CTRL1. */
	pio = AUART_CTRL1_XFER_COUNT(size);
	desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)&pio,
					1, DMA_TRANS_NONE, 0);
	if (!desc) {
		dev_err(s->dev, "step 1 error\n");
		return -EINVAL;
	}

	/* [2] : set DMA buffer.
 */
	sg_init_one(sgl, s->tx_dma_buf, size);
	dma_map_sg(s->dev, sgl, 1, DMA_TO_DEVICE);
	desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_MEM_TO_DEV,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(s->dev, "step 2 error\n");
		return -EINVAL;
	}

	/* [3] : submit the DMA */
	desc->callback = dma_tx_callback;
	desc->callback_param = s;
	dmaengine_submit(desc);
	dma_async_issue_pending(channel);
	return 0;
}

/* Push pending TX data out, either via DMA or PIO through the FIFO */
static void mxs_auart_tx_chars(struct mxs_auart_port *s)
{
	struct tty_port *tport = &s->port.state->port;
	bool pending;
	u8 ch;

	if (auart_dma_enabled(s)) {
		u32 i = 0;
		void *buffer = s->tx_dma_buf;

		/* Only one DMA TX may be in flight at a time */
		if (test_and_set_bit(MXS_AUART_DMA_TX_SYNC, &s->flags))
			return;

		if (uart_tx_stopped(&s->port))
			mxs_auart_stop_tx(&s->port);
		else
			i = kfifo_out(&tport->xmit_fifo, buffer,
				      UART_XMIT_SIZE);
		if (i) {
			mxs_auart_dma_tx(s, i);
		} else {
			clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags);
			smp_mb__after_atomic();
		}
		return;
	}

	/* PIO path: stuff the FIFO until full, then enable/disable the TX
	 * interrupt depending on whether data is still pending */
	pending = uart_port_tx_flags(&s->port, ch, UART_TX_NOSTOP,
		!(mxs_read(s, REG_STAT) & AUART_STAT_TXFF),
		mxs_write(ch, s, REG_DATA));
	if (pending)
		mxs_set(AUART_INTR_TXIEN, s, REG_INTR);
	else
		mxs_clr(AUART_INTR_TXIEN, s, REG_INTR);

	if (uart_tx_stopped(&s->port))
		mxs_auart_stop_tx(&s->port);
}

/* Read one character from the RX FIFO and hand it to the tty layer */
static void mxs_auart_rx_char(struct mxs_auart_port *s)
{
	u32 stat;
	u8 c, flag;

	c = mxs_read(s, REG_DATA);
	stat = mxs_read(s, REG_STAT);

	flag = TTY_NORMAL;
	s->port.icount.rx++;

	/* Account break/parity/framing errors before masking */
	if (stat & AUART_STAT_BERR) {
		s->port.icount.brk++;
		if (uart_handle_break(&s->port))
			goto out;
	} else if (stat & AUART_STAT_PERR) {
		s->port.icount.parity++;
	} else if (stat & AUART_STAT_FERR) {
		s->port.icount.frame++;
	}

	/*
	 * Mask off conditions which should be ignored.
*/ stat &= s->port.read_status_mask; if (stat & AUART_STAT_BERR) { flag = TTY_BREAK; } else if (stat & AUART_STAT_PERR) flag = TTY_PARITY; else if (stat & AUART_STAT_FERR) flag = TTY_FRAME; if (stat & AUART_STAT_OERR) s->port.icount.overrun++; if (uart_handle_sysrq_char(&s->port, c)) goto out; uart_insert_char(&s->port, stat, AUART_STAT_OERR, c, flag); out: mxs_write(stat, s, REG_STAT); } static void mxs_auart_rx_chars(struct mxs_auart_port *s) { u32 stat = 0; for (;;) { stat = mxs_read(s, REG_STAT); if (stat & AUART_STAT_RXFE) break; mxs_auart_rx_char(s); } mxs_write(stat, s, REG_STAT); tty_flip_buffer_push(&s->port.state->port); } static int mxs_auart_request_port(struct uart_port *u) { return 0; } static int mxs_auart_verify_port(struct uart_port *u, struct serial_struct *ser) { if (u->type != PORT_UNKNOWN && u->type != PORT_IMX) return -EINVAL; return 0; } static void mxs_auart_config_port(struct uart_port *u, int flags) { } static const char *mxs_auart_type(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); return dev_name(s->dev); } static void mxs_auart_release_port(struct uart_port *u) { } static void mxs_auart_set_mctrl(struct uart_port *u, unsigned mctrl) { struct mxs_auart_port *s = to_auart_port(u); u32 ctrl = mxs_read(s, REG_CTRL2); ctrl &= ~(AUART_CTRL2_RTSEN | AUART_CTRL2_RTS); if (mctrl & TIOCM_RTS) { if (uart_cts_enabled(u)) ctrl |= AUART_CTRL2_RTSEN; else ctrl |= AUART_CTRL2_RTS; } mxs_write(ctrl, s, REG_CTRL2); mctrl_gpio_set(s->gpios, mctrl); } #define MCTRL_ANY_DELTA (TIOCM_RI | TIOCM_DSR | TIOCM_CD | TIOCM_CTS) static u32 mxs_auart_modem_status(struct mxs_auart_port *s, u32 mctrl) { u32 mctrl_diff; mctrl_diff = mctrl ^ s->mctrl_prev; s->mctrl_prev = mctrl; if (mctrl_diff & MCTRL_ANY_DELTA && s->ms_irq_enabled && s->port.state != NULL) { if (mctrl_diff & TIOCM_RI) s->port.icount.rng++; if (mctrl_diff & TIOCM_DSR) s->port.icount.dsr++; if (mctrl_diff & TIOCM_CD) uart_handle_dcd_change(&s->port, mctrl & TIOCM_CD); if (mctrl_diff 
& TIOCM_CTS) uart_handle_cts_change(&s->port, mctrl & TIOCM_CTS); wake_up_interruptible(&s->port.state->port.delta_msr_wait); } return mctrl; } static u32 mxs_auart_get_mctrl(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); u32 stat = mxs_read(s, REG_STAT); u32 mctrl = 0; if (stat & AUART_STAT_CTS) mctrl |= TIOCM_CTS; return mctrl_gpio_get(s->gpios, &mctrl); } /* * Enable modem status interrupts */ static void mxs_auart_enable_ms(struct uart_port *port) { struct mxs_auart_port *s = to_auart_port(port); /* * Interrupt should not be enabled twice */ if (s->ms_irq_enabled) return; s->ms_irq_enabled = true; if (s->gpio_irq[UART_GPIO_CTS] >= 0) enable_irq(s->gpio_irq[UART_GPIO_CTS]); /* TODO: enable AUART_INTR_CTSMIEN otherwise */ if (s->gpio_irq[UART_GPIO_DSR] >= 0) enable_irq(s->gpio_irq[UART_GPIO_DSR]); if (s->gpio_irq[UART_GPIO_RI] >= 0) enable_irq(s->gpio_irq[UART_GPIO_RI]); if (s->gpio_irq[UART_GPIO_DCD] >= 0) enable_irq(s->gpio_irq[UART_GPIO_DCD]); } /* * Disable modem status interrupts */ static void mxs_auart_disable_ms(struct uart_port *port) { struct mxs_auart_port *s = to_auart_port(port); /* * Interrupt should not be disabled twice */ if (!s->ms_irq_enabled) return; s->ms_irq_enabled = false; if (s->gpio_irq[UART_GPIO_CTS] >= 0) disable_irq(s->gpio_irq[UART_GPIO_CTS]); /* TODO: disable AUART_INTR_CTSMIEN otherwise */ if (s->gpio_irq[UART_GPIO_DSR] >= 0) disable_irq(s->gpio_irq[UART_GPIO_DSR]); if (s->gpio_irq[UART_GPIO_RI] >= 0) disable_irq(s->gpio_irq[UART_GPIO_RI]); if (s->gpio_irq[UART_GPIO_DCD] >= 0) disable_irq(s->gpio_irq[UART_GPIO_DCD]); } static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s); static void dma_rx_callback(void *arg) { struct mxs_auart_port *s = (struct mxs_auart_port *) arg; struct tty_port *port = &s->port.state->port; int count; u32 stat; dma_unmap_sg(s->dev, &s->rx_sgl, 1, DMA_FROM_DEVICE); stat = mxs_read(s, REG_STAT); stat &= ~(AUART_STAT_OERR | AUART_STAT_BERR | AUART_STAT_PERR | AUART_STAT_FERR); count = 
stat & AUART_STAT_RXCOUNT_MASK; tty_insert_flip_string(port, s->rx_dma_buf, count); mxs_write(stat, s, REG_STAT); tty_flip_buffer_push(port); /* start the next DMA for RX. */ mxs_auart_dma_prep_rx(s); } static int mxs_auart_dma_prep_rx(struct mxs_auart_port *s) { struct dma_async_tx_descriptor *desc; struct scatterlist *sgl = &s->rx_sgl; struct dma_chan *channel = s->rx_dma_chan; u32 pio[1]; /* [1] : send PIO */ pio[0] = AUART_CTRL0_RXTO_ENABLE | AUART_CTRL0_RXTIMEOUT(0x80) | AUART_CTRL0_XFER_COUNT(UART_XMIT_SIZE); desc = dmaengine_prep_slave_sg(channel, (struct scatterlist *)pio, 1, DMA_TRANS_NONE, 0); if (!desc) { dev_err(s->dev, "step 1 error\n"); return -EINVAL; } /* [2] : send DMA request */ sg_init_one(sgl, s->rx_dma_buf, UART_XMIT_SIZE); dma_map_sg(s->dev, sgl, 1, DMA_FROM_DEVICE); desc = dmaengine_prep_slave_sg(channel, sgl, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) { dev_err(s->dev, "step 2 error\n"); return -1; } /* [3] : submit the DMA, but do not issue it. 
*/ desc->callback = dma_rx_callback; desc->callback_param = s; dmaengine_submit(desc); dma_async_issue_pending(channel); return 0; } static void mxs_auart_dma_exit_channel(struct mxs_auart_port *s) { if (s->tx_dma_chan) { dma_release_channel(s->tx_dma_chan); s->tx_dma_chan = NULL; } if (s->rx_dma_chan) { dma_release_channel(s->rx_dma_chan); s->rx_dma_chan = NULL; } kfree(s->tx_dma_buf); kfree(s->rx_dma_buf); s->tx_dma_buf = NULL; s->rx_dma_buf = NULL; } static void mxs_auart_dma_exit(struct mxs_auart_port *s) { mxs_clr(AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE | AUART_CTRL2_DMAONERR, s, REG_CTRL2); mxs_auart_dma_exit_channel(s); s->flags &= ~MXS_AUART_DMA_ENABLED; clear_bit(MXS_AUART_DMA_TX_SYNC, &s->flags); clear_bit(MXS_AUART_DMA_RX_READY, &s->flags); } static int mxs_auart_dma_init(struct mxs_auart_port *s) { struct dma_chan *chan; if (auart_dma_enabled(s)) return 0; /* init for RX */ chan = dma_request_chan(s->dev, "rx"); if (IS_ERR(chan)) goto err_out; s->rx_dma_chan = chan; s->rx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA); if (!s->rx_dma_buf) goto err_out; /* init for TX */ chan = dma_request_chan(s->dev, "tx"); if (IS_ERR(chan)) goto err_out; s->tx_dma_chan = chan; s->tx_dma_buf = kzalloc(UART_XMIT_SIZE, GFP_KERNEL | GFP_DMA); if (!s->tx_dma_buf) goto err_out; /* set the flags */ s->flags |= MXS_AUART_DMA_ENABLED; dev_dbg(s->dev, "enabled the DMA support."); /* The DMA buffer is now the FIFO the TTY subsystem can use */ s->port.fifosize = UART_XMIT_SIZE; return 0; err_out: mxs_auart_dma_exit_channel(s); return -EINVAL; } #define RTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_RTS) #define CTS_AT_AUART() !mctrl_gpio_to_gpiod(s->gpios, UART_GPIO_CTS) static void mxs_auart_settermios(struct uart_port *u, struct ktermios *termios, const struct ktermios *old) { struct mxs_auart_port *s = to_auart_port(u); u32 ctrl, ctrl2, div; unsigned int cflag, baud, baud_min, baud_max; cflag = termios->c_cflag; ctrl = AUART_LINECTRL_FEN; ctrl2 = mxs_read(s, 
REG_CTRL2); ctrl |= AUART_LINECTRL_WLEN(tty_get_char_size(cflag)); /* parity */ if (cflag & PARENB) { ctrl |= AUART_LINECTRL_PEN; if ((cflag & PARODD) == 0) ctrl |= AUART_LINECTRL_EPS; if (cflag & CMSPAR) ctrl |= AUART_LINECTRL_SPS; } u->read_status_mask = AUART_STAT_OERR; if (termios->c_iflag & INPCK) u->read_status_mask |= AUART_STAT_PERR; if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK)) u->read_status_mask |= AUART_STAT_BERR; /* * Characters to ignore */ u->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) u->ignore_status_mask |= AUART_STAT_PERR; if (termios->c_iflag & IGNBRK) { u->ignore_status_mask |= AUART_STAT_BERR; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) u->ignore_status_mask |= AUART_STAT_OERR; } /* * ignore all characters if CREAD is not set */ if (cflag & CREAD) ctrl2 |= AUART_CTRL2_RXE; else ctrl2 &= ~AUART_CTRL2_RXE; /* figure out the stop bits requested */ if (cflag & CSTOPB) ctrl |= AUART_LINECTRL_STP2; /* figure out the hardware flow control settings */ ctrl2 &= ~(AUART_CTRL2_CTSEN | AUART_CTRL2_RTSEN); if (cflag & CRTSCTS) { /* * The DMA has a bug(see errata:2836) in mx23. * So we can not implement the DMA for auart in mx23, * we can only implement the DMA support for auart * in mx28. 
*/ if (is_imx28_auart(s) && test_bit(MXS_AUART_RTSCTS, &s->flags)) { if (!mxs_auart_dma_init(s)) /* enable DMA tranfer */ ctrl2 |= AUART_CTRL2_TXDMAE | AUART_CTRL2_RXDMAE | AUART_CTRL2_DMAONERR; } /* Even if RTS is GPIO line RTSEN can be enabled because * the pinctrl configuration decides about RTS pin function */ ctrl2 |= AUART_CTRL2_RTSEN; if (CTS_AT_AUART()) ctrl2 |= AUART_CTRL2_CTSEN; } /* set baud rate */ if (is_asm9260_auart(s)) { baud = uart_get_baud_rate(u, termios, old, u->uartclk * 4 / 0x3FFFFF, u->uartclk / 16); div = u->uartclk * 4 / baud; } else { baud_min = DIV_ROUND_UP(u->uartclk * 32, AUART_LINECTRL_BAUD_DIV_MAX); baud_max = u->uartclk * 32 / AUART_LINECTRL_BAUD_DIV_MIN; baud = uart_get_baud_rate(u, termios, old, baud_min, baud_max); div = DIV_ROUND_CLOSEST(u->uartclk * 32, baud); } ctrl |= AUART_LINECTRL_BAUD_DIVFRAC(div & 0x3F); ctrl |= AUART_LINECTRL_BAUD_DIVINT(div >> 6); mxs_write(ctrl, s, REG_LINECTRL); mxs_write(ctrl2, s, REG_CTRL2); uart_update_timeout(u, termios->c_cflag, baud); /* prepare for the DMA RX. */ if (auart_dma_enabled(s) && !test_and_set_bit(MXS_AUART_DMA_RX_READY, &s->flags)) { if (!mxs_auart_dma_prep_rx(s)) { /* Disable the normal RX interrupt. 
*/ mxs_clr(AUART_INTR_RXIEN | AUART_INTR_RTIEN, s, REG_INTR); } else { mxs_auart_dma_exit(s); dev_err(s->dev, "We can not start up the DMA.\n"); } } /* CTS flow-control and modem-status interrupts */ if (UART_ENABLE_MS(u, termios->c_cflag)) mxs_auart_enable_ms(u); else mxs_auart_disable_ms(u); } static void mxs_auart_set_ldisc(struct uart_port *port, struct ktermios *termios) { if (termios->c_line == N_PPS) { port->flags |= UPF_HARDPPS_CD; mxs_auart_enable_ms(port); } else { port->flags &= ~UPF_HARDPPS_CD; } } static irqreturn_t mxs_auart_irq_handle(int irq, void *context) { u32 istat, stat; struct mxs_auart_port *s = context; u32 mctrl_temp = s->mctrl_prev; uart_port_lock(&s->port); stat = mxs_read(s, REG_STAT); istat = mxs_read(s, REG_INTR); /* ack irq */ mxs_clr(istat & (AUART_INTR_RTIS | AUART_INTR_TXIS | AUART_INTR_RXIS | AUART_INTR_CTSMIS), s, REG_INTR); /* * Dealing with GPIO interrupt */ if (irq == s->gpio_irq[UART_GPIO_CTS] || irq == s->gpio_irq[UART_GPIO_DCD] || irq == s->gpio_irq[UART_GPIO_DSR] || irq == s->gpio_irq[UART_GPIO_RI]) mxs_auart_modem_status(s, mctrl_gpio_get(s->gpios, &mctrl_temp)); if (istat & AUART_INTR_CTSMIS) { if (CTS_AT_AUART() && s->ms_irq_enabled) uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS); mxs_clr(AUART_INTR_CTSMIS, s, REG_INTR); istat &= ~AUART_INTR_CTSMIS; } if (istat & (AUART_INTR_RTIS | AUART_INTR_RXIS)) { if (!auart_dma_enabled(s)) mxs_auart_rx_chars(s); istat &= ~(AUART_INTR_RTIS | AUART_INTR_RXIS); } if (istat & AUART_INTR_TXIS) { mxs_auart_tx_chars(s); istat &= ~AUART_INTR_TXIS; } uart_port_unlock(&s->port); return IRQ_HANDLED; } static void mxs_auart_reset_deassert(struct mxs_auart_port *s) { int i; unsigned int reg; mxs_clr(AUART_CTRL0_SFTRST, s, REG_CTRL0); for (i = 0; i < 10000; i++) { reg = mxs_read(s, REG_CTRL0); if (!(reg & AUART_CTRL0_SFTRST)) break; udelay(3); } mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); } static void mxs_auart_reset_assert(struct mxs_auart_port *s) { int i; u32 reg; reg = mxs_read(s, 
REG_CTRL0); /* if already in reset state, keep it untouched */ if (reg & AUART_CTRL0_SFTRST) return; mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); mxs_set(AUART_CTRL0_SFTRST, s, REG_CTRL0); for (i = 0; i < 1000; i++) { reg = mxs_read(s, REG_CTRL0); /* reset is finished when the clock is gated */ if (reg & AUART_CTRL0_CLKGATE) return; udelay(10); } dev_err(s->dev, "Failed to reset the unit."); } static int mxs_auart_startup(struct uart_port *u) { int ret; struct mxs_auart_port *s = to_auart_port(u); ret = clk_prepare_enable(s->clk); if (ret) return ret; if (uart_console(u)) { mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); } else { /* reset the unit to a well known state */ mxs_auart_reset_assert(s); mxs_auart_reset_deassert(s); } mxs_set(AUART_CTRL2_UARTEN, s, REG_CTRL2); mxs_write(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN, s, REG_INTR); /* Reset FIFO size (it could have changed if DMA was enabled) */ u->fifosize = MXS_AUART_FIFO_SIZE; /* * Enable fifo so all four bytes of a DMA word are written to * output (otherwise, only the LSB is written, ie. 
1 in 4 bytes) */ mxs_set(AUART_LINECTRL_FEN, s, REG_LINECTRL); /* get initial status of modem lines */ mctrl_gpio_get(s->gpios, &s->mctrl_prev); s->ms_irq_enabled = false; return 0; } static void mxs_auart_shutdown(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); mxs_auart_disable_ms(u); if (auart_dma_enabled(s)) mxs_auart_dma_exit(s); if (uart_console(u)) { mxs_clr(AUART_CTRL2_UARTEN, s, REG_CTRL2); mxs_clr(AUART_INTR_RXIEN | AUART_INTR_RTIEN | AUART_INTR_CTSMIEN, s, REG_INTR); mxs_set(AUART_CTRL0_CLKGATE, s, REG_CTRL0); } else { mxs_auart_reset_assert(s); } clk_disable_unprepare(s->clk); } static unsigned int mxs_auart_tx_empty(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); if ((mxs_read(s, REG_STAT) & (AUART_STAT_TXFE | AUART_STAT_BUSY)) == AUART_STAT_TXFE) return TIOCSER_TEMT; return 0; } static void mxs_auart_start_tx(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); /* enable transmitter */ mxs_set(AUART_CTRL2_TXE, s, REG_CTRL2); mxs_auart_tx_chars(s); } static void mxs_auart_stop_tx(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); mxs_clr(AUART_CTRL2_TXE, s, REG_CTRL2); } static void mxs_auart_stop_rx(struct uart_port *u) { struct mxs_auart_port *s = to_auart_port(u); mxs_clr(AUART_CTRL2_RXE, s, REG_CTRL2); } static void mxs_auart_break_ctl(struct uart_port *u, int ctl) { struct mxs_auart_port *s = to_auart_port(u); if (ctl) mxs_set(AUART_LINECTRL_BRK, s, REG_LINECTRL); else mxs_clr(AUART_LINECTRL_BRK, s, REG_LINECTRL); } static const struct uart_ops mxs_auart_ops = { .tx_empty = mxs_auart_tx_empty, .start_tx = mxs_auart_start_tx, .stop_tx = mxs_auart_stop_tx, .stop_rx = mxs_auart_stop_rx, .enable_ms = mxs_auart_enable_ms, .break_ctl = mxs_auart_break_ctl, .set_mctrl = mxs_auart_set_mctrl, .get_mctrl = mxs_auart_get_mctrl, .startup = mxs_auart_startup, .shutdown = mxs_auart_shutdown, .set_termios = mxs_auart_settermios, .set_ldisc = mxs_auart_set_ldisc, .type = mxs_auart_type, 
.release_port = mxs_auart_release_port, .request_port = mxs_auart_request_port, .config_port = mxs_auart_config_port, .verify_port = mxs_auart_verify_port, }; static struct mxs_auart_port *auart_port[MXS_AUART_PORTS]; #ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE static void mxs_auart_console_putchar(struct uart_port *port, unsigned char ch) { struct mxs_auart_port *s = to_auart_port(port); unsigned int to = 1000; while (mxs_read(s, REG_STAT) & AUART_STAT_TXFF) { if (!to--) break; udelay(1); } mxs_write(ch, s, REG_DATA); } static void auart_console_write(struct console *co, const char *str, unsigned int count) { struct mxs_auart_port *s; struct uart_port *port; unsigned int old_ctrl0, old_ctrl2; unsigned int to = 20000; if (co->index >= MXS_AUART_PORTS || co->index < 0) return; s = auart_port[co->index]; port = &s->port; clk_enable(s->clk); /* First save the CR then disable the interrupts */ old_ctrl2 = mxs_read(s, REG_CTRL2); old_ctrl0 = mxs_read(s, REG_CTRL0); mxs_clr(AUART_CTRL0_CLKGATE, s, REG_CTRL0); mxs_set(AUART_CTRL2_UARTEN | AUART_CTRL2_TXE, s, REG_CTRL2); uart_console_write(port, str, count, mxs_auart_console_putchar); /* Finally, wait for transmitter to become empty ... */ while (mxs_read(s, REG_STAT) & AUART_STAT_BUSY) { udelay(1); if (!to--) break; } /* * ... and restore the TCR if we waited long enough for the transmitter * to be idle. This might keep the transmitter enabled although it is * unused, but that is better than to disable it while it is still * transmitting. 
*/ if (!(mxs_read(s, REG_STAT) & AUART_STAT_BUSY)) { mxs_write(old_ctrl0, s, REG_CTRL0); mxs_write(old_ctrl2, s, REG_CTRL2); } clk_disable(s->clk); } static void __init auart_console_get_options(struct mxs_auart_port *s, int *baud, int *parity, int *bits) { struct uart_port *port = &s->port; unsigned int lcr_h, quot; if (!(mxs_read(s, REG_CTRL2) & AUART_CTRL2_UARTEN)) return; lcr_h = mxs_read(s, REG_LINECTRL); *parity = 'n'; if (lcr_h & AUART_LINECTRL_PEN) { if (lcr_h & AUART_LINECTRL_EPS) *parity = 'e'; else *parity = 'o'; } if ((lcr_h & AUART_LINECTRL_WLEN_MASK) == AUART_LINECTRL_WLEN(7)) *bits = 7; else *bits = 8; quot = ((mxs_read(s, REG_LINECTRL) & AUART_LINECTRL_BAUD_DIVINT_MASK)) >> (AUART_LINECTRL_BAUD_DIVINT_SHIFT - 6); quot |= ((mxs_read(s, REG_LINECTRL) & AUART_LINECTRL_BAUD_DIVFRAC_MASK)) >> AUART_LINECTRL_BAUD_DIVFRAC_SHIFT; if (quot == 0) quot = 1; *baud = (port->uartclk << 2) / quot; } static int __init auart_console_setup(struct console *co, char *options) { struct mxs_auart_port *s; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. 
*/ if (co->index == -1 || co->index >= ARRAY_SIZE(auart_port)) co->index = 0; s = auart_port[co->index]; if (!s) return -ENODEV; ret = clk_prepare_enable(s->clk); if (ret) return ret; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else auart_console_get_options(s, &baud, &parity, &bits); ret = uart_set_options(&s->port, co, baud, parity, bits, flow); clk_disable_unprepare(s->clk); return ret; } static struct console auart_console = { .name = "ttyAPP", .write = auart_console_write, .device = uart_console_device, .setup = auart_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &auart_driver, }; #endif static struct uart_driver auart_driver = { .owner = THIS_MODULE, .driver_name = "ttyAPP", .dev_name = "ttyAPP", .major = 0, .minor = 0, .nr = MXS_AUART_PORTS, #ifdef CONFIG_SERIAL_MXS_AUART_CONSOLE .cons = &auart_console, #endif }; static void mxs_init_regs(struct mxs_auart_port *s) { if (is_asm9260_auart(s)) s->vendor = &vendor_alphascale_asm9260; else s->vendor = &vendor_freescale_stmp37xx; } static int mxs_get_clks(struct mxs_auart_port *s, struct platform_device *pdev) { int err; if (!is_asm9260_auart(s)) { s->clk = devm_clk_get(&pdev->dev, NULL); return PTR_ERR_OR_ZERO(s->clk); } s->clk = devm_clk_get(s->dev, "mod"); if (IS_ERR(s->clk)) { dev_err(s->dev, "Failed to get \"mod\" clk\n"); return PTR_ERR(s->clk); } s->clk_ahb = devm_clk_get(s->dev, "ahb"); if (IS_ERR(s->clk_ahb)) { dev_err(s->dev, "Failed to get \"ahb\" clk\n"); return PTR_ERR(s->clk_ahb); } err = clk_prepare_enable(s->clk_ahb); if (err) { dev_err(s->dev, "Failed to enable ahb_clk!\n"); return err; } err = clk_set_rate(s->clk, clk_get_rate(s->clk_ahb)); if (err) { dev_err(s->dev, "Failed to set rate!\n"); goto disable_clk_ahb; } err = clk_prepare_enable(s->clk); if (err) { dev_err(s->dev, "Failed to enable clk!\n"); goto disable_clk_ahb; } return 0; disable_clk_ahb: clk_disable_unprepare(s->clk_ahb); return err; } static int mxs_auart_init_gpios(struct 
mxs_auart_port *s, struct device *dev) { enum mctrl_gpio_idx i; struct gpio_desc *gpiod; s->gpios = mctrl_gpio_init_noauto(dev, 0); if (IS_ERR(s->gpios)) return PTR_ERR(s->gpios); /* Block (enabled before) DMA option if RTS or CTS is GPIO line */ if (!RTS_AT_AUART() || !CTS_AT_AUART()) { if (test_bit(MXS_AUART_RTSCTS, &s->flags)) dev_warn(dev, "DMA and flow control via gpio may cause some problems. DMA disabled!\n"); clear_bit(MXS_AUART_RTSCTS, &s->flags); } for (i = 0; i < UART_GPIO_MAX; i++) { gpiod = mctrl_gpio_to_gpiod(s->gpios, i); if (gpiod && (gpiod_get_direction(gpiod) == 1)) s->gpio_irq[i] = gpiod_to_irq(gpiod); else s->gpio_irq[i] = -EINVAL; } return 0; } static void mxs_auart_free_gpio_irq(struct mxs_auart_port *s) { enum mctrl_gpio_idx i; for (i = 0; i < UART_GPIO_MAX; i++) if (s->gpio_irq[i] >= 0) free_irq(s->gpio_irq[i], s); } static int mxs_auart_request_gpio_irq(struct mxs_auart_port *s) { int *irq = s->gpio_irq; enum mctrl_gpio_idx i; int err = 0; for (i = 0; (i < UART_GPIO_MAX) && !err; i++) { if (irq[i] < 0) continue; irq_set_status_flags(irq[i], IRQ_NOAUTOEN); err = request_irq(irq[i], mxs_auart_irq_handle, IRQ_TYPE_EDGE_BOTH, dev_name(s->dev), s); if (err) dev_err(s->dev, "%s - Can't get %d irq\n", __func__, irq[i]); } /* * If something went wrong, rollback. * Be careful: i may be unsigned. 
*/ while (err && (i-- > 0)) if (irq[i] >= 0) free_irq(irq[i], s); return err; } static int mxs_auart_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mxs_auart_port *s; u32 version; int ret, irq; struct resource *r; s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL); if (!s) return -ENOMEM; s->port.dev = &pdev->dev; s->dev = &pdev->dev; ret = of_alias_get_id(np, "serial"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id: %d\n", ret); return ret; } s->port.line = ret; if (of_property_read_bool(np, "uart-has-rtscts") || of_property_read_bool(np, "fsl,uart-has-rtscts") /* deprecated */) set_bit(MXS_AUART_RTSCTS, &s->flags); if (s->port.line >= ARRAY_SIZE(auart_port)) { dev_err(&pdev->dev, "serial%d out of range\n", s->port.line); return -EINVAL; } s->devtype = (enum mxs_auart_type)of_device_get_match_data(&pdev->dev); ret = mxs_get_clks(s, pdev); if (ret) return ret; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!r) { ret = -ENXIO; goto out_disable_clks; } s->port.mapbase = r->start; s->port.membase = ioremap(r->start, resource_size(r)); if (!s->port.membase) { ret = -ENOMEM; goto out_disable_clks; } s->port.ops = &mxs_auart_ops; s->port.iotype = UPIO_MEM; s->port.fifosize = MXS_AUART_FIFO_SIZE; s->port.uartclk = clk_get_rate(s->clk); s->port.type = PORT_IMX; s->port.has_sysrq = IS_ENABLED(CONFIG_SERIAL_MXS_AUART_CONSOLE); mxs_init_regs(s); s->mctrl_prev = 0; irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto out_iounmap; } s->port.irq = irq; ret = devm_request_irq(&pdev->dev, irq, mxs_auart_irq_handle, 0, dev_name(&pdev->dev), s); if (ret) goto out_iounmap; platform_set_drvdata(pdev, s); ret = mxs_auart_init_gpios(s, &pdev->dev); if (ret) { dev_err(&pdev->dev, "Failed to initialize GPIOs.\n"); goto out_iounmap; } /* * Get the GPIO lines IRQ */ ret = mxs_auart_request_gpio_irq(s); if (ret) goto out_iounmap; auart_port[s->port.line] = s; mxs_auart_reset_deassert(s); ret = 
uart_add_one_port(&auart_driver, &s->port); if (ret) goto out_free_qpio_irq; /* ASM9260 don't have version reg */ if (is_asm9260_auart(s)) { dev_info(&pdev->dev, "Found APPUART ASM9260\n"); } else { version = mxs_read(s, REG_VERSION); dev_info(&pdev->dev, "Found APPUART %d.%d.%d\n", (version >> 24) & 0xff, (version >> 16) & 0xff, version & 0xffff); } return 0; out_free_qpio_irq: mxs_auart_free_gpio_irq(s); auart_port[pdev->id] = NULL; out_iounmap: iounmap(s->port.membase); out_disable_clks: if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); clk_disable_unprepare(s->clk_ahb); } return ret; } static void mxs_auart_remove(struct platform_device *pdev) { struct mxs_auart_port *s = platform_get_drvdata(pdev); uart_remove_one_port(&auart_driver, &s->port); auart_port[pdev->id] = NULL; mxs_auart_free_gpio_irq(s); iounmap(s->port.membase); if (is_asm9260_auart(s)) { clk_disable_unprepare(s->clk); clk_disable_unprepare(s->clk_ahb); } } static struct platform_driver mxs_auart_driver = { .probe = mxs_auart_probe, .remove = mxs_auart_remove, .driver = { .name = "mxs-auart", .of_match_table = mxs_auart_dt_ids, }, }; static int __init mxs_auart_init(void) { int r; r = uart_register_driver(&auart_driver); if (r) goto out; r = platform_driver_register(&mxs_auart_driver); if (r) goto out_err; return 0; out_err: uart_unregister_driver(&auart_driver); out: return r; } static void __exit mxs_auart_exit(void) { platform_driver_unregister(&mxs_auart_driver); uart_unregister_driver(&auart_driver); } module_init(mxs_auart_init); module_exit(mxs_auart_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Freescale MXS application uart driver"); MODULE_ALIAS("platform:mxs-auart");
// SPDX-License-Identifier: GPL-2.0-only // // Copyright(c) 2021-2022 Intel Corporation // // Authors: Cezary Rojewski <[email protected]> // Amadeusz Slawinski <[email protected]> // #include <linux/clk.h> #include <linux/dmi.h> #include <linux/i2c.h> #include <linux/input.h> #include <linux/module.h> #include <linux/platform_device.h> #include <sound/core.h> #include <sound/jack.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/rt5682.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include "../../common/soc-intel-quirks.h" #include "../../../codecs/rt5682.h" #include "../utils.h" #define AVS_RT5682_SSP_CODEC(quirk) ((quirk) & GENMASK(2, 0)) #define AVS_RT5682_SSP_CODEC_MASK (GENMASK(2, 0)) #define AVS_RT5682_MCLK_EN BIT(3) #define AVS_RT5682_MCLK_24MHZ BIT(4) #define AVS_RT5682_CODEC_DAI_NAME "rt5682-aif1" /* Default: MCLK on, MCLK 19.2M, SSP0 */ static unsigned long avs_rt5682_quirk = AVS_RT5682_MCLK_EN | AVS_RT5682_SSP_CODEC(0); static int avs_rt5682_quirk_cb(const struct dmi_system_id *id) { avs_rt5682_quirk = (unsigned long)id->driver_data; return 1; } static const struct dmi_system_id avs_rt5682_quirk_table[] = { { .callback = avs_rt5682_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "WhiskeyLake Client"), }, .driver_data = (void *)(AVS_RT5682_MCLK_EN | AVS_RT5682_MCLK_24MHZ | AVS_RT5682_SSP_CODEC(1)), }, { .callback = avs_rt5682_quirk_cb, .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), DMI_MATCH(DMI_PRODUCT_NAME, "Ice Lake Client"), }, .driver_data = (void *)(AVS_RT5682_MCLK_EN | AVS_RT5682_SSP_CODEC(0)), }, {} }; static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Headphone Jack"), SOC_DAPM_PIN_SWITCH("Headset Mic"), }; static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Headset Mic", NULL), }; static const struct snd_soc_dapm_route card_base_routes[] = { /* HP jack 
connectors - unknown if we have jack detect */ { "Headphone Jack", NULL, "HPOL" }, { "Headphone Jack", NULL, "HPOR" }, /* other jacks */ { "IN1P", NULL, "Headset Mic" }, }; static const struct snd_soc_jack_pin card_jack_pins[] = { { .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE, }, { .pin = "Headset Mic", .mask = SND_JACK_MICROPHONE, }, }; static int avs_rt5682_codec_init(struct snd_soc_pcm_runtime *runtime) { struct snd_soc_component *component = snd_soc_rtd_to_codec(runtime, 0)->component; struct snd_soc_card *card = runtime->card; struct snd_soc_jack_pin *pins; struct snd_soc_jack *jack; int num_pins, ret; jack = snd_soc_card_get_drvdata(card); num_pins = ARRAY_SIZE(card_jack_pins); pins = devm_kmemdup(card->dev, card_jack_pins, sizeof(*pins) * num_pins, GFP_KERNEL); if (!pins) return -ENOMEM; /* Need to enable ASRC function for 24MHz mclk rate */ if ((avs_rt5682_quirk & AVS_RT5682_MCLK_EN) && (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ)) { rt5682_sel_asrc_clk_src(component, RT5682_DA_STEREO1_FILTER | RT5682_AD_STEREO1_FILTER, RT5682_CLK_SEL_I2S1_ASRC); } ret = snd_soc_card_jack_new_pins(card, "Headset Jack", SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | SND_JACK_BTN_2 | SND_JACK_BTN_3, jack, pins, num_pins); if (ret) { dev_err(card->dev, "Headset Jack creation failed: %d\n", ret); return ret; } snd_jack_set_key(jack->jack, SND_JACK_BTN_0, KEY_PLAYPAUSE); snd_jack_set_key(jack->jack, SND_JACK_BTN_1, KEY_VOICECOMMAND); snd_jack_set_key(jack->jack, SND_JACK_BTN_2, KEY_VOLUMEUP); snd_jack_set_key(jack->jack, SND_JACK_BTN_3, KEY_VOLUMEDOWN); ret = snd_soc_component_set_jack(component, jack, NULL); if (ret) { dev_err(card->dev, "Headset Jack call-back failed: %d\n", ret); return ret; } return 0; }; static void avs_rt5682_codec_exit(struct snd_soc_pcm_runtime *rtd) { snd_soc_component_set_jack(snd_soc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } static int avs_rt5682_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { 
struct snd_soc_pcm_runtime *runtime = snd_soc_substream_to_rtd(substream); struct snd_soc_dai *codec_dai = snd_soc_rtd_to_codec(runtime, 0); int pll_source, freq_in, freq_out; int ret; if (avs_rt5682_quirk & AVS_RT5682_MCLK_EN) { pll_source = RT5682_PLL1_S_MCLK; if (avs_rt5682_quirk & AVS_RT5682_MCLK_24MHZ) freq_in = 24000000; else freq_in = 19200000; } else { pll_source = RT5682_PLL1_S_BCLK1; freq_in = params_rate(params) * 50; } freq_out = params_rate(params) * 512; ret = snd_soc_dai_set_pll(codec_dai, RT5682_PLL1, pll_source, freq_in, freq_out); if (ret < 0) dev_err(runtime->dev, "Set PLL failed: %d\n", ret); ret = snd_soc_dai_set_sysclk(codec_dai, RT5682_SCLK_S_PLL1, freq_out, SND_SOC_CLOCK_IN); if (ret < 0) dev_err(runtime->dev, "Set sysclk failed: %d\n", ret); /* slot_width should be equal or larger than data length. */ ret = snd_soc_dai_set_tdm_slot(codec_dai, 0x0, 0x0, 2, params_width(params)); if (ret < 0) dev_err(runtime->dev, "Set TDM slot failed: %d\n", ret); return ret; } static const struct snd_soc_ops avs_rt5682_ops = { .hw_params = avs_rt5682_hw_params, }; static int avs_rt5682_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) { struct snd_interval *rate, *channels; struct snd_mask *fmt; rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); /* The ADSP will convert the FE rate to 48k, stereo */ rate->min = rate->max = 48000; channels->min = channels->max = 2; /* set SSPN to 24 bit */ snd_mask_none(fmt); snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); return 0; } static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, int tdm_slot, struct snd_soc_dai_link **dai_link) { struct snd_soc_dai_link_component *platform; struct snd_soc_dai_link *dl; dl = devm_kzalloc(dev, sizeof(*dl), GFP_KERNEL); platform = devm_kzalloc(dev, sizeof(*platform), GFP_KERNEL); if (!dl 
|| !platform) return -ENOMEM; platform->name = platform_name; dl->name = devm_kasprintf(dev, GFP_KERNEL, AVS_STRING_FMT("SSP", "-Codec", ssp_port, tdm_slot)); dl->cpus = devm_kzalloc(dev, sizeof(*dl->cpus), GFP_KERNEL); dl->codecs = devm_kzalloc(dev, sizeof(*dl->codecs), GFP_KERNEL); if (!dl->name || !dl->cpus || !dl->codecs) return -ENOMEM; dl->cpus->dai_name = devm_kasprintf(dev, GFP_KERNEL, AVS_STRING_FMT("SSP", " Pin", ssp_port, tdm_slot)); dl->codecs->name = devm_kasprintf(dev, GFP_KERNEL, "i2c-10EC5682:00"); dl->codecs->dai_name = devm_kasprintf(dev, GFP_KERNEL, AVS_RT5682_CODEC_DAI_NAME); if (!dl->cpus->dai_name || !dl->codecs->name || !dl->codecs->dai_name) return -ENOMEM; dl->num_cpus = 1; dl->num_codecs = 1; dl->platforms = platform; dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBC_CFC; dl->init = avs_rt5682_codec_init; dl->exit = avs_rt5682_codec_exit; dl->be_hw_params_fixup = avs_rt5682_be_fixup; dl->ops = &avs_rt5682_ops; dl->nonatomic = 1; dl->no_pcm = 1; *dai_link = dl; return 0; } static int avs_card_suspend_pre(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, AVS_RT5682_CODEC_DAI_NAME); return snd_soc_component_set_jack(codec_dai->component, NULL, NULL); } static int avs_card_resume_post(struct snd_soc_card *card) { struct snd_soc_dai *codec_dai = snd_soc_card_get_codec_dai(card, AVS_RT5682_CODEC_DAI_NAME); struct snd_soc_jack *jack = snd_soc_card_get_drvdata(card); return snd_soc_component_set_jack(codec_dai->component, jack, NULL); } static int avs_rt5682_probe(struct platform_device *pdev) { struct snd_soc_dai_link *dai_link; struct snd_soc_acpi_mach *mach; struct snd_soc_card *card; struct snd_soc_jack *jack; struct device *dev = &pdev->dev; const char *pname; int ssp_port, tdm_slot, ret; if (pdev->id_entry && pdev->id_entry->driver_data) avs_rt5682_quirk = (unsigned long)pdev->id_entry->driver_data; 
dmi_check_system(avs_rt5682_quirk_table); dev_dbg(dev, "avs_rt5682_quirk = %lx\n", avs_rt5682_quirk); mach = dev_get_platdata(dev); pname = mach->mach_params.platform; ret = avs_mach_get_ssp_tdm(dev, mach, &ssp_port, &tdm_slot); if (ret) return ret; ret = avs_create_dai_link(dev, pname, ssp_port, tdm_slot, &dai_link); if (ret) { dev_err(dev, "Failed to create dai link: %d", ret); return ret; } jack = devm_kzalloc(dev, sizeof(*jack), GFP_KERNEL); card = devm_kzalloc(dev, sizeof(*card), GFP_KERNEL); if (!jack || !card) return -ENOMEM; card->name = "avs_rt5682"; card->dev = dev; card->owner = THIS_MODULE; card->suspend_pre = avs_card_suspend_pre; card->resume_post = avs_card_resume_post; card->dai_link = dai_link; card->num_links = 1; card->controls = card_controls; card->num_controls = ARRAY_SIZE(card_controls); card->dapm_widgets = card_widgets; card->num_dapm_widgets = ARRAY_SIZE(card_widgets); card->dapm_routes = card_base_routes; card->num_dapm_routes = ARRAY_SIZE(card_base_routes); card->fully_routed = true; snd_soc_card_set_drvdata(card, jack); ret = snd_soc_fixup_dai_links_platform_name(card, pname); if (ret) return ret; return devm_snd_soc_register_card(dev, card); } static const struct platform_device_id avs_rt5682_driver_ids[] = { { .name = "avs_rt5682", }, {}, }; MODULE_DEVICE_TABLE(platform, avs_rt5682_driver_ids); static struct platform_driver avs_rt5682_driver = { .probe = avs_rt5682_probe, .driver = { .name = "avs_rt5682", .pm = &snd_soc_pm_ops, }, .id_table = avs_rt5682_driver_ids, }; module_platform_driver(avs_rt5682_driver) MODULE_DESCRIPTION("Intel rt5682 machine driver"); MODULE_AUTHOR("Cezary Rojewski <[email protected]>"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _PPC64_KDUMP_H
#define _PPC64_KDUMP_H

#include <asm/page.h>

/* Physical load address of the capture (kdump) kernel. */
#define KDUMP_KERNELBASE	0x2000000

/* How many bytes to reserve at zero for kdump. The reserve limit should
 * be greater or equal to the trampoline's end address.
 * Reserve to the end of the FWNMI area, see head_64.S */
#define KDUMP_RESERVE_LIMIT	0x10000 /* 64K */

#ifdef CONFIG_CRASH_DUMP

/*
 * On PPC64 translation is disabled during trampoline setup, so we use
 * physical addresses. Though on PPC32 translation is already enabled,
 * so we can't do the same. Luckily create_trampoline() creates relative
 * branches, so we can just add the PAGE_OFFSET and don't worry about it.
 */
#ifdef __powerpc64__
#define KDUMP_TRAMPOLINE_START	0x0100
#define KDUMP_TRAMPOLINE_END	0x3000
#else
#define KDUMP_TRAMPOLINE_START	(0x0100 + PAGE_OFFSET)
#define KDUMP_TRAMPOLINE_END	(0x3000 + PAGE_OFFSET)
#endif /* __powerpc64__ */

#define KDUMP_MIN_TCE_ENTRIES	2048

#endif /* CONFIG_CRASH_DUMP */

#ifndef __ASSEMBLY__

#if defined(CONFIG_CRASH_DUMP) && !defined(CONFIG_NONSTATIC_KERNEL)
extern void reserve_kdump_trampoline(void);
extern void setup_kdump_trampoline(void);
#else
/* !CRASH_DUMP || !NONSTATIC_KERNEL: trampoline handling compiles away. */
static inline void reserve_kdump_trampoline(void) { ; }
static inline void setup_kdump_trampoline(void) { ; }
#endif

#endif /* __ASSEMBLY__ */

#endif /* __PPC64_KDUMP_H */
// SPDX-License-Identifier: GPL-2.0
/*
 * JZ4740 ECC controller driver
 *
 * Copyright (c) 2019 Paul Cercueil <[email protected]>
 *
 * based on jz4740-nand.c
 */

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>

#include "ingenic_ecc.h"

/* MMIO register offsets of the Reed-Solomon ECC block. */
#define JZ_REG_NAND_ECC_CTRL	0x00
#define JZ_REG_NAND_DATA	0x04
#define JZ_REG_NAND_PAR0	0x08
#define JZ_REG_NAND_PAR1	0x0C
#define JZ_REG_NAND_PAR2	0x10
#define JZ_REG_NAND_IRQ_STAT	0x14
#define JZ_REG_NAND_IRQ_CTRL	0x18
/* Per-error report registers, one 32-bit word per detected error. */
#define JZ_REG_NAND_ERR(x)	(0x1C + ((x) << 2))

#define JZ_NAND_ECC_CTRL_PAR_READY	BIT(4)
#define JZ_NAND_ECC_CTRL_ENCODING	BIT(3)
#define JZ_NAND_ECC_CTRL_RS		BIT(2)
#define JZ_NAND_ECC_CTRL_RESET		BIT(1)
#define JZ_NAND_ECC_CTRL_ENABLE		BIT(0)

#define JZ_NAND_STATUS_ERR_COUNT	(BIT(31) | BIT(30) | BIT(29))
#define JZ_NAND_STATUS_PAD_FINISH	BIT(4)
#define JZ_NAND_STATUS_DEC_FINISH	BIT(3)
#define JZ_NAND_STATUS_ENC_FINISH	BIT(2)
#define JZ_NAND_STATUS_UNCOR_ERROR	BIT(1)
#define JZ_NAND_STATUS_ERROR		BIT(0)

/* Parity the hardware produces for an all-0xff (erased) block. */
static const uint8_t empty_block_ecc[] = {
	0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f
};

/*
 * Reset and enable the ECC unit in Reed-Solomon mode.
 * @calc_ecc selects encode (parity generation) vs decode (correction).
 */
static void jz4740_ecc_reset(struct ingenic_ecc *ecc, bool calc_ecc)
{
	uint32_t reg;

	/* Clear interrupt status */
	writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);

	/* Initialize and enable ECC hardware */
	reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
	reg |= JZ_NAND_ECC_CTRL_RESET;
	reg |= JZ_NAND_ECC_CTRL_ENABLE;
	reg |= JZ_NAND_ECC_CTRL_RS;
	if (calc_ecc) /* calculate ECC from data */
		reg |= JZ_NAND_ECC_CTRL_ENCODING;
	else /* correct data from ECC */
		reg &= ~JZ_NAND_ECC_CTRL_ENCODING;

	writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
}

/*
 * Read parity bytes computed by the hardware into @ecc_code.
 * Note: @buf is unused here; the hardware observed the data on the NAND
 * data path already — TODO confirm against ingenic_ecc callers.
 * Returns 0 on success, -ETIMEDOUT if the encoder never signals completion.
 */
static int jz4740_ecc_calculate(struct ingenic_ecc *ecc,
				struct ingenic_ecc_params *params,
				const u8 *buf, u8 *ecc_code)
{
	uint32_t reg, status;
	unsigned int timeout = 1000;
	int i;

	jz4740_ecc_reset(ecc, true);

	do {
		status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
	} while (!(status & JZ_NAND_STATUS_ENC_FINISH) && --timeout);

	if (timeout == 0)
		return -ETIMEDOUT;

	/* Disable the unit before harvesting the parity bytes. */
	reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
	writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);

	for (i = 0; i < params->bytes; ++i)
		ecc_code[i] = readb(ecc->base + JZ_REG_NAND_PAR0 + i);

	/*
	 * If the written data is completely 0xff, we also want to write 0xff as
	 * ECC, otherwise we will get in trouble when doing subpage writes.
	 */
	if (memcmp(ecc_code, empty_block_ecc, sizeof(empty_block_ecc)) == 0)
		memset(ecc_code, 0xff, sizeof(empty_block_ecc));

	return 0;
}

/*
 * Apply a single 9-bit error correction mask at bit position @index
 * of @buf. The 9-bit symbol may straddle a byte boundary, hence the
 * 16-bit load/store around the affected offset.
 */
static void jz_nand_correct_data(uint8_t *buf, int index, int mask)
{
	int offset = index & 0x7;
	uint16_t data;

	index += (index >> 3);

	data = buf[index];
	data |= buf[index + 1] << 8;

	mask ^= (data >> offset) & 0x1ff;
	data &= ~(0x1ff << offset);
	data |= (mask << offset);

	buf[index] = data & 0xff;
	buf[index + 1] = (data >> 8) & 0xff;
}

/*
 * Run the hardware decoder against @buf using the stored @ecc_code and
 * fix up any correctable errors in place.
 * Returns the number of corrected errors, 0 if clean, -EBADMSG on an
 * uncorrectable block, or -ETIMEDOUT if decoding never completes.
 */
static int jz4740_ecc_correct(struct ingenic_ecc *ecc,
			      struct ingenic_ecc_params *params,
			      u8 *buf, u8 *ecc_code)
{
	int i, error_count, index;
	uint32_t reg, status, error;
	unsigned int timeout = 1000;

	jz4740_ecc_reset(ecc, false);

	/* Feed the stored parity to the decoder, then mark it ready. */
	for (i = 0; i < params->bytes; ++i)
		writeb(ecc_code[i], ecc->base + JZ_REG_NAND_PAR0 + i);

	reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
	reg |= JZ_NAND_ECC_CTRL_PAR_READY;
	writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);

	do {
		status = readl(ecc->base + JZ_REG_NAND_IRQ_STAT);
	} while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout);

	if (timeout == 0)
		return -ETIMEDOUT;

	reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
	writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);

	if (status & JZ_NAND_STATUS_ERROR) {
		if (status & JZ_NAND_STATUS_UNCOR_ERROR)
			return -EBADMSG;

		/* Error count lives in status bits 31:29. */
		error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29;

		for (i = 0; i < error_count; ++i) {
			error = readl(ecc->base + JZ_REG_NAND_ERR(i));
			/* Position is 1-based in hardware; out-of-range reports are skipped. */
			index = ((error >> 16) & 0x1ff) - 1;
			if (index >= 0 && index < params->size)
				jz_nand_correct_data(buf, index, error & 0x1ff);
		}

		return error_count;
	}

	return 0;
}

/* Quiesce the unit: clear pending status and drop the enable bit. */
static void jz4740_ecc_disable(struct ingenic_ecc *ecc)
{
	u32 reg;

	writel(0, ecc->base + JZ_REG_NAND_IRQ_STAT);
	reg = readl(ecc->base + JZ_REG_NAND_ECC_CTRL);
	reg &= ~JZ_NAND_ECC_CTRL_ENABLE;
	writel(reg, ecc->base + JZ_REG_NAND_ECC_CTRL);
}

static const struct ingenic_ecc_ops jz4740_ecc_ops = {
	.disable = jz4740_ecc_disable,
	.calculate = jz4740_ecc_calculate,
	.correct = jz4740_ecc_correct,
};

static const struct of_device_id jz4740_ecc_dt_match[] = {
	{ .compatible = "ingenic,jz4740-ecc", .data = &jz4740_ecc_ops },
	{},
};
MODULE_DEVICE_TABLE(of, jz4740_ecc_dt_match);

static struct platform_driver jz4740_ecc_driver = {
	.probe		= ingenic_ecc_probe,
	.driver	= {
		.name	= "jz4740-ecc",
		.of_match_table = jz4740_ecc_dt_match,
	},
};
module_platform_driver(jz4740_ecc_driver);

MODULE_AUTHOR("Paul Cercueil <[email protected]>");
MODULE_DESCRIPTION("Ingenic JZ4740 ECC controller driver");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-or-later /* * Support for the interrupt controllers found on Power Macintosh, * currently Apple's "Grand Central" interrupt controller in all * its incarnations. OpenPIC support used on newer machines is * in a separate file * * Copyright (C) 1997 Paul Mackerras ([email protected]) * Copyright (C) 2005 Benjamin Herrenschmidt ([email protected]) * IBM, Corp. */ #include <linux/stddef.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/syscore_ops.h> #include <linux/adb.h> #include <linux/minmax.h> #include <linux/pmu.h> #include <linux/irqdomain.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/sections.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/pci-bridge.h> #include <asm/time.h> #include <asm/pmac_feature.h> #include <asm/mpic.h> #include <asm/xmon.h> #include "pmac.h" #ifdef CONFIG_PPC32 struct pmac_irq_hw { unsigned int event; unsigned int enable; unsigned int ack; unsigned int level; }; /* Workaround flags for 32bit powermac machines */ unsigned int of_irq_workarounds; struct device_node *of_irq_dflt_pic; /* Default addresses */ static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4]; static int max_irqs; static int max_real_irqs; static DEFINE_RAW_SPINLOCK(pmac_pic_lock); /* The max irq number this driver deals with is 128; see max_irqs */ static DECLARE_BITMAP(ppc_lost_interrupts, 128); static DECLARE_BITMAP(ppc_cached_irq_mask, 128); static int pmac_irq_cascade = -1; static struct irq_domain *pmac_pic_host; static void __pmac_retrigger(unsigned int irq_nr) { if (irq_nr >= max_real_irqs && pmac_irq_cascade > 0) { __set_bit(irq_nr, ppc_lost_interrupts); irq_nr = pmac_irq_cascade; mb(); } if (!__test_and_set_bit(irq_nr, ppc_lost_interrupts)) { atomic_inc(&ppc_n_lost_interrupts); set_dec(1); } } static void pmac_mask_and_ack_irq(struct irq_data *d) { unsigned int src = irqd_to_hwirq(d); 
unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; unsigned long flags; raw_spin_lock_irqsave(&pmac_pic_lock, flags); __clear_bit(src, ppc_cached_irq_mask); if (__test_and_clear_bit(src, ppc_lost_interrupts)) atomic_dec(&ppc_n_lost_interrupts); out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); out_le32(&pmac_irq_hw[i]->ack, bit); do { /* make sure ack gets to controller before we enable interrupts */ mb(); } while((in_le32(&pmac_irq_hw[i]->enable) & bit) != (ppc_cached_irq_mask[i] & bit)); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static void pmac_ack_irq(struct irq_data *d) { unsigned int src = irqd_to_hwirq(d); unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; unsigned long flags; raw_spin_lock_irqsave(&pmac_pic_lock, flags); if (__test_and_clear_bit(src, ppc_lost_interrupts)) atomic_dec(&ppc_n_lost_interrupts); out_le32(&pmac_irq_hw[i]->ack, bit); (void)in_le32(&pmac_irq_hw[i]->ack); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) { unsigned long bit = 1UL << (irq_nr & 0x1f); int i = irq_nr >> 5; if ((unsigned)irq_nr >= max_irqs) return; /* enable unmasked interrupts */ out_le32(&pmac_irq_hw[i]->enable, ppc_cached_irq_mask[i]); do { /* make sure mask gets to controller before we return to user */ mb(); } while((in_le32(&pmac_irq_hw[i]->enable) & bit) != (ppc_cached_irq_mask[i] & bit)); /* * Unfortunately, setting the bit in the enable register * when the device interrupt is already on *doesn't* set * the bit in the flag register or request another interrupt. 
*/ if (bit & ppc_cached_irq_mask[i] & in_le32(&pmac_irq_hw[i]->level)) __pmac_retrigger(irq_nr); } /* When an irq gets requested for the first client, if it's an * edge interrupt, we clear any previous one on the controller */ static unsigned int pmac_startup_irq(struct irq_data *d) { unsigned long flags; unsigned int src = irqd_to_hwirq(d); unsigned long bit = 1UL << (src & 0x1f); int i = src >> 5; raw_spin_lock_irqsave(&pmac_pic_lock, flags); if (!irqd_is_level_type(d)) out_le32(&pmac_irq_hw[i]->ack, bit); __set_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 0); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); return 0; } static void pmac_mask_irq(struct irq_data *d) { unsigned long flags; unsigned int src = irqd_to_hwirq(d); raw_spin_lock_irqsave(&pmac_pic_lock, flags); __clear_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 1); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static void pmac_unmask_irq(struct irq_data *d) { unsigned long flags; unsigned int src = irqd_to_hwirq(d); raw_spin_lock_irqsave(&pmac_pic_lock, flags); __set_bit(src, ppc_cached_irq_mask); __pmac_set_irq_mask(src, 0); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); } static int pmac_retrigger(struct irq_data *d) { unsigned long flags; raw_spin_lock_irqsave(&pmac_pic_lock, flags); __pmac_retrigger(irqd_to_hwirq(d)); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); return 1; } static struct irq_chip pmac_pic = { .name = "PMAC-PIC", .irq_startup = pmac_startup_irq, .irq_mask = pmac_mask_irq, .irq_ack = pmac_ack_irq, .irq_mask_ack = pmac_mask_and_ack_irq, .irq_unmask = pmac_unmask_irq, .irq_retrigger = pmac_retrigger, }; static irqreturn_t gatwick_action(int cpl, void *dev_id) { unsigned long flags; int irq, bits; int rc = IRQ_NONE; raw_spin_lock_irqsave(&pmac_pic_lock, flags); for (irq = max_irqs; (irq -= 32) >= max_real_irqs; ) { int i = irq >> 5; bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; bits |= in_le32(&pmac_irq_hw[i]->level); bits &= 
ppc_cached_irq_mask[i]; if (bits == 0) continue; irq += __ilog2(bits); raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); generic_handle_irq(irq); raw_spin_lock_irqsave(&pmac_pic_lock, flags); rc = IRQ_HANDLED; } raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); return rc; } static unsigned int pmac_pic_get_irq(void) { int irq; unsigned long bits = 0; unsigned long flags; #ifdef CONFIG_PPC_PMAC32_PSURGE /* IPI's are a hack on the powersurge -- Cort */ if (smp_processor_id() != 0) { return psurge_secondary_virq; } #endif /* CONFIG_PPC_PMAC32_PSURGE */ raw_spin_lock_irqsave(&pmac_pic_lock, flags); for (irq = max_real_irqs; (irq -= 32) >= 0; ) { int i = irq >> 5; bits = in_le32(&pmac_irq_hw[i]->event) | ppc_lost_interrupts[i]; bits |= in_le32(&pmac_irq_hw[i]->level); bits &= ppc_cached_irq_mask[i]; if (bits == 0) continue; irq += __ilog2(bits); break; } raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); if (unlikely(irq < 0)) return 0; return irq_linear_revmap(pmac_pic_host, irq); } static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, enum irq_domain_bus_token bus_token) { /* We match all, we don't always have a node anyway */ return 1; } static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { if (hw >= max_irqs) return -EINVAL; /* Mark level interrupts, set delayed disable for edge ones and set * handlers */ irq_set_status_flags(virq, IRQ_LEVEL); irq_set_chip_and_handler(virq, &pmac_pic, handle_level_irq); return 0; } static const struct irq_domain_ops pmac_pic_host_ops = { .match = pmac_pic_host_match, .map = pmac_pic_host_map, .xlate = irq_domain_xlate_onecell, }; static void __init pmac_pic_probe_oldstyle(void) { int i; struct device_node *master = NULL; struct device_node *slave = NULL; u8 __iomem *addr; struct resource r; /* Set our get_irq function */ ppc_md.get_irq = pmac_pic_get_irq; /* * Find the interrupt controller type & node */ if ((master = of_find_node_by_name(NULL, "gc")) != NULL) { 
max_irqs = max_real_irqs = 32; } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) { max_irqs = max_real_irqs = 32; /* We might have a second cascaded ohare */ slave = of_find_node_by_name(NULL, "pci106b,7"); if (slave) max_irqs = 64; } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { max_irqs = max_real_irqs = 64; /* We might have a second cascaded heathrow */ /* Compensate for of_node_put() in of_find_node_by_name() */ of_node_get(master); slave = of_find_node_by_name(master, "mac-io"); /* Check ordering of master & slave */ if (of_device_is_compatible(master, "gatwick")) { BUG_ON(slave == NULL); swap(master, slave); } /* We found a slave */ if (slave) max_irqs = 128; } BUG_ON(master == NULL); /* * Allocate an irq host */ pmac_pic_host = irq_domain_add_linear(master, max_irqs, &pmac_pic_host_ops, NULL); BUG_ON(pmac_pic_host == NULL); irq_set_default_host(pmac_pic_host); /* Get addresses of first controller if we have a node for it */ BUG_ON(of_address_to_resource(master, 0, &r)); /* Map interrupts of primary controller */ addr = (u8 __iomem *) ioremap(r.start, 0x40); i = 0; pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) (addr + 0x20); if (max_real_irqs > 32) pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) (addr + 0x10); of_node_put(master); printk(KERN_INFO "irq: Found primary Apple PIC %pOF for %d irqs\n", master, max_real_irqs); /* Map interrupts of cascaded controller */ if (slave && !of_address_to_resource(slave, 0, &r)) { addr = (u8 __iomem *)ioremap(r.start, 0x40); pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) (addr + 0x20); if (max_irqs > 64) pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) (addr + 0x10); pmac_irq_cascade = irq_of_parse_and_map(slave, 0); printk(KERN_INFO "irq: Found slave Apple PIC %pOF for %d irqs" " cascade: %d\n", slave, max_irqs - max_real_irqs, pmac_irq_cascade); } of_node_put(slave); /* Disable all interrupts in all controllers */ for (i = 0; i * 32 < 
max_irqs; ++i) out_le32(&pmac_irq_hw[i]->enable, 0); /* Hookup cascade irq */ if (slave && pmac_irq_cascade) { if (request_irq(pmac_irq_cascade, gatwick_action, IRQF_NO_THREAD, "cascade", NULL)) pr_err("Failed to register cascade interrupt\n"); } printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); #ifdef CONFIG_XMON i = irq_create_mapping(NULL, 20); if (request_irq(i, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL)) pr_err("Failed to register NMI-XMON interrupt\n"); #endif } int of_irq_parse_oldworld(const struct device_node *device, int index, struct of_phandle_args *out_irq) { const u32 *ints = NULL; int intlen; /* * Old machines just have a list of interrupt numbers * and no interrupt-controller nodes. We also have dodgy * cases where the APPL,interrupts property is completely * missing behind pci-pci bridges and we have to get it * from the parent (the bridge itself, as apple just wired * everything together on these) */ while (device) { ints = of_get_property(device, "AAPL,interrupts", &intlen); if (ints != NULL) break; device = device->parent; if (!of_node_is_type(device, "pci")) break; } if (ints == NULL) return -EINVAL; intlen /= sizeof(u32); if (index >= intlen) return -EINVAL; out_irq->np = NULL; out_irq->args[0] = ints[index]; out_irq->args_count = 1; return 0; } #endif /* CONFIG_PPC32 */ static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) { #if defined(CONFIG_XMON) && defined(CONFIG_PPC32) struct device_node* pswitch; int nmi_irq; pswitch = of_find_node_by_name(NULL, "programmer-switch"); if (pswitch) { nmi_irq = irq_of_parse_and_map(pswitch, 0); if (nmi_irq) { mpic_irq_set_priority(nmi_irq, 9); if (request_irq(nmi_irq, xmon_irq, IRQF_NO_THREAD, "NMI - XMON", NULL)) pr_err("Failed to register NMI-XMON interrupt\n"); } of_node_put(pswitch); } #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ } static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, int master) { const char *name = master ? 
" MPIC 1 " : " MPIC 2 "; struct mpic *mpic; unsigned int flags = master ? 0 : MPIC_SECONDARY; pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); if (of_property_read_bool(np, "big-endian")) flags |= MPIC_BIG_ENDIAN; /* Primary Big Endian means HT interrupts. This is quite dodgy * but works until I find a better way */ if (master && (flags & MPIC_BIG_ENDIAN)) flags |= MPIC_U3_HT_IRQS; mpic = mpic_alloc(np, 0, flags, 0, 0, name); if (mpic == NULL) return NULL; mpic_init(mpic); return mpic; } static int __init pmac_pic_probe_mpic(void) { struct mpic *mpic1, *mpic2; struct device_node *np, *master = NULL, *slave = NULL; /* We can have up to 2 MPICs cascaded */ for_each_node_by_type(np, "open-pic") { if (master == NULL && !of_property_present(np, "interrupts")) master = of_node_get(np); else if (slave == NULL) slave = of_node_get(np); if (master && slave) { of_node_put(np); break; } } /* Check for bogus setups */ if (master == NULL && slave != NULL) { master = slave; slave = NULL; } /* Not found, default to good old pmac pic */ if (master == NULL) return -ENODEV; /* Set master handler */ ppc_md.get_irq = mpic_get_irq; /* Setup master */ mpic1 = pmac_setup_one_mpic(master, 1); BUG_ON(mpic1 == NULL); /* Install NMI if any */ pmac_pic_setup_mpic_nmi(mpic1); of_node_put(master); /* Set up a cascaded controller, if present */ if (slave) { mpic2 = pmac_setup_one_mpic(slave, 0); if (mpic2 == NULL) printk(KERN_ERR "Failed to setup slave MPIC\n"); of_node_put(slave); } return 0; } void __init pmac_pic_init(void) { /* We configure the OF parsing based on our oldworld vs. newworld * platform type and whether we were booted by BootX. */ #ifdef CONFIG_PPC32 if (!pmac_newworld) of_irq_workarounds |= OF_IMAP_OLDWORLD_MAC; if (of_property_read_bool(of_chosen, "linux,bootx")) of_irq_workarounds |= OF_IMAP_NO_PHANDLE; /* If we don't have phandles on a newworld, then try to locate a * default interrupt controller (happens when booting with BootX). 
* We do a first match here, hopefully, that only ever happens on * machines with one controller. */ if (pmac_newworld && (of_irq_workarounds & OF_IMAP_NO_PHANDLE)) { struct device_node *np; for_each_node_with_property(np, "interrupt-controller") { /* Skip /chosen/interrupt-controller */ if (of_node_name_eq(np, "chosen")) continue; /* It seems like at least one person wants * to use BootX on a machine with an AppleKiwi * controller which happens to pretend to be an * interrupt controller too. */ if (of_node_name_eq(np, "AppleKiwi")) continue; /* I think we found one ! */ of_irq_dflt_pic = np; break; } } #endif /* CONFIG_PPC32 */ /* We first try to detect Apple's new Core99 chipset, since mac-io * is quite different on those machines and contains an IBM MPIC2. */ if (pmac_pic_probe_mpic() == 0) return; #ifdef CONFIG_PPC32 pmac_pic_probe_oldstyle(); #endif } #if defined(CONFIG_PM) && defined(CONFIG_PPC32) /* * These procedures are used in implementing sleep on the powerbooks. * sleep_save_intrs() saves the states of all interrupt enables * and disables all interrupts except for the nominated one. * sleep_restore_intrs() restores the states of all interrupt enables. */ unsigned long sleep_save_mask[2]; /* This used to be passed by the PMU driver but that link got * broken with the new driver model. We use this tweak for now... * We really want to do things differently though... 
*/ static int pmacpic_find_viaint(void) { int viaint = -1; #ifdef CONFIG_ADB_PMU struct device_node *np; if (pmu_get_model() != PMU_OHARE_BASED) goto not_found; np = of_find_node_by_name(NULL, "via-pmu"); if (np == NULL) goto not_found; viaint = irq_of_parse_and_map(np, 0); of_node_put(np); not_found: #endif /* CONFIG_ADB_PMU */ return viaint; } static int pmacpic_suspend(void) { int viaint = pmacpic_find_viaint(); sleep_save_mask[0] = ppc_cached_irq_mask[0]; sleep_save_mask[1] = ppc_cached_irq_mask[1]; ppc_cached_irq_mask[0] = 0; ppc_cached_irq_mask[1] = 0; if (viaint > 0) set_bit(viaint, ppc_cached_irq_mask); out_le32(&pmac_irq_hw[0]->enable, ppc_cached_irq_mask[0]); if (max_real_irqs > 32) out_le32(&pmac_irq_hw[1]->enable, ppc_cached_irq_mask[1]); (void)in_le32(&pmac_irq_hw[0]->event); /* make sure mask gets to controller before we return to caller */ mb(); (void)in_le32(&pmac_irq_hw[0]->enable); return 0; } static void pmacpic_resume(void) { int i; out_le32(&pmac_irq_hw[0]->enable, 0); if (max_real_irqs > 32) out_le32(&pmac_irq_hw[1]->enable, 0); mb(); for (i = 0; i < max_real_irqs; ++i) if (test_bit(i, sleep_save_mask)) pmac_unmask_irq(irq_get_irq_data(i)); } static struct syscore_ops pmacpic_syscore_ops = { .suspend = pmacpic_suspend, .resume = pmacpic_resume, }; static int __init init_pmacpic_syscore(void) { if (pmac_irq_hw[0]) register_syscore_ops(&pmacpic_syscore_ops); return 0; } machine_subsys_initcall(powermac, init_pmacpic_syscore); #endif /* CONFIG_PM && CONFIG_PPC32 */
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright(c) 2023-2024 Intel Corporation
 *
 * Authors: Cezary Rojewski <[email protected]>
 *          Amadeusz Slawinski <[email protected]>
 */

#ifndef __ACPI_NHLT_H__
#define __ACPI_NHLT_H__

#include <linux/acpi.h>
#include <linux/kconfig.h>
#include <linux/overflow.h>
#include <linux/types.h>

/* Variable-length payloads immediately follow their parent structures. */
#define __acpi_nhlt_endpoint_config(ep)		((void *)((ep) + 1))
#define __acpi_nhlt_config_caps(cfg)		((void *)((cfg) + 1))

/**
 * acpi_nhlt_endpoint_fmtscfg - Get the formats configuration space.
 * @ep:		the endpoint to retrieve the space for.
 *
 * Return: A pointer to the formats configuration space.
 */
static inline struct acpi_nhlt_formats_config *
acpi_nhlt_endpoint_fmtscfg(const struct acpi_nhlt_endpoint *ep)
{
	/* Formats space sits past the endpoint config and its capabilities blob. */
	struct acpi_nhlt_config *cfg = __acpi_nhlt_endpoint_config(ep);

	return (struct acpi_nhlt_formats_config *)((u8 *)(cfg + 1) + cfg->capabilities_size);
}

#define __acpi_nhlt_first_endpoint(tb) \
	((void *)(tb + 1))

#define __acpi_nhlt_next_endpoint(ep) \
	((void *)((u8 *)(ep) + (ep)->length))

#define __acpi_nhlt_get_endpoint(tb, ep, i) \
	((i) ? __acpi_nhlt_next_endpoint(ep) : __acpi_nhlt_first_endpoint(tb))

#define __acpi_nhlt_first_fmtcfg(fmts) \
	((void *)(fmts + 1))

#define __acpi_nhlt_next_fmtcfg(fmt) \
	((void *)((u8 *)((fmt) + 1) + (fmt)->config.capabilities_size))

#define __acpi_nhlt_get_fmtcfg(fmts, fmt, i) \
	((i) ? __acpi_nhlt_next_fmtcfg(fmt) : __acpi_nhlt_first_fmtcfg(fmts))

/*
 * The for_each_nhlt_*() macros rely on an iterator to deal with the
 * variable length of each endpoint structure and the possible presence
 * of an OED-Config used by Windows only.
 */

/**
 * for_each_nhlt_endpoint - Iterate over endpoints in a NHLT table.
 * @tb:		the pointer to a NHLT table.
 * @ep:		the pointer to endpoint to use as loop cursor.
 */
#define for_each_nhlt_endpoint(tb, ep)					\
	for (unsigned int __i = 0;					\
	     __i < (tb)->endpoints_count &&				\
		(ep = __acpi_nhlt_get_endpoint(tb, ep, __i));		\
	     __i++)

/**
 * for_each_nhlt_fmtcfg - Iterate over format configurations.
 * @fmts:	the pointer to formats configuration space.
 * @fmt:	the pointer to format to use as loop cursor.
 */
#define for_each_nhlt_fmtcfg(fmts, fmt)					\
	for (unsigned int __i = 0;					\
	     __i < (fmts)->formats_count &&				\
		(fmt = __acpi_nhlt_get_fmtcfg(fmts, fmt, __i));		\
	     __i++)

/**
 * for_each_nhlt_endpoint_fmtcfg - Iterate over format configurations in an endpoint.
 * @ep:		the pointer to an endpoint.
 * @fmt:	the pointer to format to use as loop cursor.
 */
#define for_each_nhlt_endpoint_fmtcfg(ep, fmt) \
	for_each_nhlt_fmtcfg(acpi_nhlt_endpoint_fmtscfg(ep), fmt)

#if IS_ENABLED(CONFIG_ACPI_NHLT)

/*
 * System-wide pointer to the first NHLT table.
 *
 * A sound driver may utilize acpi_nhlt_get/put_gbl_table() on its
 * initialization and removal respectively to avoid excessive mapping
 * and unmapping of the memory occupied by the table between streaming
 * operations.
 */

acpi_status acpi_nhlt_get_gbl_table(void);
void acpi_nhlt_put_gbl_table(void);

bool acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep,
			      int link_type, int dev_type, int dir, int bus_id);
struct acpi_nhlt_endpoint *
acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb,
			   int link_type, int dev_type, int dir, int bus_id);
struct acpi_nhlt_endpoint *
acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id);
struct acpi_nhlt_format_config *
acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep,
			       u16 ch, u32 rate, u16 vbps, u16 bps);
/*
 * NOTE(review): "vpbs" below looks like a typo for "vbps" (valid bits
 * per sample) as spelled in acpi_nhlt_endpoint_find_fmtcfg(); prototype
 * parameter names have no behavioral impact — confirm before renaming.
 */
struct acpi_nhlt_format_config *
acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb,
			 int link_type, int dev_type, int dir, int bus_id,
			 u16 ch, u32 rate, u16 vpbs, u16 bps);
struct acpi_nhlt_format_config *
acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id,
		      u16 ch, u32 rate, u16 vpbs, u16 bps);
int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep);

#else /* !CONFIG_ACPI_NHLT */

/* Stubs keeping callers buildable when NHLT support is compiled out. */

static inline acpi_status acpi_nhlt_get_gbl_table(void)
{
	return AE_NOT_FOUND;
}

static inline void acpi_nhlt_put_gbl_table(void)
{
}

static inline bool
acpi_nhlt_endpoint_match(const struct acpi_nhlt_endpoint *ep,
			 int link_type, int dev_type, int dir, int bus_id)
{
	return false;
}

static inline struct acpi_nhlt_endpoint *
acpi_nhlt_tb_find_endpoint(const struct acpi_table_nhlt *tb,
			   int link_type, int dev_type, int dir, int bus_id)
{
	return NULL;
}

static inline struct acpi_nhlt_format_config *
acpi_nhlt_endpoint_find_fmtcfg(const struct acpi_nhlt_endpoint *ep,
			       u16 ch, u32 rate, u16 vbps, u16 bps)
{
	return NULL;
}

static inline struct acpi_nhlt_format_config *
acpi_nhlt_tb_find_fmtcfg(const struct acpi_table_nhlt *tb,
			 int link_type, int dev_type, int dir, int bus_id,
			 u16 ch, u32 rate, u16 vpbs, u16 bps)
{
	return NULL;
}

static inline int acpi_nhlt_endpoint_mic_count(const struct acpi_nhlt_endpoint *ep)
{
	return 0;
}

static inline struct acpi_nhlt_endpoint *
acpi_nhlt_find_endpoint(int link_type, int dev_type, int dir, int bus_id)
{
	return NULL;
}

static inline struct acpi_nhlt_format_config *
acpi_nhlt_find_fmtcfg(int link_type, int dev_type, int dir, int bus_id,
		      u16 ch, u32 rate, u16 vpbs, u16 bps)
{
	return NULL;
}

#endif /* CONFIG_ACPI_NHLT */

#endif /* __ACPI_NHLT_H__ */
/* SPDX-License-Identifier: MIT */ #ifndef __NV40_FB_RAM_H__ #define __NV40_FB_RAM_H__ #define nv40_ram(p) container_of((p), struct nv40_ram, base) #include "ram.h" struct nv40_ram { struct nvkm_ram base; u32 ctrl; u32 coef; }; int nv40_ram_new_(struct nvkm_fb *fb, enum nvkm_ram_type, u64, struct nvkm_ram **); #endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * include/linux/extcon/extcon-adc-jack.h
 *
 * Analog Jack extcon driver with ADC-based detection capability.
 *
 * Copyright (C) 2012 Samsung Electronics
 * MyungJoo Ham <[email protected]>
 */

#ifndef _EXTCON_ADC_JACK_H_
#define _EXTCON_ADC_JACK_H_ __FILE__

#include <linux/module.h>
#include <linux/extcon.h>

/**
 * struct adc_jack_cond - condition to use an extcon state
 * (an all-zero entry denotes the last adc_jack_cond element among the array)
 * @id: the unique id of each external connector
 * @min_adc: min adc value for this condition
 * @max_adc: max adc value for this condition
 *
 * For example, if { .state = 0x3, .min_adc = 100, .max_adc = 200}, it means
 * that if ADC value is between (inclusive) 100 and 200, then the cable 0 and
 * 1 are attached (1<<0 | 1<<1 == 0x3)
 *
 * NOTE(review): the example above still refers to a bitmask ".state" field,
 * but the structure below only has a single extcon @id -- presumably the
 * example predates the state->id conversion; confirm and update.
 *
 * Note that you don't need to describe condition for "no cable attached"
 * because when no adc_jack_cond is met, state = 0 is automatically chosen.
 */
struct adc_jack_cond {
	unsigned int id;
	u32 min_adc;
	u32 max_adc;
};

/**
 * struct adc_jack_pdata - platform data for adc jack device.
 * @name: name of the extcon device. If null, "adc-jack" is used.
 * @consumer_channel: Unique name to identify the channel on the consumer
 *		      side. This typically describes the channels used within
 *		      the consumer. E.g. 'battery_voltage'
 * @cable_names: array of extcon id for supported cables.
 * @adc_conditions: array of struct adc_jack_cond conditions ending
 *		    with .state = 0 entry. This describes how to decode
 *		    adc values into extcon state.
 * @irq_flags: irq flags used for the interrupt.
 * @handling_delay_ms: in some devices, we need to read ADC value some
 *		       milli-seconds after the interrupt occurs. You may
 *		       describe such delays with @handling_delay_ms, which
 *		       is rounded-off by jiffies.
 * @wakeup_source: flag to wake up the system for extcon events.
 */
struct adc_jack_pdata {
	const char *name;
	const char *consumer_channel;

	const unsigned int *cable_names;

	/* The last entry's state should be 0 */
	struct adc_jack_cond *adc_conditions;

	unsigned long irq_flags;
	unsigned long handling_delay_ms; /* in ms */
	bool wakeup_source;
};

#endif /* _EXTCON_ADC_JACK_H_ */
// SPDX-License-Identifier: GPL-2.0-or-later
/* Validate the trust chain of a PKCS#7 message.
 *
 * Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells ([email protected])
 */

#define pr_fmt(fmt) "PKCS7: "fmt
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/asn1.h>
#include <linux/key.h>
#include <keys/asymmetric-type.h>
#include <crypto/public_key.h>
#include "pkcs7_parser.h"

/*
 * Check the trust on one PKCS#7 SignedInfo block.
 *
 * Walks the certificate chain of the block's signer looking for a
 * certificate (or, failing that, an issuer reference) that matches a key
 * in @trust_keyring, then verifies the relevant signature with that
 * trusted key.  Returns 0 on success, -ENOKEY if no trusted intersection
 * was found, -EKEYREJECTED if a signature check failed, -ENOPKG if the
 * needed crypto is unavailable, or -ENOMEM.
 */
static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
				    struct pkcs7_signed_info *sinfo,
				    struct key *trust_keyring)
{
	struct public_key_signature *sig = sinfo->sig;
	struct x509_certificate *x509, *last = NULL, *p;
	struct key *key;
	int ret;

	kenter(",%u,", sinfo->index);

	/* An earlier pass already found the required crypto missing. */
	if (sinfo->unsupported_crypto) {
		kleave(" = -ENOPKG [cached]");
		return -ENOPKG;
	}

	/* Walk up the chain from the signing certificate towards the root,
	 * reusing cached results (->seen / ->verified) from prior
	 * SignedInfo blocks in the same message. */
	for (x509 = sinfo->signer; x509; x509 = x509->signer) {
		if (x509->seen) {
			if (x509->verified)
				goto verified;
			kleave(" = -ENOKEY [cached]");
			return -ENOKEY;
		}
		x509->seen = true;

		/* Look to see if this certificate is present in the trusted
		 * keys.
		 */
		key = find_asymmetric_key(trust_keyring, x509->id, x509->skid,
					  NULL, false);
		if (!IS_ERR(key)) {
			/* One of the X.509 certificates in the PKCS#7 message
			 * is apparently the same as one we already trust.
			 * Verify that the trusted variant can also validate
			 * the signature on the descendant.
			 */
			pr_devel("sinfo %u: Cert %u as key %x\n",
				 sinfo->index, x509->index, key_serial(key));
			goto matched;
		}
		if (key == ERR_PTR(-ENOMEM))
			return -ENOMEM;

		/* Self-signed certificates form roots of their own, and if we
		 * don't know them, then we can't accept them.
		 */
		if (x509->signer == x509) {
			kleave(" = -ENOKEY [unknown self-signed]");
			return -ENOKEY;
		}

		might_sleep();
		last = x509;
		/* The signature of interest is now the one on this cert. */
		sig = last->sig;
	}

	/* No match - see if the root certificate has a signer amongst the
	 * trusted keys.
	 */
	if (last && (last->sig->auth_ids[0] || last->sig->auth_ids[1])) {
		key = find_asymmetric_key(trust_keyring,
					  last->sig->auth_ids[0],
					  last->sig->auth_ids[1],
					  NULL, false);
		if (!IS_ERR(key)) {
			x509 = last;
			pr_devel("sinfo %u: Root cert %u signer is key %x\n",
				 sinfo->index, x509->index, key_serial(key));
			goto matched;
		}
		if (PTR_ERR(key) != -ENOKEY)
			return PTR_ERR(key);
	}

	/* As a last resort, see if we have a trusted public key that matches
	 * the signed info directly.
	 */
	key = find_asymmetric_key(trust_keyring,
				  sinfo->sig->auth_ids[0],
				  NULL, NULL, false);
	if (!IS_ERR(key)) {
		pr_devel("sinfo %u: Direct signer is key %x\n",
			 sinfo->index, key_serial(key));
		/* x509 == NULL tells the 'verified' path there's no chain
		 * to mark. */
		x509 = NULL;
		sig = sinfo->sig;
		goto matched;
	}
	if (PTR_ERR(key) != -ENOKEY)
		return PTR_ERR(key);

	kleave(" = -ENOKEY [no backref]");
	return -ENOKEY;

matched:
	ret = verify_signature(key, sig);
	key_put(key);
	if (ret < 0) {
		if (ret == -ENOMEM)
			return ret;
		kleave(" = -EKEYREJECTED [verify %d]", ret);
		return -EKEYREJECTED;
	}

verified:
	if (x509) {
		/* Mark the chain up to the matched cert so later SignedInfo
		 * blocks can short-circuit via the ->seen cache above. */
		x509->verified = true;
		for (p = sinfo->signer; p != x509; p = p->signer)
			p->verified = true;
	}
	kleave(" = 0");
	return 0;
}

/**
 * pkcs7_validate_trust - Validate PKCS#7 trust chain
 * @pkcs7: The PKCS#7 certificate to validate
 * @trust_keyring: Signing certificates to use as starting points
 *
 * Validate that the certificate chain inside the PKCS#7 message intersects
 * keys we already know and trust.
 *
 * Returns, in order of descending priority:
 *
 *  (*) -EKEYREJECTED if a signature failed to match for which we have a valid
 *	key, or:
 *
 *  (*) 0 if at least one signature chain intersects with the keys in the trust
 *	keyring, or:
 *
 *  (*) -ENOPKG if a suitable crypto module couldn't be found for a check on a
 *	chain.
 *
 *  (*) -ENOKEY if we couldn't find a match for any of the signature chains in
 *	the message.
 *
 * May also return -ENOMEM.
*/ int pkcs7_validate_trust(struct pkcs7_message *pkcs7, struct key *trust_keyring) { struct pkcs7_signed_info *sinfo; struct x509_certificate *p; int cached_ret = -ENOKEY; int ret; for (p = pkcs7->certs; p; p = p->next) p->seen = false; for (sinfo = pkcs7->signed_infos; sinfo; sinfo = sinfo->next) { ret = pkcs7_validate_trust_one(pkcs7, sinfo, trust_keyring); switch (ret) { case -ENOKEY: continue; case -ENOPKG: if (cached_ret == -ENOKEY) cached_ret = -ENOPKG; continue; case 0: cached_ret = 0; continue; default: return ret; } } return cached_ret; } EXPORT_SYMBOL_GPL(pkcs7_validate_trust);
// SPDX-License-Identifier: GPL-2.0
/*
 * ii_pci20kc.c
 * Driver for Intelligent Instruments PCI-20001C carrier board and modules.
 *
 * Copyright (C) 2000 Markus Kempf <[email protected]>
 * with suggestions from David Schleef	16.06.2000
 */

/*
 * Driver: ii_pci20kc
 * Description: Intelligent Instruments PCI-20001C carrier board
 * Devices: [Intelligent Instrumentation] PCI-20001C (ii_pci20kc)
 * Author: Markus Kempf <[email protected]>
 * Status: works
 *
 * Supports the PCI-20001C-1a and PCI-20001C-2a carrier boards. The
 * -2a version has 32 on-board DIO channels. Three add-on modules
 * can be added to the carrier board for additional functionality.
 *
 * Supported add-on modules:
 * PCI-20006M-1 1 channel, 16-bit analog output module
 * PCI-20006M-2 2 channel, 16-bit analog output module
 * PCI-20341M-1A 4 channel, 16-bit analog input module
 *
 * Options:
 * 0 Board base address
 * 1 IRQ (not-used)
 */

#include <linux/module.h>
#include <linux/io.h>
#include <linux/comedi/comedidev.h>

/*
 * Register I/O map
 *
 * The carrier board occupies a 0x400 memory window; each of the three
 * add-on module slots is a 0x100 sub-window within it.
 */
#define II20K_SIZE			0x400
#define II20K_MOD_OFFSET		0x100

/* Carrier-board identification register */
#define II20K_ID_REG			0x00
#define II20K_ID_MOD1_EMPTY		BIT(7)
#define II20K_ID_MOD2_EMPTY		BIT(6)
#define II20K_ID_MOD3_EMPTY		BIT(5)
#define II20K_ID_MASK			0x1f
#define II20K_ID_PCI20001C_1A		0x1b	/* no on-board DIO */
#define II20K_ID_PCI20001C_2A		0x1d	/* on-board DIO */

#define II20K_MOD_STATUS_REG		0x40
#define II20K_MOD_STATUS_IRQ_MOD1	BIT(7)
#define II20K_MOD_STATUS_IRQ_MOD2	BIT(6)
#define II20K_MOD_STATUS_IRQ_MOD3	BIT(5)

/* On-board DIO registers (PCI-20001C-2a only) */
#define II20K_DIO0_REG			0x80
#define II20K_DIO1_REG			0x81
#define II20K_DIR_ENA_REG		0x82
#define II20K_DIR_DIO3_OUT		BIT(7)
#define II20K_DIR_DIO2_OUT		BIT(6)
#define II20K_BUF_DISAB_DIO3		BIT(5)
#define II20K_BUF_DISAB_DIO2		BIT(4)
#define II20K_DIR_DIO1_OUT		BIT(3)
#define II20K_DIR_DIO0_OUT		BIT(2)
#define II20K_BUF_DISAB_DIO1		BIT(1)
#define II20K_BUF_DISAB_DIO0		BIT(0)
#define II20K_CTRL01_REG		0x83
#define II20K_CTRL01_SET		BIT(7)
#define II20K_CTRL01_DIO0_IN		BIT(4)
#define II20K_CTRL01_DIO1_IN		BIT(1)
#define II20K_DIO2_REG			0xc0
#define II20K_DIO3_REG			0xc1
#define II20K_CTRL23_REG		0xc3
#define II20K_CTRL23_SET		BIT(7)
#define II20K_CTRL23_DIO2_IN		BIT(4)
#define II20K_CTRL23_DIO3_IN		BIT(1)

/* PCI-20006M analog-output module (registers relative to module window) */
#define II20K_ID_PCI20006M_1		0xe2	/* 1 AO channels */
#define II20K_ID_PCI20006M_2		0xe3	/* 2 AO channels */
#define II20K_AO_STRB_REG(x)		(0x0b + ((x) * 0x08))
#define II20K_AO_LSB_REG(x)		(0x0d + ((x) * 0x08))
#define II20K_AO_MSB_REG(x)		(0x0e + ((x) * 0x08))
#define II20K_AO_STRB_BOTH_REG		0x1b

/* PCI-20341M analog-input module (registers relative to module window) */
#define II20K_ID_PCI20341M_1		0x77	/* 4 AI channels */
#define II20K_AI_STATUS_CMD_REG		0x01
#define II20K_AI_STATUS_CMD_BUSY	BIT(7)
#define II20K_AI_STATUS_CMD_HW_ENA	BIT(1)
#define II20K_AI_STATUS_CMD_EXT_START	BIT(0)
#define II20K_AI_LSB_REG		0x02
#define II20K_AI_MSB_REG		0x03
#define II20K_AI_PACER_RESET_REG	0x04
#define II20K_AI_16BIT_DATA_REG		0x06
#define II20K_AI_CONF_REG		0x10
#define II20K_AI_CONF_ENA		BIT(2)
#define II20K_AI_OPT_REG		0x11
#define II20K_AI_OPT_TRIG_ENA		BIT(5)
#define II20K_AI_OPT_TRIG_INV		BIT(4)
#define II20K_AI_OPT_TIMEBASE(x)	(((x) & 0x3) << 1)
#define II20K_AI_OPT_BURST_MODE		BIT(0)
#define II20K_AI_STATUS_REG		0x12
#define II20K_AI_STATUS_INT		BIT(7)
#define II20K_AI_STATUS_TRIG		BIT(6)
#define II20K_AI_STATUS_TRIG_ENA	BIT(5)
#define II20K_AI_STATUS_PACER_ERR	BIT(2)
#define II20K_AI_STATUS_DATA_ERR	BIT(1)
#define II20K_AI_STATUS_SET_TIME_ERR	BIT(0)
#define II20K_AI_LAST_CHAN_ADDR_REG	0x13
#define II20K_AI_CUR_ADDR_REG		0x14
#define II20K_AI_SET_TIME_REG		0x15
#define II20K_AI_DELAY_LSB_REG		0x16
#define II20K_AI_DELAY_MSB_REG		0x17
#define II20K_AI_CHAN_ADV_REG		0x18
#define II20K_AI_CHAN_RESET_REG		0x19
#define II20K_AI_START_TRIG_REG		0x1a
#define II20K_AI_COUNT_RESET_REG	0x1b
#define II20K_AI_CHANLIST_REG		0x80
#define II20K_AI_CHANLIST_ONBOARD_ONLY	BIT(5)
#define II20K_AI_CHANLIST_GAIN(x)	(((x) & 0x3) << 3)
#define II20K_AI_CHANLIST_MUX_ENA	BIT(2)
#define II20K_AI_CHANLIST_CHAN(x)	(((x) & 0x3) << 0)
#define II20K_AI_CHANLIST_LEN		0x80

/* the AO range is set by jumpers on the 20006M
module */ static const struct comedi_lrange ii20k_ao_ranges = { 3, { BIP_RANGE(5), /* Chan 0 - W1/W3 in Chan 1 - W2/W4 in */ UNI_RANGE(10), /* Chan 0 - W1/W3 out Chan 1 - W2/W4 in */ BIP_RANGE(10) /* Chan 0 - W1/W3 in Chan 1 - W2/W4 out */ } }; static const struct comedi_lrange ii20k_ai_ranges = { 4, { BIP_RANGE(5), /* gain 1 */ BIP_RANGE(0.5), /* gain 10 */ BIP_RANGE(0.05), /* gain 100 */ BIP_RANGE(0.025) /* gain 200 */ }, }; static void __iomem *ii20k_module_iobase(struct comedi_device *dev, struct comedi_subdevice *s) { return dev->mmio + (s->index + 1) * II20K_MOD_OFFSET; } static int ii20k_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { void __iomem *iobase = ii20k_module_iobase(dev, s); unsigned int chan = CR_CHAN(insn->chanspec); int i; for (i = 0; i < insn->n; i++) { unsigned int val = data[i]; s->readback[chan] = val; /* munge the offset binary data to 2's complement */ val = comedi_offset_munge(s, val); writeb(val & 0xff, iobase + II20K_AO_LSB_REG(chan)); writeb((val >> 8) & 0xff, iobase + II20K_AO_MSB_REG(chan)); writeb(0x00, iobase + II20K_AO_STRB_REG(chan)); } return insn->n; } static int ii20k_ai_eoc(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned long context) { void __iomem *iobase = ii20k_module_iobase(dev, s); unsigned char status; status = readb(iobase + II20K_AI_STATUS_REG); if ((status & II20K_AI_STATUS_INT) == 0) return 0; return -EBUSY; } static void ii20k_ai_setup(struct comedi_device *dev, struct comedi_subdevice *s, unsigned int chanspec) { void __iomem *iobase = ii20k_module_iobase(dev, s); unsigned int chan = CR_CHAN(chanspec); unsigned int range = CR_RANGE(chanspec); unsigned char val; /* initialize module */ writeb(II20K_AI_CONF_ENA, iobase + II20K_AI_CONF_REG); /* software conversion */ writeb(0, iobase + II20K_AI_STATUS_CMD_REG); /* set the time base for the settling time counter based on the gain */ val = (range < 3) ? 
II20K_AI_OPT_TIMEBASE(0) : II20K_AI_OPT_TIMEBASE(2); writeb(val, iobase + II20K_AI_OPT_REG); /* set the settling time counter based on the gain */ val = (range < 2) ? 0x58 : (range < 3) ? 0x93 : 0x99; writeb(val, iobase + II20K_AI_SET_TIME_REG); /* set number of input channels */ writeb(1, iobase + II20K_AI_LAST_CHAN_ADDR_REG); /* set the channel list byte */ val = II20K_AI_CHANLIST_ONBOARD_ONLY | II20K_AI_CHANLIST_MUX_ENA | II20K_AI_CHANLIST_GAIN(range) | II20K_AI_CHANLIST_CHAN(chan); writeb(val, iobase + II20K_AI_CHANLIST_REG); /* reset settling time counter and trigger delay counter */ writeb(0, iobase + II20K_AI_COUNT_RESET_REG); /* reset channel scanner */ writeb(0, iobase + II20K_AI_CHAN_RESET_REG); } static int ii20k_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { void __iomem *iobase = ii20k_module_iobase(dev, s); int ret; int i; ii20k_ai_setup(dev, s, insn->chanspec); for (i = 0; i < insn->n; i++) { unsigned int val; /* generate a software start convert signal */ readb(iobase + II20K_AI_PACER_RESET_REG); ret = comedi_timeout(dev, s, insn, ii20k_ai_eoc, 0); if (ret) return ret; val = readb(iobase + II20K_AI_LSB_REG); val |= (readb(iobase + II20K_AI_MSB_REG) << 8); /* munge the 2's complement data to offset binary */ data[i] = comedi_offset_munge(s, val); } return insn->n; } static void ii20k_dio_config(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned char ctrl01 = 0; unsigned char ctrl23 = 0; unsigned char dir_ena = 0; /* port 0 - channels 0-7 */ if (s->io_bits & 0x000000ff) { /* output port */ ctrl01 &= ~II20K_CTRL01_DIO0_IN; dir_ena &= ~II20K_BUF_DISAB_DIO0; dir_ena |= II20K_DIR_DIO0_OUT; } else { /* input port */ ctrl01 |= II20K_CTRL01_DIO0_IN; dir_ena &= ~II20K_DIR_DIO0_OUT; } /* port 1 - channels 8-15 */ if (s->io_bits & 0x0000ff00) { /* output port */ ctrl01 &= ~II20K_CTRL01_DIO1_IN; dir_ena &= ~II20K_BUF_DISAB_DIO1; dir_ena |= II20K_DIR_DIO1_OUT; } else { /* input 
port */ ctrl01 |= II20K_CTRL01_DIO1_IN; dir_ena &= ~II20K_DIR_DIO1_OUT; } /* port 2 - channels 16-23 */ if (s->io_bits & 0x00ff0000) { /* output port */ ctrl23 &= ~II20K_CTRL23_DIO2_IN; dir_ena &= ~II20K_BUF_DISAB_DIO2; dir_ena |= II20K_DIR_DIO2_OUT; } else { /* input port */ ctrl23 |= II20K_CTRL23_DIO2_IN; dir_ena &= ~II20K_DIR_DIO2_OUT; } /* port 3 - channels 24-31 */ if (s->io_bits & 0xff000000) { /* output port */ ctrl23 &= ~II20K_CTRL23_DIO3_IN; dir_ena &= ~II20K_BUF_DISAB_DIO3; dir_ena |= II20K_DIR_DIO3_OUT; } else { /* input port */ ctrl23 |= II20K_CTRL23_DIO3_IN; dir_ena &= ~II20K_DIR_DIO3_OUT; } ctrl23 |= II20K_CTRL01_SET; ctrl23 |= II20K_CTRL23_SET; /* order is important */ writeb(ctrl01, dev->mmio + II20K_CTRL01_REG); writeb(ctrl23, dev->mmio + II20K_CTRL23_REG); writeb(dir_ena, dev->mmio + II20K_DIR_ENA_REG); } static int ii20k_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int chan = CR_CHAN(insn->chanspec); unsigned int mask; int ret; if (chan < 8) mask = 0x000000ff; else if (chan < 16) mask = 0x0000ff00; else if (chan < 24) mask = 0x00ff0000; else mask = 0xff000000; ret = comedi_dio_insn_config(dev, s, insn, data, mask); if (ret) return ret; ii20k_dio_config(dev, s); return insn->n; } static int ii20k_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask; mask = comedi_dio_update_state(s, data); if (mask) { if (mask & 0x000000ff) writeb((s->state >> 0) & 0xff, dev->mmio + II20K_DIO0_REG); if (mask & 0x0000ff00) writeb((s->state >> 8) & 0xff, dev->mmio + II20K_DIO1_REG); if (mask & 0x00ff0000) writeb((s->state >> 16) & 0xff, dev->mmio + II20K_DIO2_REG); if (mask & 0xff000000) writeb((s->state >> 24) & 0xff, dev->mmio + II20K_DIO3_REG); } data[1] = readb(dev->mmio + II20K_DIO0_REG); data[1] |= readb(dev->mmio + II20K_DIO1_REG) << 8; data[1] |= readb(dev->mmio + II20K_DIO2_REG) << 16; data[1] 
	    |= readb(dev->mmio + II20K_DIO3_REG) << 24;

	return insn->n;
}

/* Probe one module slot and configure its subdevice based on the ID
 * byte found at the start of the module's register window. */
static int ii20k_init_module(struct comedi_device *dev,
			     struct comedi_subdevice *s)
{
	void __iomem *iobase = ii20k_module_iobase(dev, s);
	unsigned char id;
	int ret;

	id = readb(iobase + II20K_ID_REG);
	switch (id) {
	case II20K_ID_PCI20006M_1:
	case II20K_ID_PCI20006M_2:
		/* Analog Output subdevice */
		s->type		= COMEDI_SUBD_AO;
		s->subdev_flags	= SDF_WRITABLE;
		s->n_chan	= (id == II20K_ID_PCI20006M_2) ? 2 : 1;
		s->maxdata	= 0xffff;
		s->range_table	= &ii20k_ao_ranges;
		s->insn_write	= ii20k_ao_insn_write;

		ret = comedi_alloc_subdev_readback(s);
		if (ret)
			return ret;
		break;
	case II20K_ID_PCI20341M_1:
		/* Analog Input subdevice */
		s->type		= COMEDI_SUBD_AI;
		s->subdev_flags	= SDF_READABLE | SDF_DIFF;
		s->n_chan	= 4;
		s->maxdata	= 0xffff;
		s->range_table	= &ii20k_ai_ranges;
		s->insn_read	= ii20k_ai_insn_read;
		break;
	default:
		/* empty slot or unrecognized module - not an error */
		s->type = COMEDI_SUBD_UNUSED;
		break;
	}

	return 0;
}

/* Attach handler: map the carrier board, identify it, then set up the
 * three module subdevices plus the optional on-board DIO subdevice.
 * NOTE(review): error paths after ioremap() rely on the comedi core
 * calling ii20k_detach() on attach failure to undo mapping/region. */
static int ii20k_attach(struct comedi_device *dev,
			struct comedi_devconfig *it)
{
	struct comedi_subdevice *s;
	unsigned int membase;
	unsigned char id;
	bool has_dio;
	int ret;

	membase = it->options[0];
	/* base must be non-zero and aligned within the 1 MiB window */
	if (!membase || (membase & ~(0x100000 - II20K_SIZE))) {
		dev_warn(dev->class_dev,
			 "%s: invalid memory address specified\n",
			 dev->board_name);
		return -EINVAL;
	}

	if (!request_mem_region(membase, II20K_SIZE, dev->board_name)) {
		dev_warn(dev->class_dev, "%s: I/O mem conflict (%#x,%u)\n",
			 dev->board_name, membase, II20K_SIZE);
		return -EIO;
	}
	dev->iobase = membase;	/* actually, a memory address */

	dev->mmio = ioremap(membase, II20K_SIZE);
	if (!dev->mmio)
		return -ENOMEM;

	id = readb(dev->mmio + II20K_ID_REG);
	switch (id & II20K_ID_MASK) {
	case II20K_ID_PCI20001C_1A:
		has_dio = false;
		break;
	case II20K_ID_PCI20001C_2A:
		has_dio = true;
		break;
	default:
		return -ENODEV;
	}

	ret = comedi_alloc_subdevices(dev, 4);
	if (ret)
		return ret;

	/* subdevices 0-2: one per add-on module slot */
	s = &dev->subdevices[0];
	if (id & II20K_ID_MOD1_EMPTY) {
		s->type = COMEDI_SUBD_UNUSED;
	} else {
		ret = ii20k_init_module(dev, s);
		if (ret)
			return ret;
	}

	s = &dev->subdevices[1];
	if (id & II20K_ID_MOD2_EMPTY) {
		s->type = COMEDI_SUBD_UNUSED;
	} else {
		ret = ii20k_init_module(dev, s);
		if (ret)
			return ret;
	}

	s = &dev->subdevices[2];
	if (id & II20K_ID_MOD3_EMPTY) {
		s->type = COMEDI_SUBD_UNUSED;
	} else {
		ret = ii20k_init_module(dev, s);
		if (ret)
			return ret;
	}

	/* Digital I/O subdevice */
	s = &dev->subdevices[3];
	if (has_dio) {
		s->type		= COMEDI_SUBD_DIO;
		s->subdev_flags	= SDF_READABLE | SDF_WRITABLE;
		s->n_chan	= 32;
		s->maxdata	= 1;
		s->range_table	= &range_digital;
		s->insn_bits	= ii20k_dio_insn_bits;
		s->insn_config	= ii20k_dio_insn_config;

		/* default all channels to input */
		ii20k_dio_config(dev, s);
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}

	return 0;
}

/* Undo ii20k_attach(): unmap the window and release the mem region. */
static void ii20k_detach(struct comedi_device *dev)
{
	if (dev->mmio)
		iounmap(dev->mmio);
	if (dev->iobase)	/* actually, a memory address */
		release_mem_region(dev->iobase, II20K_SIZE);
}

static struct comedi_driver ii20k_driver = {
	.driver_name	= "ii_pci20kc",
	.module		= THIS_MODULE,
	.attach		= ii20k_attach,
	.detach		= ii20k_detach,
};
module_comedi_driver(ii20k_driver);

MODULE_AUTHOR("Comedi https://www.comedi.org");
MODULE_DESCRIPTION("Comedi driver for Intelligent Instruments PCI-20001C");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Apple T8010 "A10" SoC
 *
 * Other names: H9P, "Cayman"
 *
 * Copyright (c) 2022, Konrad Dybcio <[email protected]>
 */

#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/interrupt-controller/apple-aic.h>
#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/pinctrl/apple.h>

/ {
	interrupt-parent = <&aic>;
	#address-cells = <2>;
	#size-cells = <2>;

	/* Fixed 24 MHz reference clock ("clkref") */
	clkref: clock-ref {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <24000000>;
		clock-output-names = "clkref";
	};

	/* Two Hurricane/Zephyr cores, brought up via spin-table */
	cpus {
		#address-cells = <2>;
		#size-cells = <0>;

		cpu0: cpu@0 {
			compatible = "apple,hurricane-zephyr";
			reg = <0x0 0x0>;
			cpu-release-addr = <0 0>; /* To be filled by loader */
			enable-method = "spin-table";
			device_type = "cpu";
		};

		cpu1: cpu@1 {
			compatible = "apple,hurricane-zephyr";
			reg = <0x0 0x1>;
			cpu-release-addr = <0 0>; /* To be filled by loader */
			enable-method = "spin-table";
			device_type = "cpu";
		};
	};

	soc {
		compatible = "simple-bus";
		#address-cells = <2>;
		#size-cells = <2>;
		nonposted-mmio;
		ranges;

		serial0: serial@20a0c0000 {
			compatible = "apple,s5l-uart";
			reg = <0x2 0x0a0c0000 0x0 0x4000>;
			reg-io-width = <4>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 218 IRQ_TYPE_LEVEL_HIGH>;
			/* Use the bootloader-enabled clocks for now. */
			clocks = <&clkref>, <&clkref>;
			clock-names = "uart", "clk_uart_baud0";
			status = "disabled";
		};

		aic: interrupt-controller@20e100000 {
			compatible = "apple,t8010-aic", "apple,aic";
			reg = <0x2 0x0e100000 0x0 0x100000>;
			#interrupt-cells = <3>;
			interrupt-controller;
		};

		/* Application-processor pin controller (208 pins) */
		pinctrl_ap: pinctrl@20f100000 {
			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
			reg = <0x2 0x0f100000 0x0 0x100000>;

			gpio-controller;
			#gpio-cells = <2>;
			gpio-ranges = <&pinctrl_ap 0 0 208>;
			apple,npins = <208>;

			interrupt-controller;
			#interrupt-cells = <2>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 42 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 43 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 44 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 45 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 46 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 47 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 48 IRQ_TYPE_LEVEL_HIGH>;
		};

		/* Always-on-processor pin controller (42 pins) */
		pinctrl_aop: pinctrl@2100f0000 {
			compatible = "apple,t8010-pinctrl", "apple,pinctrl";
			reg = <0x2 0x100f0000 0x0 0x100000>;

			gpio-controller;
			#gpio-cells = <2>;
			gpio-ranges = <&pinctrl_aop 0 0 42>;
			apple,npins = <42>;

			interrupt-controller;
			#interrupt-cells = <2>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 128 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 129 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 130 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 131 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 132 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 133 IRQ_TYPE_LEVEL_HIGH>,
				     <AIC_IRQ 134 IRQ_TYPE_LEVEL_HIGH>;
		};

		wdt: watchdog@2102b0000 {
			compatible = "apple,t8010-wdt", "apple,wdt";
			reg = <0x2 0x102b0000 0x0 0x4000>;
			clocks = <&clkref>;
			interrupt-parent = <&aic>;
			interrupts = <AIC_IRQ 4 IRQ_TYPE_LEVEL_HIGH>;
		};
	};

	timer {
		compatible = "arm,armv8-timer";
		interrupt-parent = <&aic>;
		interrupt-names = "phys", "virt";
		/* Note that A10 doesn't actually have a hypervisor (EL2 is not implemented). */
		interrupts = <AIC_FIQ AIC_TMR_GUEST_PHYS IRQ_TYPE_LEVEL_HIGH>,
			     <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
	};
};
/* * linux/fs/nls/mac-roman.c * * Charset macroman translation tables. * Generated automatically from the Unicode and charset * tables from the Unicode Organization (www.unicode.org). * The Unicode to charset table has only exact mappings. */ /* * COPYRIGHT AND PERMISSION NOTICE * * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under * the Terms of Use in http://www.unicode.org/copyright.html. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of the Unicode data files and any associated documentation (the "Data * Files") or Unicode software and any associated documentation (the * "Software") to deal in the Data Files or Software without restriction, * including without limitation the rights to use, copy, modify, merge, * publish, distribute, and/or sell copies of the Data Files or Software, and * to permit persons to whom the Data Files or Software are furnished to do * so, provided that (a) the above copyright notice(s) and this permission * notice appear with all copies of the Data Files or Software, (b) both the * above copyright notice(s) and this permission notice appear in associated * documentation, and (c) there is clear notice in each modified Data File or * in the Software as well as in the documentation associated with the Data * File(s) or Software that the data or software has been modified. * * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF * THIRD PARTY RIGHTS. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THE DATA FILES OR SOFTWARE. * * Except as contained in this notice, the name of a copyright holder shall * not be used in advertising or otherwise to promote the sale, use or other * dealings in these Data Files or Software without prior written * authorization of the copyright holder. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nls.h> #include <linux/errno.h> static const wchar_t charset2uni[256] = { /* 0x00 */ 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, /* 0x10 */ 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x001f, /* 0x20 */ 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, /* 0x30 */ 0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037, 0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f, /* 0x40 */ 0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047, 0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f, /* 0x50 */ 0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057, 0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f, /* 0x60 */ 0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067, 0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f, /* 0x70 */ 0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077, 0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x007f, /* 0x80 */ 0x00c4, 0x00c5, 0x00c7, 0x00c9, 0x00d1, 0x00d6, 
0x00dc, 0x00e1, 0x00e0, 0x00e2, 0x00e4, 0x00e3, 0x00e5, 0x00e7, 0x00e9, 0x00e8, /* 0x90 */ 0x00ea, 0x00eb, 0x00ed, 0x00ec, 0x00ee, 0x00ef, 0x00f1, 0x00f3, 0x00f2, 0x00f4, 0x00f6, 0x00f5, 0x00fa, 0x00f9, 0x00fb, 0x00fc, /* 0xa0 */ 0x2020, 0x00b0, 0x00a2, 0x00a3, 0x00a7, 0x2022, 0x00b6, 0x00df, 0x00ae, 0x00a9, 0x2122, 0x00b4, 0x00a8, 0x2260, 0x00c6, 0x00d8, /* 0xb0 */ 0x221e, 0x00b1, 0x2264, 0x2265, 0x00a5, 0x00b5, 0x2202, 0x2211, 0x220f, 0x03c0, 0x222b, 0x00aa, 0x00ba, 0x03a9, 0x00e6, 0x00f8, /* 0xc0 */ 0x00bf, 0x00a1, 0x00ac, 0x221a, 0x0192, 0x2248, 0x2206, 0x00ab, 0x00bb, 0x2026, 0x00a0, 0x00c0, 0x00c3, 0x00d5, 0x0152, 0x0153, /* 0xd0 */ 0x2013, 0x2014, 0x201c, 0x201d, 0x2018, 0x2019, 0x00f7, 0x25ca, 0x00ff, 0x0178, 0x2044, 0x20ac, 0x2039, 0x203a, 0xfb01, 0xfb02, /* 0xe0 */ 0x2021, 0x00b7, 0x201a, 0x201e, 0x2030, 0x00c2, 0x00ca, 0x00c1, 0x00cb, 0x00c8, 0x00cd, 0x00ce, 0x00cf, 0x00cc, 0x00d3, 0x00d4, /* 0xf0 */ 0xf8ff, 0x00d2, 0x00da, 0x00db, 0x00d9, 0x0131, 0x02c6, 0x02dc, 0x00af, 0x02d8, 0x02d9, 0x02da, 0x00b8, 0x02dd, 0x02db, 0x02c7, }; static const unsigned char page00[256] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */ 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */ 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */ 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */ 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */ 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */ 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */ 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */ 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */ 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */ 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */ 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */ 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */ 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */ 
0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */ 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */ 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */ 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */ 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */ 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */ 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */ 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */ 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */ 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */ 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */ 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */ 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */ }; static const unsigned char page01[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page02[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page03[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page20[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */ 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 
0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page21[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page22[256] = { 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */ 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 
0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char page25[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */ }; static const unsigned char pagef8[256] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */ }; static const unsigned char pagefb[256] = { 0x00, 0xde, 0xdf, 0x00, 0x00, 
0x00, 0x00, 0x00, /* 0x00-0x07 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, /* 0xf8-0xff */ }; static const unsigned char *const page_uni2charset[256] = { page00, page01, page02, page03, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, page20, page21, page22, NULL, NULL, page25, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, pagef8, NULL, NULL, pagefb, NULL, NULL, NULL, NULL, }; static const unsigned char charset2lower[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
/* 0x20-0x27 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */ }; static const unsigned char charset2upper[256] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */ 0xff, 
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
};

/*
 * Convert one Unicode code point to a single MacRoman byte.
 *
 * @uni:      Unicode code point (BMP only; wchar_t here is 16-bit in NLS).
 * @out:      destination buffer for the converted byte.
 * @boundlen: remaining space in @out.
 *
 * The high byte of @uni selects a 256-entry sub-table from
 * page_uni2charset; the low byte indexes into it.  A zero table entry
 * (and a NULL sub-table pointer) means "no MacRoman equivalent".
 *
 * Returns the number of output bytes written (always 1 on success),
 * -ENAMETOOLONG if @boundlen is exhausted, or -EINVAL if the code
 * point has no mapping.
 */
static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
{
	const unsigned char *uni2charset;
	unsigned char cl = uni & 0x00ff;
	unsigned char ch = (uni & 0xff00) >> 8;

	if (boundlen <= 0)
		return -ENAMETOOLONG;

	uni2charset = page_uni2charset[ch];
	if (uni2charset && uni2charset[cl])
		out[0] = uni2charset[cl];
	else
		return -EINVAL;
	return 1;
}

/*
 * Convert one MacRoman byte to its Unicode code point.
 *
 * @rawstring: input byte sequence (only the first byte is consumed,
 *             since MacRoman is a single-byte charset).
 * @boundlen:  remaining input length (unused here; a single byte is
 *             always available by contract of the NLS layer).
 * @uni:       result code point.
 *
 * Returns the number of input bytes consumed (always 1), or -EINVAL
 * when the byte maps to U+0000, which the forward table charset2uni
 * uses as its "unmapped" sentinel.
 */
static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
{
	*uni = charset2uni[*rawstring];
	if (*uni == 0x0000)
		return -EINVAL;
	return 1;
}

/* NLS operations vector registered with the kernel's NLS core. */
static struct nls_table table = {
	.charset	= "macroman",
	.uni2char	= uni2char,
	.char2uni	= char2uni,
	.charset2lower	= charset2lower,
	.charset2upper	= charset2upper,
};

/* Register the "macroman" table on module load. */
static int __init init_nls_macroman(void)
{
	return register_nls(&table);
}

/* Unregister the table on module unload. */
static void __exit exit_nls_macroman(void)
{
	unregister_nls(&table);
}

module_init(init_nls_macroman)
module_exit(exit_nls_macroman)

MODULE_DESCRIPTION("NLS Codepage macroman");
MODULE_LICENSE("Dual BSD/GPL");
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/sparc/kernel/setup.c
 *
 *  Copyright (C) 1995  David S. Miller ([email protected])
 *  Copyright (C) 2000  Anton Blanchard ([email protected])
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/initrd.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/console.h>
#include <linux/spinlock.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/kdebug.h>
#include <linux/export.h>
#include <linux/start_kernel.h>
#include <uapi/linux/mount.h>

#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/traps.h>
#include <asm/vaddrs.h>
#include <asm/mbus.h>
#include <asm/idprom.h>
#include <asm/cpudata.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/sections.h>

#include "kernel.h"

/*
 * Typing sync at the prom prompt calls the function pointed to by
 * romvec->pv_synchook which I set to the following function.
 * This should sync all filesystems and return, for now it just
 * prints out pretty messages and returns.
 */

/* Pretty sick eh? */
/*
 * PROM "sync" hook: temporarily restore the kernel's trap table
 * (the PROM has installed its own by the time it calls us), sync
 * filesystems if we are in a safe context, then put the PROM's
 * trap base register (TBR) back before returning to the monitor.
 */
static void prom_sync_me(void)
{
	unsigned long prom_tbr, flags;

	/* XXX Badly broken. FIX! - Anton */
	local_irq_save(flags);
	/* Save the PROM's TBR, then point TBR at our trap table.
	 * The three nops cover the delayed effect of the wr insn.
	 */
	__asm__ __volatile__("rd %%tbr, %0\n\t" : "=r" (prom_tbr));
	__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
			     "nop\n\t"
			     "nop\n\t"
			     "nop\n\t" : : "r" (&trapbase[0]));

	prom_printf("PROM SYNC COMMAND...\n");
	show_mem();
	/* Only sync when running on behalf of a real task; syncing
	 * from the idle task would sleep in an invalid context.
	 */
	if (!is_idle_task(current)) {
		local_irq_enable();
		ksys_sync();
		local_irq_disable();
	}
	prom_printf("Returning to prom\n");

	/* Restore the PROM's trap table before handing control back. */
	__asm__ __volatile__("wr %0, 0x0, %%tbr\n\t"
			     "nop\n\t"
			     "nop\n\t"
			     "nop\n\t" : : "r" (prom_tbr));
	local_irq_restore(flags);
}

/* Boot-switch bits accumulated by process_switch(). */
static unsigned int boot_flags __initdata = 0;
#define BOOTME_DEBUG     0x1

/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size __initdata = 0;

/* which CPU booted us (0xff = not set) */
unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */

/* Early console backend: forward output straight to the PROM. */
static void
prom_console_write(struct console *con, const char *s, unsigned int n)
{
	prom_write(s, n);
}

/* Boot console used until the real console driver registers. */
static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT,
	.index =	-1,
};

/*
 * Process kernel command line switches that are specific to the
 * SPARC or that require special low-level processing.
 */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
		boot_flags |= BOOTME_DEBUG;
		break;
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		/* Keep the early PROM console alive past boot. */
		prom_early_console.flags &= ~CON_BOOT;
		break;
	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}

/*
 * Scan the boot command line for "-X" switches and the "mem=" option.
 * Everything else is skipped (handled later by the generic parser).
 */
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4)) {
			/*
			 * "mem=XXX[kKmM]" overrides the PROM-reported
			 * memory size.
			 */
			cmdline_memory_size = simple_strtoul(commands + 4,
						     &commands, 0);
			if (*commands == 'K' || *commands == 'k') {
				cmdline_memory_size <<= 10;
				commands++;
			} else if (*commands=='M' || *commands=='m') {
				cmdline_memory_size <<= 20;
				commands++;
			}
		}
		while (*commands && *commands != ' ')
			commands++;
	}
}

extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000

extern int root_mountflags;

char reboot_command[COMMAND_LINE_SIZE];

/* One patch site: address plus replacement insns per CPU model. */
struct cpuid_patch_entry {
	unsigned int	addr;
	unsigned int	sun4d[3];
	unsigned int	leon[3];
};
extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;

/*
 * Rewrite CPU-id fetch sequences in the kernel text to match the
 * detected CPU model.  The unpatched code targets sun4m; sun4d and
 * LEON get their model-specific 3-instruction sequences spliced in,
 * with an icache flush (flushi) after each word written.
 */
static void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;

	if (sparc_cpu_model == sun4m) {
		/* Nothing to do, this is what the unpatched code
		 * targets.
		 */
		return;
	}

	p = &__cpuid_patch;
	while (p < &__cpuid_patch_end) {
		unsigned long addr = p->addr;
		unsigned int *insns;

		switch (sparc_cpu_model) {
		case sun4d:
			insns = &p->sun4d[0];
			break;

		case sparc_leon:
			insns = &p->leon[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}
		*(unsigned int *) (addr + 0) = insns[0];
		flushi(addr + 0);
		*(unsigned int *) (addr + 4) = insns[1];
		flushi(addr + 4);
		*(unsigned int *) (addr + 8) = insns[2];
		flushi(addr + 8);

		p++;
	}
}

struct leon_1insn_patch_entry {
	unsigned int addr;
	unsigned int insn;
};

enum sparc_cpu sparc_cpu_model;
EXPORT_SYMBOL(sparc_cpu_model);

/*
 * Single-instruction patching for non-LEON models: the kernel is
 * assembled with LEON variants by default, so on other CPUs every
 * recorded site is overwritten with its generic replacement insn.
 */
static __init void leon_patch(void)
{
	struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
	struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;

	/* Default instruction is leon - no patching */
	if (sparc_cpu_model == sparc_leon)
		return;

	while (start < end) {
		unsigned long addr = start->addr;

		*(unsigned int *)(addr) = start->insn;
		flushi(addr);

		start++;
	}
}

struct tt_entry *sparc_ttable;

/* Called from head_32.S - before we have setup anything
 * in the kernel. Be very careful with what you do here.
 */
void __init sparc32_start_kernel(struct linux_romvec *rp)
{
	prom_init(rp);

	/* Set sparc_cpu_model from the PROM-reported machine type
	 * string (cputypval).
	 */
	sparc_cpu_model = sun_unknown;
	if (!strcmp(&cputypval[0], "sun4m"))
		sparc_cpu_model = sun4m;
	if (!strcmp(&cputypval[0], "sun4s"))
		sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */
	if (!strcmp(&cputypval[0], "sun4d"))
		sparc_cpu_model = sun4d;
	if (!strcmp(&cputypval[0], "sun4e"))
		sparc_cpu_model = sun4e;
	if (!strcmp(&cputypval[0], "sun4u"))
		sparc_cpu_model = sun4u;
	if (!strncmp(&cputypval[0], "leon" , 4))
		sparc_cpu_model = sparc_leon;

	leon_patch();
	start_kernel();
}

/*
 * Main 32-bit SPARC arch setup: publish the trap table, fetch and
 * parse the PROM boot arguments, probe physical memory banks, wire
 * up the root device and ramdisk, and finish with MMU/paging init.
 */
void __init setup_arch(char **cmdline_p)
{
	int i;
	unsigned long highest_paddr;

	sparc_ttable = &trapbase[0];

	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strscpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
	parse_early_param();

	boot_flags_init(*cmdline_p);

	register_console(&prom_early_console);

	switch(sparc_cpu_model) {
	case sun4m:
		pr_info("ARCH: SUN4M\n");
		break;
	case sun4d:
		pr_info("ARCH: SUN4D\n");
		break;
	case sun4e:
		pr_info("ARCH: SUN4E\n");
		break;
	case sun4u:
		pr_info("ARCH: SUN4U\n");
		break;
	case sparc_leon:
		pr_info("ARCH: LEON\n");
		break;
	default:
		pr_info("ARCH: UNKNOWN!\n");
		break;
	}

	idprom_init();
	load_mmu();

	/* Find the lowest bank base (phys_base) and the highest
	 * end-of-bank address across all PROM-reported memory banks.
	 */
	phys_base = 0xffffffffUL;
	highest_paddr = 0UL;
	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		unsigned long top;

		if (sp_banks[i].base_addr < phys_base)
			phys_base = sp_banks[i].base_addr;
		top = sp_banks[i].base_addr +
			sp_banks[i].num_bytes;
		if (highest_paddr < top)
			highest_paddr = top;
	}
	pfn_base = phys_base >> PAGE_SHIFT;

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
#endif

	prom_setsync(prom_sync_me);

	/* If booted under the KADB kernel debugger (-d and a valid
	 * debugger vector), hand it our trap table now.
	 */
	if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) &&
	    ((*(short *)linux_dbvec) != -1)) {
		printk("Booted under KADB. Syncing trap table.\n");
		(*(linux_dbvec->teach_debugger))();
	}

	/* Run-time patch instructions to match the cpu model */
	per_cpu_patch();

	paging_init();

	smp_setup_cpu_possible_map();
}

extern int stop_a_enabled;

/* Handle the Stop-A (L1-A) keyboard break: drop to the PROM prompt. */
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	printk("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;

/* Register a struct cpu for every online CPU with the sysfs topology. */
static int __init topology_init(void)
{
	int i, ncpus, err;

	/* Count the number of physically present processors in
	 * the machine, even on uniprocessor, so that /proc/cpuinfo
	 * output is consistent with 2.4.x
	 */
	ncpus = 0;
	while (!cpu_find_by_instance(ncpus, NULL, NULL))
		ncpus++;
	ncpus_probed = ncpus;

	err = 0;
	for_each_online_cpu(i) {
		struct cpu *p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			err = -ENOMEM;
		else
			register_cpu(p, i);
	}

	return err;
}

subsys_initcall(topology_init);

#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP)
/* UP-only: record the calibrated delay loop for cpu 0. */
void __init arch_cpu_finalize_init(void)
{
	cpu_data(0).udelay_val = loops_per_jiffy;
}
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2021, Intel Corporation. */

#ifndef _STMMAC_XDP_H_
#define _STMMAC_XDP_H_

/* Largest RX buffer usable for XDP: @num pages minus the headroom
 * that XDP reserves at the front of each buffer.
 */
#define STMMAC_MAX_RX_BUF_SIZE(num)	(((num) * PAGE_SIZE) - XDP_PACKET_HEADROOM)
/* DMA mapping attributes for XDP RX buffers: skip the automatic CPU
 * sync (done manually per-frame) and allow weak ordering.
 */
#define STMMAC_RX_DMA_ATTR	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/* Attach/detach an AF_XDP buffer pool on RX/TX queue @queue. */
int stmmac_xdp_setup_pool(struct stmmac_priv *priv, struct xsk_buff_pool *pool,
			  u16 queue);
/* Install (or remove, if @prog is NULL) the XDP program on the device. */
int stmmac_xdp_set_prog(struct stmmac_priv *priv, struct bpf_prog *prog,
			struct netlink_ext_ack *extack);

#endif /* _STMMAC_XDP_H_ */
// SPDX-License-Identifier: GPL-2.0-only /* * OMAP5 Voltage Management Routines * * Based on voltagedomains44xx_data.c * * Copyright (C) 2013 Texas Instruments Incorporated - https://www.ti.com */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/init.h> #include "common.h" #include "prm54xx.h" #include "voltage.h" #include "omap_opp_data.h" #include "vc.h" #include "vp.h" static const struct omap_vfsm_instance omap5_vdd_mpu_vfsm = { .voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET, }; static const struct omap_vfsm_instance omap5_vdd_mm_vfsm = { .voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_MM_RET_SLEEP_OFFSET, }; static const struct omap_vfsm_instance omap5_vdd_core_vfsm = { .voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET, }; static struct voltagedomain omap5_voltdm_mpu = { .name = "mpu", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_mpu, .vfsm = &omap5_vdd_mpu_vfsm, .vp = &omap4_vp_mpu, }; static struct voltagedomain omap5_voltdm_mm = { .name = "mm", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_iva, .vfsm = &omap5_vdd_mm_vfsm, .vp = &omap4_vp_iva, }; static struct voltagedomain omap5_voltdm_core = { .name = "core", .scalable = true, .read = omap4_prm_vcvp_read, .write = omap4_prm_vcvp_write, .rmw = omap4_prm_vcvp_rmw, .vc = &omap4_vc_core, .vfsm = &omap5_vdd_core_vfsm, .vp = &omap4_vp_core, }; static struct voltagedomain omap5_voltdm_wkup = { .name = "wkup", }; static struct voltagedomain *voltagedomains_omap5[] __initdata = { &omap5_voltdm_mpu, &omap5_voltdm_mm, &omap5_voltdm_core, &omap5_voltdm_wkup, NULL, }; static const char *const sys_clk_name __initconst = "sys_clkin"; void __init omap54xx_voltagedomains_init(void) { struct voltagedomain *voltdm; int i; for (i = 0; voltdm = voltagedomains_omap5[i], voltdm; i++) voltdm->sys_clk.name = sys_clk_name; 
voltdm_init(voltagedomains_omap5); };
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * pcic.h: JavaEngine 1 specific PCI definitions.
 *
 * Copyright (C) 1998 V. Roganov and G. Raiko
 */

#ifndef __SPARC_PCIC_H
#define __SPARC_PCIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/smp.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <asm/pbm.h>

/*
 * Per-controller state for the PCIC host bridge: mapped register/config
 * windows, the resources that claim them, the generic PBM info, and the
 * config-address -> IRQ map (pcic_imap, pcic_imdim entries).
 */
struct linux_pcic {
	void __iomem		*pcic_regs;
	unsigned long		pcic_io;
	void __iomem		*pcic_config_space_addr;
	void __iomem		*pcic_config_space_data;
	struct resource		pcic_res_regs;
	struct resource		pcic_res_io;
	struct resource		pcic_res_cfg_addr;
	struct resource		pcic_res_cfg_data;
	struct linux_pbm_info	pbm;
	struct pcic_ca2irq	*pcic_imap;
	int			pcic_imdim;
};

#ifdef CONFIG_PCIC_PCI
int pcic_present(void);
int pcic_probe(void);
void pci_time_init(void);
void sun4m_pci_init_IRQ(void);
#else
/* No PCIC configured: stub everything out so callers need no #ifdefs. */
static inline int pcic_present(void) { return 0; }
static inline int pcic_probe(void) { return 0; }
static inline void pci_time_init(void) {}
static inline void sun4m_pci_init_IRQ(void) {}
#endif
#endif

/* Size of PCI I/O space which we relocate. */
#define PCI_SPACE_SIZE			0x1000000	/* 16 MB */

/* PCIC Register Set. */
#define PCI_DIAGNOSTIC_0		0x40	/* 32 bits */
#define PCI_SIZE_0			0x44	/* 32 bits */
#define PCI_SIZE_1			0x48	/* 32 bits */
#define PCI_SIZE_2			0x4c	/* 32 bits */
#define PCI_SIZE_3			0x50	/* 32 bits */
#define PCI_SIZE_4			0x54	/* 32 bits */
#define PCI_SIZE_5			0x58	/* 32 bits */
#define PCI_PIO_CONTROL			0x60	/* 8  bits */
#define PCI_DVMA_CONTROL		0x62	/* 8  bits */
/*
 * NOTE(review): INACTIVITY_REQ and IOTLB_ENABLE are both (1<<0) here —
 * presumably the hardware overloads bit 0; confirm against the PCIC
 * datasheet before relying on them as distinct flags.
 */
#define PCI_DVMA_CONTROL_INACTIVITY_REQ		(1<<0)
#define PCI_DVMA_CONTROL_IOTLB_ENABLE		(1<<0)
#define PCI_DVMA_CONTROL_IOTLB_DISABLE		0
#define PCI_DVMA_CONTROL_INACTIVITY_ACK		(1<<4)
#define PCI_INTERRUPT_CONTROL		0x63	/* 8  bits */
#define PCI_CPU_INTERRUPT_PENDING	0x64	/* 32 bits */
#define PCI_DIAGNOSTIC_1		0x68	/* 16 bits */
#define PCI_SOFTWARE_INT_CLEAR		0x6a	/* 16 bits */
#define PCI_SOFTWARE_INT_SET		0x6e	/* 16 bits */
#define PCI_SYS_INT_PENDING		0x70	/* 32 bits */
#define PCI_SYS_INT_PENDING_PIO		0x40000000
#define PCI_SYS_INT_PENDING_DMA		0x20000000
#define PCI_SYS_INT_PENDING_PCI		0x10000000
#define PCI_SYS_INT_PENDING_APSR	0x08000000
#define PCI_SYS_INT_TARGET_MASK		0x74	/* 32 bits */
#define PCI_SYS_INT_TARGET_MASK_CLEAR	0x78	/* 32 bits */
#define PCI_SYS_INT_TARGET_MASK_SET	0x7c	/* 32 bits */
#define PCI_SYS_INT_PENDING_CLEAR	0x83	/* 8  bits */
#define PCI_SYS_INT_PENDING_CLEAR_ALL	0x80
#define PCI_SYS_INT_PENDING_CLEAR_PIO	0x40
#define PCI_SYS_INT_PENDING_CLEAR_DMA	0x20
#define PCI_SYS_INT_PENDING_CLEAR_PCI	0x10
#define PCI_IOTLB_CONTROL		0x84	/* 8  bits */
#define PCI_INT_SELECT_LO		0x88	/* 16 bits */
#define PCI_ARBITRATION_SELECT		0x8a	/* 16 bits */
#define PCI_INT_SELECT_HI		0x8c	/* 16 bits */
#define PCI_HW_INT_OUTPUT		0x8e	/* 16 bits */
#define PCI_IOTLB_RAM_INPUT		0x90	/* 32 bits */
#define PCI_IOTLB_CAM_INPUT		0x94	/* 32 bits */
#define PCI_IOTLB_RAM_OUTPUT		0x98	/* 32 bits */
#define PCI_IOTLB_CAM_OUTPUT		0x9c	/* 32 bits */
#define PCI_SMBAR0			0xa0	/* 8  bits */
#define PCI_MSIZE0			0xa1	/* 8  bits */
#define PCI_PMBAR0			0xa2	/* 8  bits */
#define PCI_SMBAR1			0xa4	/* 8  bits */
#define PCI_MSIZE1			0xa5	/* 8  bits */
#define PCI_PMBAR1			0xa6	/* 8  bits */
#define PCI_SIBAR			0xa8	/* 8  bits */
#define PCI_SIBAR_ADDRESS_MASK		0xf
/* ISIZE encodes the slave-image window size; smaller value = bigger window. */
#define PCI_ISIZE			0xa9	/* 8  bits */
#define PCI_ISIZE_16M			0xf
#define PCI_ISIZE_32M			0xe
#define PCI_ISIZE_64M			0xc
#define PCI_ISIZE_128M			0x8
#define PCI_ISIZE_256M			0x0
#define PCI_PIBAR			0xaa	/* 8  bits */
#define PCI_CPU_COUNTER_LIMIT_HI	0xac	/* 32 bits */
#define PCI_CPU_COUNTER_LIMIT_LO	0xb0	/* 32 bits */
#define PCI_CPU_COUNTER_LIMIT		0xb4	/* 32 bits */
#define PCI_SYS_LIMIT			0xb8	/* 32 bits */
#define PCI_SYS_COUNTER			0xbc	/* 32 bits */
#define PCI_SYS_COUNTER_OVERFLOW	(1<<31)	/* Limit reached */
#define PCI_SYS_LIMIT_PSEUDO		0xc0	/* 32 bits */
#define PCI_USER_TIMER_CONTROL		0xc4	/* 8  bits */
#define PCI_USER_TIMER_CONFIG		0xc5	/* 8  bits */
/* Counter IRQ register packs the system IRQ in the high nibble, CPU IRQ low. */
#define PCI_COUNTER_IRQ			0xc6	/* 8  bits */
#define  PCI_COUNTER_IRQ_SET(sys_irq, cpu_irq)	((((sys_irq) & 0xf) << 4) | \
						  ((cpu_irq) & 0xf))
#define  PCI_COUNTER_IRQ_SYS(v)			(((v) >> 4) & 0xf)
#define  PCI_COUNTER_IRQ_CPU(v)			((v) & 0xf)
#define PCI_PIO_ERROR_COMMAND		0xc7	/* 8  bits */
#define PCI_PIO_ERROR_ADDRESS		0xc8	/* 32 bits */
#define PCI_IOTLB_ERROR_ADDRESS		0xcc	/* 32 bits */
#define PCI_SYS_STATUS			0xd0	/* 8  bits */
#define PCI_SYS_STATUS_RESET_ENABLE		(1<<0)
#define PCI_SYS_STATUS_RESET			(1<<1)
#define PCI_SYS_STATUS_WATCHDOG_RESET		(1<<4)
#define PCI_SYS_STATUS_PCI_RESET		(1<<5)
#define PCI_SYS_STATUS_PCI_RESET_ENABLE		(1<<6)
/* "SATTELITE" misspelling kept: the identifier is part of the API. */
#define PCI_SYS_STATUS_PCI_SATTELITE_MODE	(1<<7)

#endif /* !(__SPARC_PCIC_H) */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	IPv6 input
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<[email protected]>
 *	Ian P. Morris		<[email protected]>
 *
 *	Based in linux/net/ipv4/ip_input.c
 */
/* Changes
 *
 *	Mitsuru KANDA @USAGI and
 *	YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
#include <linux/slab.h>
#include <linux/indirect_call_wrapper.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>
#include <net/udp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>

/*
 * Optionally early-demux the skb to a TCP/UDP socket, then perform route
 * lookup if no valid dst is attached yet.  Sysctls are read with
 * READ_ONCE() since they may change concurrently.
 */
static void ip6_rcv_finish_core(struct net *net, struct sock *sk,
				struct sk_buff *skb)
{
	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
	    !skb_dst(skb) && !skb->sk) {
		switch (ipv6_hdr(skb)->nexthdr) {
		case IPPROTO_TCP:
			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux))
				tcp_v6_early_demux(skb);
			break;
		case IPPROTO_UDP:
			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux))
				udp_v6_early_demux(skb);
			break;
		}
	}

	if (!skb_valid_dst(skb))
		ip6_route_input(skb);
}

/* Netfilter PRE_ROUTING okfn: finish reception and feed the routed dst. */
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;
	ip6_rcv_finish_core(net, sk, skb);

	return dst_input(skb);
}

/* Deliver every skb on 'head' into its (already set) dst input handler. */
static void ip6_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

/*
 * A previous packet's dst can be reused as a "hint" for this one when the
 * destination addresses match and no dst is attached yet.
 */
static bool ip6_can_use_hint(const struct sk_buff *skb,
			     const struct sk_buff *hint)
{
	return hint && !skb_dst(skb) &&
	       ipv6_addr_equal(&ipv6_hdr(hint)->daddr, &ipv6_hdr(skb)->daddr);
}

/*
 * Return skb as a route hint candidate, or NULL when source-based routing,
 * custom FIB rules, or multipath make dst reuse unsafe.
 */
static struct sk_buff *ip6_extract_route_hint(const struct net *net,
					      struct sk_buff *skb)
{
	if (fib6_routes_require_src(net) || fib6_has_custom_rules(net) ||
	    IP6CB(skb)->flags & IP6SKB_MULTIPATH)
		return NULL;

	return skb;
}

/*
 * List variant of ip6_rcv_finish(): batch consecutive packets that share
 * the same dst into sublists so dst_input() work stays cache-hot.
 */
static void ip6_list_rcv_finish(struct net *net, struct sock *sk,
				struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip6_rcv(skb);
		if (!skb)
			continue;

		if (ip6_can_use_hint(skb, hint))
			skb_dst_copy(skb, hint);
		else
			ip6_rcv_finish_core(net, sk, skb);
		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip6_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip6_sublist_rcv_finish(&sublist);
}

/*
 * Core IPv6 sanity checks on a freshly received packet (RFC 4291 address
 * rules, header validity, length).  Returns the (possibly un-shared) skb
 * on success, or NULL after freeing it with an appropriate drop reason.
 * Takes rcu_read_lock() across the idev accesses and releases it on every
 * exit path.
 */
static struct sk_buff *ip6_rcv_core(struct sk_buff *skb, struct net_device *dev,
				    struct net *net)
{
	enum skb_drop_reason reason;
	const struct ipv6hdr *hdr;
	u32 pkt_len;
	struct inet6_dev *idev;

	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		kfree_skb_reason(skb, SKB_DROP_REASON_OTHERHOST);
		return NULL;
	}

	rcu_read_lock();

	idev = __in6_dev_get(skb->dev);

	__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

	SKB_DR_SET(reason, NOT_SPECIFIED);
	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
	    !idev || unlikely(READ_ONCE(idev->cnf.disable_ipv6))) {
		__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
		if (idev && unlikely(READ_ONCE(idev->cnf.disable_ipv6)))
			SKB_DR_SET(reason, IPV6DISABLED);
		goto drop;
	}

	memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

	/*
	 * Store incoming device index. When the packet will
	 * be queued, we cannot refer to skb->dev anymore.
	 *
	 * BTW, when we send a packet for our own local address on a
	 * non-loopback interface (e.g. ethX), it is being delivered
	 * via the loopback interface (lo) here; skb->dev = loopback_dev.
	 * It, however, should be considered as if it is being
	 * arrived via the sending interface (ethX), because of the
	 * nature of scoping architecture. --yoshfuji
	 */
	IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;

	if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
		goto err;

	hdr = ipv6_hdr(skb);

	if (hdr->version != 6) {
		SKB_DR_SET(reason, UNHANDLED_PROTO);
		goto err;
	}

	/* Per-ECN-codepoint counters; count GSO packets by segment count. */
	__IP6_ADD_STATS(net, idev,
			IPSTATS_MIB_NOECTPKTS +
				(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
			max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
	/*
	 * RFC4291 2.5.3
	 * The loopback address must not be used as the source address in IPv6
	 * packets that are sent outside of a single node. [..]
	 * A packet received on an interface with a destination address
	 * of loopback must be dropped.
	 */
	if ((ipv6_addr_loopback(&hdr->saddr) ||
	     ipv6_addr_loopback(&hdr->daddr)) &&
	    !(dev->flags & IFF_LOOPBACK) &&
	    !netif_is_l3_master(dev))
		goto err;

	/* RFC4291 Errata ID: 3480
	 * Interface-Local scope spans only a single interface on a
	 * node and is useful only for loopback transmission of
	 * multicast.  Packets with interface-local scope received
	 * from another node must be discarded.
	 */
	if (!(skb->pkt_type == PACKET_LOOPBACK ||
	      dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
		goto err;

	/* If enabled, drop unicast packets that were encapsulated in link-layer
	 * multicast or broadcast to protect against the so-called "hole-196"
	 * attack in 802.11 wireless.
	 */
	if (!ipv6_addr_is_multicast(&hdr->daddr) &&
	    (skb->pkt_type == PACKET_BROADCAST ||
	     skb->pkt_type == PACKET_MULTICAST) &&
	    READ_ONCE(idev->cnf.drop_unicast_in_l2_multicast)) {
		SKB_DR_SET(reason, UNICAST_IN_L2_MULTICAST);
		goto err;
	}

	/* RFC4291 2.7
	 * Nodes must not originate a packet to a multicast address whose scope
	 * field contains the reserved value 0; if such a packet is received, it
	 * must be silently dropped.
	 */
	if (ipv6_addr_is_multicast(&hdr->daddr) &&
	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
		goto err;

	/*
	 * RFC4291 2.7
	 * Multicast addresses must not be used as source addresses in IPv6
	 * packets or appear in any Routing header.
	 */
	if (ipv6_addr_is_multicast(&hdr->saddr))
		goto err;

	skb->transport_header = skb->network_header + sizeof(*hdr);
	IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

	pkt_len = ntohs(hdr->payload_len);

	/* pkt_len may be zero if Jumbo payload option is present */
	if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
		if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
			__IP6_INC_STATS(net,
					idev, IPSTATS_MIB_INTRUNCATEDPKTS);
			SKB_DR_SET(reason, PKT_TOO_SMALL);
			goto drop;
		}
		if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr)))
			goto err;
		hdr = ipv6_hdr(skb);
	}

	if (hdr->nexthdr == NEXTHDR_HOP) {
		/* ipv6_parse_hopopts() frees the skb itself on error. */
		if (ipv6_parse_hopopts(skb) < 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
			rcu_read_unlock();
			return NULL;
		}
	}

	rcu_read_unlock();

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;
err:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
	SKB_DR_OR(reason, IP_INHDR);
drop:
	rcu_read_unlock();
	kfree_skb_reason(skb, reason);
	return NULL;
}

/* Single-packet IPv6 receive entry point (registered packet_type handler). */
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	     struct net_device *orig_dev)
{
	struct net *net = dev_net(skb->dev);

	skb = ip6_rcv_core(skb, dev, net);
	if (skb == NULL)
		return NET_RX_DROP;
	return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip6_rcv_finish);
}

/* Run a same-(dev, net) sublist through PRE_ROUTING, then finish it. */
static void ip6_sublist_rcv(struct list_head *head, struct net_device *dev,
			    struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV6, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip6_rcv_finish);
	ip6_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IPv6 packets */
void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
		   struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip6_rcv_core(skb, dev, net);
		if (skb == NULL)
			continue;

		/* Batch packets sharing the same (dev, net) pair. */
		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip6_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip6_sublist_rcv(&sublist, curr_dev, curr_net);
}

INDIRECT_CALLABLE_DECLARE(int tcp_v6_rcv(struct sk_buff *));

/*
 *	Deliver the packet to the host
 */
/*
 * Walk the extension-header chain and hand the packet to the registered
 * inet6 protocol handler for 'nexthdr'.  Must be called under RCU.
 * 'have_final' tracks whether a final (non-extension) protocol has been
 * seen already, which forbids further encapsulation except via handler
 * return values (> 0 means "resubmit as this protocol").
 */
void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr,
			      bool have_final)
{
	const struct inet6_protocol *ipprot;
	struct inet6_dev *idev;
	unsigned int nhoff;
	SKB_DR(reason);
	bool raw;

	/*
	 *	Parse extension headers
	 */

resubmit:
	idev = ip6_dst_idev(skb_dst(skb));
	nhoff = IP6CB(skb)->nhoff;
	if (!have_final) {
		if (!pskb_pull(skb, skb_transport_offset(skb)))
			goto discard;
		nexthdr = skb_network_header(skb)[nhoff];
	}

resubmit_final:
	raw = raw6_local_deliver(skb, nexthdr);
	ipprot = rcu_dereference(inet6_protos[nexthdr]);
	if (ipprot) {
		int ret;

		if (have_final) {
			if (!(ipprot->flags & INET6_PROTO_FINAL)) {
				/* Once we've seen a final protocol don't
				 * allow encapsulation on any non-final
				 * ones. This allows foo in UDP encapsulation
				 * to work.
				 */
				goto discard;
			}
		} else if (ipprot->flags & INET6_PROTO_FINAL) {
			const struct ipv6hdr *hdr;
			int sdif = inet6_sdif(skb);
			struct net_device *dev;

			/* Only do this once for first final protocol */
			have_final = true;

			skb_postpull_rcsum(skb, skb_network_header(skb),
					   skb_network_header_len(skb));
			hdr = ipv6_hdr(skb);

			/* skb->dev passed may be master dev for vrfs. */
			if (sdif) {
				dev = dev_get_by_index_rcu(net, sdif);
				if (!dev)
					goto discard;
			} else {
				dev = skb->dev;
			}

			/* Multicast must be for a group we joined, or MLD. */
			if (ipv6_addr_is_multicast(&hdr->daddr) &&
			    !ipv6_chk_mcast_addr(dev, &hdr->daddr,
						 &hdr->saddr) &&
			    !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) {
				SKB_DR_SET(reason, IP_INADDRERRORS);
				goto discard;
			}
		}
		if (!(ipprot->flags & INET6_PROTO_NOPOLICY)) {
			if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				SKB_DR_SET(reason, XFRM_POLICY);
				goto discard;
			}
			nf_reset_ct(skb);
		}

		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v6_rcv, udpv6_rcv,
				      skb);
		if (ret > 0) {
			if (ipprot->flags & INET6_PROTO_FINAL) {
				/* Not an extension header, most likely UDP
				 * encapsulation. Use return value as nexthdr
				 * protocol not nhoff (which presumably is
				 * not set by handler).
				 */
				nexthdr = ret;
				goto resubmit_final;
			} else {
				goto resubmit;
			}
		} else if (ret == 0) {
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
		}
	} else {
		if (!raw) {
			if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP6_INC_STATS(net, idev,
						IPSTATS_MIB_INUNKNOWNPROTOS);
				icmpv6_send(skb, ICMPV6_PARAMPROB,
					    ICMPV6_UNK_NEXTHDR, nhoff);
				SKB_DR_SET(reason, IP_NOPROTO);
			} else {
				SKB_DR_SET(reason, XFRM_POLICY);
			}
			kfree_skb_reason(skb, reason);
		} else {
			/* A raw socket took it; counted as delivered. */
			__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
	return;

discard:
	__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
	kfree_skb_reason(skb, reason);
}

/* LOCAL_IN okfn: start protocol delivery from the first extension header. */
static int ip6_input_finish(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	skb_clear_delivery_time(skb);
	rcu_read_lock();
	ip6_protocol_deliver_rcu(net, skb, 0, false);
	rcu_read_unlock();

	return 0;
}

/* Local-delivery entry point installed as the dst input for host packets. */
int ip6_input(struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
		       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
		       ip6_input_finish);
}
EXPORT_SYMBOL_GPL(ip6_input);

/*
 * Multicast input: decide whether to deliver locally (membership / MLD
 * router-alert checks) and, with MROUTE, whether to also clone the packet
 * into the multicast forwarding path.
 */
int ip6_mc_input(struct sk_buff *skb)
{
	int sdif = inet6_sdif(skb);
	const struct ipv6hdr *hdr;
	struct net_device *dev;
	bool deliver;

	__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
			 __in6_dev_get_safely(skb->dev), IPSTATS_MIB_INMCAST,
			 skb->len);

	/* skb->dev passed may be master dev for vrfs. */
	if (sdif) {
		rcu_read_lock();
		dev = dev_get_by_index_rcu(dev_net(skb->dev), sdif);
		if (!dev) {
			rcu_read_unlock();
			kfree_skb(skb);
			return -ENODEV;
		}
	} else {
		dev = skb->dev;
	}

	hdr = ipv6_hdr(skb);
	deliver = ipv6_chk_mcast_addr(dev, &hdr->daddr, NULL);

	if (sdif)
		rcu_read_unlock();

#ifdef CONFIG_IPV6_MROUTE
	/*
	 *      IPv6 multicast router mode is now supported ;)
	 */
	if (atomic_read(&dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding) &&
	    !(ipv6_addr_type(&hdr->daddr) &
	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
		/*
		 * Okay, we try to forward - split and duplicate
		 * packets.
		 */
		struct sk_buff *skb2;
		struct inet6_skb_parm *opt = IP6CB(skb);

		/* Check for MLD */
		if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
			/* Check if this is a mld message */
			u8 nexthdr = hdr->nexthdr;
			__be16 frag_off;
			int offset;

			/* Check if the value of Router Alert
			 * is for MLD (0x0000).
			 */
			if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
				deliver = false;

				if (!ipv6_ext_hdr(nexthdr)) {
					/* BUG */
					goto out;
				}
				offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
							  &nexthdr, &frag_off);
				if (offset < 0)
					goto out;

				/* MLD messages are always delivered locally. */
				if (ipv6_is_mld(skb, nexthdr, offset))
					deliver = true;

				goto out;
			}
			/* unknown RA - process it normally */
		}

		/* Clone when both forwarding and local delivery are needed. */
		if (deliver)
			skb2 = skb_clone(skb, GFP_ATOMIC);
		else {
			skb2 = skb;
			skb = NULL;
		}

		if (skb2) {
			ip6_mr_input(skb2);
		}
	}
out:
#endif
	if (likely(deliver))
		ip6_input(skb);
	else {
		/* discard */
		kfree_skb(skb);
	}

	return 0;
}
// SPDX-License-Identifier: GPL-2.0-only /* * Secure Digital Host Controller Interface ACPI driver. * * Copyright (c) 2012, Intel Corporation. */ #include <linux/bitfield.h> #include <linux/init.h> #include <linux/export.h> #include <linux/module.h> #include <linux/device.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_device.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/compiler.h> #include <linux/stddef.h> #include <linux/bitops.h> #include <linux/types.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/acpi.h> #include <linux/pm.h> #include <linux/pm_runtime.h> #include <linux/delay.h> #include <linux/dmi.h> #include <linux/mmc/host.h> #include <linux/mmc/pm.h> #include <linux/mmc/slot-gpio.h> #ifdef CONFIG_X86 #include <linux/platform_data/x86/soc.h> #include <asm/iosf_mbi.h> #endif #include "sdhci.h" enum { SDHCI_ACPI_SD_CD = BIT(0), SDHCI_ACPI_RUNTIME_PM = BIT(1), SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2), }; struct sdhci_acpi_chip { const struct sdhci_ops *ops; unsigned int quirks; unsigned int quirks2; unsigned long caps; unsigned int caps2; mmc_pm_flag_t pm_caps; }; struct sdhci_acpi_slot { const struct sdhci_acpi_chip *chip; unsigned int quirks; unsigned int quirks2; unsigned long caps; unsigned int caps2; mmc_pm_flag_t pm_caps; unsigned int flags; size_t priv_size; int (*probe_slot)(struct platform_device *, struct acpi_device *); int (*remove_slot)(struct platform_device *); int (*free_slot)(struct platform_device *pdev); int (*setup_host)(struct platform_device *pdev); }; struct sdhci_acpi_host { struct sdhci_host *host; const struct sdhci_acpi_slot *slot; struct platform_device *pdev; bool use_runtime_pm; bool is_intel; bool reset_signal_volt_on_suspend; unsigned long private[] ____cacheline_aligned; }; enum { DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP = BIT(0), DMI_QUIRK_SD_NO_WRITE_PROTECT = BIT(1), DMI_QUIRK_SD_CD_ACTIVE_HIGH = BIT(2), 
DMI_QUIRK_SD_CD_ENABLE_PULL_UP = BIT(3), }; static inline void *sdhci_acpi_priv(struct sdhci_acpi_host *c) { return (void *)c->private; } static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) { return c->slot && (c->slot->flags & flag); } #define INTEL_DSM_HS_CAPS_SDR25 BIT(0) #define INTEL_DSM_HS_CAPS_DDR50 BIT(1) #define INTEL_DSM_HS_CAPS_SDR50 BIT(2) #define INTEL_DSM_HS_CAPS_SDR104 BIT(3) enum { INTEL_DSM_FNS = 0, INTEL_DSM_V18_SWITCH = 3, INTEL_DSM_V33_SWITCH = 4, INTEL_DSM_HS_CAPS = 8, }; struct intel_host { u32 dsm_fns; u32 hs_caps; }; static const guid_t intel_dsm_guid = GUID_INIT(0xF6C13EA5, 0x65CD, 0x461F, 0xAB, 0x7A, 0x29, 0xF7, 0xE8, 0xD5, 0xBD, 0x61); static int __intel_dsm(struct intel_host *intel_host, struct device *dev, unsigned int fn, u32 *result) { union acpi_object *obj; int err = 0; obj = acpi_evaluate_dsm(ACPI_HANDLE(dev), &intel_dsm_guid, 0, fn, NULL); if (!obj) return -EOPNOTSUPP; if (obj->type == ACPI_TYPE_INTEGER) { *result = obj->integer.value; } else if (obj->type == ACPI_TYPE_BUFFER && obj->buffer.length > 0) { size_t len = min_t(size_t, obj->buffer.length, 4); *result = 0; memcpy(result, obj->buffer.pointer, len); } else { dev_err(dev, "%s DSM fn %u obj->type %d obj->buffer.length %d\n", __func__, fn, obj->type, obj->buffer.length); err = -EINVAL; } ACPI_FREE(obj); return err; } static int intel_dsm(struct intel_host *intel_host, struct device *dev, unsigned int fn, u32 *result) { if (fn > 31 || !(intel_host->dsm_fns & (1 << fn))) return -EOPNOTSUPP; return __intel_dsm(intel_host, dev, fn, result); } static void intel_dsm_init(struct intel_host *intel_host, struct device *dev, struct mmc_host *mmc) { int err; intel_host->hs_caps = ~0; err = __intel_dsm(intel_host, dev, INTEL_DSM_FNS, &intel_host->dsm_fns); if (err) { pr_debug("%s: DSM not supported, error %d\n", mmc_hostname(mmc), err); return; } pr_debug("%s: DSM function mask %#x\n", mmc_hostname(mmc), intel_host->dsm_fns); intel_dsm(intel_host, dev, 
INTEL_DSM_HS_CAPS, &intel_host->hs_caps); } static int intel_start_signal_voltage_switch(struct mmc_host *mmc, struct mmc_ios *ios) { struct device *dev = mmc_dev(mmc); struct sdhci_acpi_host *c = dev_get_drvdata(dev); struct intel_host *intel_host = sdhci_acpi_priv(c); unsigned int fn; u32 result = 0; int err; err = sdhci_start_signal_voltage_switch(mmc, ios); if (err) return err; switch (ios->signal_voltage) { case MMC_SIGNAL_VOLTAGE_330: fn = INTEL_DSM_V33_SWITCH; break; case MMC_SIGNAL_VOLTAGE_180: fn = INTEL_DSM_V18_SWITCH; break; default: return 0; } err = intel_dsm(intel_host, dev, fn, &result); pr_debug("%s: %s DSM fn %u error %d result %u\n", mmc_hostname(mmc), __func__, fn, err, result); return 0; } static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) { u8 reg; reg = sdhci_readb(host, SDHCI_POWER_CONTROL); reg |= 0x10; sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); /* For eMMC, minimum is 1us but give it 9us for good measure */ udelay(9); reg &= ~0x10; sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); /* For eMMC, minimum is 200us but give it 300us for good measure */ usleep_range(300, 1000); } static const struct sdhci_ops sdhci_acpi_ops_dflt = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_ops sdhci_acpi_ops_int = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_acpi_int_hw_reset, }; static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { .ops = &sdhci_acpi_ops_int, }; #ifdef CONFIG_X86 #define BYT_IOSF_SCCEP 0x63 #define BYT_IOSF_OCP_NETCTRL0 0x1078 #define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8) static void sdhci_acpi_byt_setting(struct device *dev) { u32 val = 0; if (!soc_intel_is_byt()) return; if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0, &val)) { dev_err(dev, "%s read error\n", __func__); 
return; } if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE)) return; val &= ~BYT_IOSF_OCP_TIMEOUT_BASE; if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0, val)) { dev_err(dev, "%s write error\n", __func__); return; } dev_dbg(dev, "%s completed\n", __func__); } static bool sdhci_acpi_byt_defer(struct device *dev) { if (!soc_intel_is_byt()) return false; if (!iosf_mbi_available()) return true; sdhci_acpi_byt_setting(dev); return false; } #else static inline void sdhci_acpi_byt_setting(struct device *dev) { } static inline bool sdhci_acpi_byt_defer(struct device *dev) { return false; } #endif static int bxt_get_cd(struct mmc_host *mmc) { int gpio_cd = mmc_gpio_get_cd(mmc); if (!gpio_cd) return 0; return sdhci_get_cd_nogpio(mmc); } static int intel_probe_slot(struct platform_device *pdev, struct acpi_device *adev) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct intel_host *intel_host = sdhci_acpi_priv(c); struct sdhci_host *host = c->host; if (acpi_dev_hid_uid_match(adev, "80860F14", "1") && sdhci_readl(host, SDHCI_CAPABILITIES) == 0x446cc8b2 && sdhci_readl(host, SDHCI_CAPABILITIES_1) == 0x00000807) host->timeout_clk = 1000; /* 1000 kHz i.e. 
1 MHz */ if (acpi_dev_hid_uid_match(adev, "80865ACA", NULL)) host->mmc_host_ops.get_cd = bxt_get_cd; intel_dsm_init(intel_host, &pdev->dev, host->mmc); host->mmc_host_ops.start_signal_voltage_switch = intel_start_signal_voltage_switch; c->is_intel = true; return 0; } static int intel_setup_host(struct platform_device *pdev) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct intel_host *intel_host = sdhci_acpi_priv(c); if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR25)) c->host->mmc->caps &= ~MMC_CAP_UHS_SDR25; if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR50)) c->host->mmc->caps &= ~MMC_CAP_UHS_SDR50; if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_DDR50)) c->host->mmc->caps &= ~MMC_CAP_UHS_DDR50; if (!(intel_host->hs_caps & INTEL_DSM_HS_CAPS_SDR104)) c->host->mmc->caps &= ~MMC_CAP_UHS_SDR104; return 0; } static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { .chip = &sdhci_acpi_chip_int, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET | MMC_CAP_1_8V_DDR | MMC_CAP_CMD_DURING_TFR | MMC_CAP_WAIT_WHILE_BUSY, .flags = SDHCI_ACPI_RUNTIME_PM, .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | SDHCI_QUIRK_NO_LED, .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | SDHCI_QUIRK2_STOP_WITH_TC | SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400, .probe_slot = intel_probe_slot, .setup_host = intel_setup_host, .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_NO_LED | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD | MMC_CAP_WAIT_WHILE_BUSY, .flags = SDHCI_ACPI_RUNTIME_PM, .pm_caps = MMC_PM_KEEP_POWER, .probe_slot = intel_probe_slot, .setup_host = intel_setup_host, .priv_size = sizeof(struct intel_host), }; static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL | SDHCI_ACPI_RUNTIME_PM, .quirks = 
SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | SDHCI_QUIRK_NO_LED, .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON | SDHCI_QUIRK2_STOP_WITH_TC, .caps = MMC_CAP_WAIT_WHILE_BUSY | MMC_CAP_AGGRESSIVE_PM, .probe_slot = intel_probe_slot, .setup_host = intel_setup_host, .priv_size = sizeof(struct intel_host), }; #define VENDOR_SPECIFIC_PWRCTL_CLEAR_REG 0x1a8 #define VENDOR_SPECIFIC_PWRCTL_CTL_REG 0x1ac static irqreturn_t sdhci_acpi_qcom_handler(int irq, void *ptr) { struct sdhci_host *host = ptr; sdhci_writel(host, 0x3, VENDOR_SPECIFIC_PWRCTL_CLEAR_REG); sdhci_writel(host, 0x1, VENDOR_SPECIFIC_PWRCTL_CTL_REG); return IRQ_HANDLED; } static int qcom_probe_slot(struct platform_device *pdev, struct acpi_device *adev) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct sdhci_host *host = c->host; int *irq = sdhci_acpi_priv(c); *irq = -EINVAL; if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL)) return 0; *irq = platform_get_irq(pdev, 1); if (*irq < 0) return 0; return request_threaded_irq(*irq, NULL, sdhci_acpi_qcom_handler, IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "sdhci_qcom", host); } static int qcom_free_slot(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct sdhci_host *host = c->host; struct acpi_device *adev; int *irq = sdhci_acpi_priv(c); adev = ACPI_COMPANION(dev); if (!adev) return -ENODEV; if (!acpi_dev_hid_uid_match(adev, "QCOM8051", NULL)) return 0; if (*irq < 0) return 0; free_irq(*irq, host); return 0; } static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd_3v = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, .quirks2 = SDHCI_QUIRK2_NO_1_8_V, .caps = MMC_CAP_NONREMOVABLE, .priv_size = sizeof(int), .probe_slot = qcom_probe_slot, .free_slot = qcom_free_slot, }; static const struct sdhci_acpi_slot sdhci_acpi_slot_qcom_sd = { .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, .caps = MMC_CAP_NONREMOVABLE, }; struct amd_sdhci_host { bool tuned_clock; bool dll_enabled; }; /* AMD sdhci reset dll 
register. */ #define SDHCI_AMD_RESET_DLL_REGISTER 0x908 static int amd_select_drive_strength(struct mmc_card *card, unsigned int max_dtr, int host_drv, int card_drv, int *host_driver_strength) { struct sdhci_host *host = mmc_priv(card->host); u16 preset, preset_driver_strength; /* * This method is only called by mmc_select_hs200 so we only need to * read from the HS200 (SDR104) preset register. * * Firmware that has "invalid/default" presets return a driver strength * of A. This matches the previously hard coded value. */ preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); preset_driver_strength = FIELD_GET(SDHCI_PRESET_DRV_MASK, preset); /* * We want the controller driver strength to match the card's driver * strength so they have similar rise/fall times. * * The controller driver strength set by this method is sticky for all * timings after this method is called. This unfortunately means that * while HS400 tuning is in progress we end up with mismatched driver * strengths between the controller and the card. HS400 tuning requires * switching from HS400->DDR52->HS->HS200->HS400. So the driver mismatch * happens while in DDR52 and HS modes. This has not been observed to * cause problems. Enabling presets would fix this issue. */ *host_driver_strength = preset_driver_strength; /* * The resulting card driver strength is only set when switching the * card's timing to HS200 or HS400. The card will use the default driver * strength (B) for any other mode. 
*/ return preset_driver_strength; } static void sdhci_acpi_amd_hs400_dll(struct sdhci_host *host, bool enable) { struct sdhci_acpi_host *acpi_host = sdhci_priv(host); struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); /* AMD Platform requires dll setting */ sdhci_writel(host, 0x40003210, SDHCI_AMD_RESET_DLL_REGISTER); usleep_range(10, 20); if (enable) sdhci_writel(host, 0x40033210, SDHCI_AMD_RESET_DLL_REGISTER); amd_host->dll_enabled = enable; } /* * The initialization sequence for HS400 is: * HS->HS200->Perform Tuning->HS->HS400 * * The re-tuning sequence is: * HS400->DDR52->HS->HS200->Perform Tuning->HS->HS400 * * The AMD eMMC Controller can only use the tuned clock while in HS200 and HS400 * mode. If we switch to a different mode, we need to disable the tuned clock. * If we have previously performed tuning and switch back to HS200 or * HS400, we can re-enable the tuned clock. * */ static void amd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdhci_host *host = mmc_priv(mmc); struct sdhci_acpi_host *acpi_host = sdhci_priv(host); struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); unsigned int old_timing = host->timing; u16 val; sdhci_set_ios(mmc, ios); if (old_timing != host->timing && amd_host->tuned_clock) { if (host->timing == MMC_TIMING_MMC_HS400 || host->timing == MMC_TIMING_MMC_HS200) { val = sdhci_readw(host, SDHCI_HOST_CONTROL2); val |= SDHCI_CTRL_TUNED_CLK; sdhci_writew(host, val, SDHCI_HOST_CONTROL2); } else { val = sdhci_readw(host, SDHCI_HOST_CONTROL2); val &= ~SDHCI_CTRL_TUNED_CLK; sdhci_writew(host, val, SDHCI_HOST_CONTROL2); } /* DLL is only required for HS400 */ if (host->timing == MMC_TIMING_MMC_HS400 && !amd_host->dll_enabled) sdhci_acpi_amd_hs400_dll(host, true); } } static int amd_sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { int err; struct sdhci_host *host = mmc_priv(mmc); struct sdhci_acpi_host *acpi_host = sdhci_priv(host); struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); 
amd_host->tuned_clock = false; err = sdhci_execute_tuning(mmc, opcode); if (!err && !host->tuning_err) amd_host->tuned_clock = true; return err; } static void amd_sdhci_reset(struct sdhci_host *host, u8 mask) { struct sdhci_acpi_host *acpi_host = sdhci_priv(host); struct amd_sdhci_host *amd_host = sdhci_acpi_priv(acpi_host); if (mask & SDHCI_RESET_ALL) { amd_host->tuned_clock = false; sdhci_acpi_amd_hs400_dll(host, false); } sdhci_reset(host, mask); } static const struct sdhci_ops sdhci_acpi_ops_amd = { .set_clock = sdhci_set_clock, .set_bus_width = sdhci_set_bus_width, .reset = amd_sdhci_reset, .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_acpi_chip sdhci_acpi_chip_amd = { .ops = &sdhci_acpi_ops_amd, }; static int sdhci_acpi_emmc_amd_probe_slot(struct platform_device *pdev, struct acpi_device *adev) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct sdhci_host *host = c->host; sdhci_read_caps(host); if (host->caps1 & SDHCI_SUPPORT_DDR50) host->mmc->caps = MMC_CAP_1_8V_DDR; if ((host->caps1 & SDHCI_SUPPORT_SDR104) && (host->mmc->caps & MMC_CAP_1_8V_DDR)) host->mmc->caps2 = MMC_CAP2_HS400_1_8V; /* * There are two types of presets out in the wild: * 1) Default/broken presets. * These presets have two sets of problems: * a) The clock divisor for SDR12, SDR25, and SDR50 is too small. * This results in clock frequencies that are 2x higher than * acceptable. i.e., SDR12 = 25 MHz, SDR25 = 50 MHz, SDR50 = * 100 MHz.x * b) The HS200 and HS400 driver strengths don't match. * By default, the SDR104 preset register has a driver strength of * A, but the (internal) HS400 preset register has a driver * strength of B. As part of initializing HS400, HS200 tuning * needs to be performed. Having different driver strengths * between tuning and operation is wrong. It results in different * rise/fall times that lead to incorrect sampling. * 2) Firmware with properly initialized presets. * These presets have proper clock divisors. 
i.e., SDR12 => 12MHz, * SDR25 => 25 MHz, SDR50 => 50 MHz. Additionally the HS200 and * HS400 preset driver strengths match. * * Enabling presets for HS400 doesn't work for the following reasons: * 1) sdhci_set_ios has a hard coded list of timings that are used * to determine if presets should be enabled. * 2) sdhci_get_preset_value is using a non-standard register to * read out HS400 presets. The AMD controller doesn't support this * non-standard register. In fact, it doesn't expose the HS400 * preset register anywhere in the SDHCI memory map. This results * in reading a garbage value and using the wrong presets. * * Since HS400 and HS200 presets must be identical, we could * instead use the SDR104 preset register. * * If the above issues are resolved we could remove this quirk for * firmware that has valid presets (i.e., SDR12 <= 12 MHz). */ host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; host->mmc_host_ops.select_drive_strength = amd_select_drive_strength; host->mmc_host_ops.set_ios = amd_set_ios; host->mmc_host_ops.execute_tuning = amd_sdhci_execute_tuning; return 0; } static const struct sdhci_acpi_slot sdhci_acpi_slot_amd_emmc = { .chip = &sdhci_acpi_chip_amd, .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE, .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE, .quirks2 = SDHCI_QUIRK2_BROKEN_64_BIT_DMA, .probe_slot = sdhci_acpi_emmc_amd_probe_slot, .priv_size = sizeof(struct amd_sdhci_host), }; struct sdhci_acpi_uid_slot { const char *hid; const char *uid; const struct sdhci_acpi_slot *slot; }; static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { { "80865ACA", NULL, &sdhci_acpi_slot_int_sd }, { "80865ACC", NULL, &sdhci_acpi_slot_int_emmc }, { "80865AD0", NULL, &sdhci_acpi_slot_int_sdio }, { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, { "80860F14" , "2" , &sdhci_acpi_slot_int_sdio }, { "80860F14" , "3" , &sdhci_acpi_slot_int_sd }, { "80860F16" , NULL, &sdhci_acpi_slot_int_sd }, { "INT33BB" , "2" , 
&sdhci_acpi_slot_int_sdio }, { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, { "PNP0FFF" , "3" , &sdhci_acpi_slot_int_sd }, { "PNP0D40" }, { "QCOM8051", NULL, &sdhci_acpi_slot_qcom_sd_3v }, { "QCOM8052", NULL, &sdhci_acpi_slot_qcom_sd }, { "AMDI0040", NULL, &sdhci_acpi_slot_amd_emmc }, { "AMDI0041", NULL, &sdhci_acpi_slot_amd_emmc }, { }, }; static const struct acpi_device_id sdhci_acpi_ids[] = { { "80865ACA" }, { "80865ACC" }, { "80865AD0" }, { "80860F14" }, { "80860F16" }, { "INT33BB" }, { "INT33C6" }, { "INT3436" }, { "INT344D" }, { "PNP0D40" }, { "QCOM8051" }, { "QCOM8052" }, { "AMDI0040" }, { "AMDI0041" }, { }, }; MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); /* Please keep this list sorted alphabetically */ static const struct dmi_system_id sdhci_acpi_quirks[] = { { /* * The Acer Aspire Switch 10 (SW5-012) microSD slot always * reports the card being write-protected even though microSD * cards do not have a write-protect switch at all. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire SW5-012"), }, .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, }, { /* Asus T100TA, needs pull-up for cd but DSDT GpioInt has NoPull set */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "T100TA"), }, .driver_data = (void *)DMI_QUIRK_SD_CD_ENABLE_PULL_UP, }, { /* * The Lenovo Miix 320-10ICR has a bug in the _PS0 method of * the SHC1 ACPI device, this bug causes it to reprogram the * wrong LDO (DLDO3) to 1.8V if 1.8V modes are used and the * card is (runtime) suspended + resumed. DLDO3 is used for * the LCD and setting it to 1.8V causes the LCD to go black. 
*/ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo MIIX 320-10ICR"), }, .driver_data = (void *)DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP, }, { /* * Lenovo Yoga Tablet 2 Pro 1380F/L (13" Android version) this * has broken WP reporting and an inverted CD signal. * Note this has more or less the same BIOS as the Lenovo Yoga * Tablet 2 830F/L or 1050F/L (8" and 10" Android), but unlike * the 830 / 1050 models which share the same mainboard this * model has a different mainboard and the inverted CD and * broken WP are unique to this board. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), /* Full match so as to NOT match the 830/1050 BIOS */ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"), }, .driver_data = (void *)(DMI_QUIRK_SD_NO_WRITE_PROTECT | DMI_QUIRK_SD_CD_ACTIVE_HIGH), }, { /* * The Toshiba WT8-B's microSD slot always reports the card being * write-protected. */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA ENCORE 2 WT8-B"), }, .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, }, { /* * The Toshiba WT10-A's microSD slot always reports the card being * write-protected. 
*/ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "TOSHIBA WT10-A"), }, .driver_data = (void *)DMI_QUIRK_SD_NO_WRITE_PROTECT, }, {} /* Terminating entry */ }; static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(struct acpi_device *adev) { const struct sdhci_acpi_uid_slot *u; for (u = sdhci_acpi_uids; u->hid; u++) { if (acpi_dev_hid_uid_match(adev, u->hid, u->uid)) return u->slot; } return NULL; } static int sdhci_acpi_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; const struct sdhci_acpi_slot *slot; const struct dmi_system_id *id; struct acpi_device *device; struct sdhci_acpi_host *c; struct sdhci_host *host; struct resource *iomem; resource_size_t len; size_t priv_size; int quirks = 0; int err; device = ACPI_COMPANION(dev); if (!device) return -ENODEV; id = dmi_first_match(sdhci_acpi_quirks); if (id) quirks = (long)id->driver_data; slot = sdhci_acpi_get_slot(device); /* Power on the SDHCI controller and its children */ acpi_device_fix_up_power_extended(device); if (sdhci_acpi_byt_defer(dev)) return -EPROBE_DEFER; iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) return -ENOMEM; len = resource_size(iomem); if (len < 0x100) dev_err(dev, "Invalid iomem size!\n"); if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) return -ENOMEM; priv_size = slot ? 
slot->priv_size : 0; host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host) + priv_size); if (IS_ERR(host)) return PTR_ERR(host); c = sdhci_priv(host); c->host = host; c->slot = slot; c->pdev = pdev; c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); platform_set_drvdata(pdev, c); host->hw_name = "ACPI"; host->ops = &sdhci_acpi_ops_dflt; host->irq = platform_get_irq(pdev, 0); if (host->irq < 0) { err = host->irq; goto err_free; } host->ioaddr = devm_ioremap(dev, iomem->start, resource_size(iomem)); if (host->ioaddr == NULL) { err = -ENOMEM; goto err_free; } if (c->slot) { if (c->slot->probe_slot) { err = c->slot->probe_slot(pdev, device); if (err) goto err_free; } if (c->slot->chip) { host->ops = c->slot->chip->ops; host->quirks |= c->slot->chip->quirks; host->quirks2 |= c->slot->chip->quirks2; host->mmc->caps |= c->slot->chip->caps; host->mmc->caps2 |= c->slot->chip->caps2; host->mmc->pm_caps |= c->slot->chip->pm_caps; } host->quirks |= c->slot->quirks; host->quirks2 |= c->slot->quirks2; host->mmc->caps |= c->slot->caps; host->mmc->caps2 |= c->slot->caps2; host->mmc->pm_caps |= c->slot->pm_caps; } host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); if (quirks & DMI_QUIRK_SD_CD_ACTIVE_HIGH) host->mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; err = mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0); if (err) { if (err == -EPROBE_DEFER) goto err_free; dev_warn(dev, "failed to setup card detect gpio\n"); c->use_runtime_pm = false; } else if (quirks & DMI_QUIRK_SD_CD_ENABLE_PULL_UP) { mmc_gpiod_set_cd_config(host->mmc, PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 20000)); } if (quirks & DMI_QUIRK_RESET_SD_SIGNAL_VOLT_ON_SUSP) c->reset_signal_volt_on_suspend = true; if (quirks & DMI_QUIRK_SD_NO_WRITE_PROTECT) host->mmc->caps2 |= MMC_CAP2_NO_WRITE_PROTECT; } err = sdhci_setup_host(host); if (err) goto err_free; if (c->slot && c->slot->setup_host) { err = 
c->slot->setup_host(pdev); if (err) goto err_cleanup; } err = __sdhci_add_host(host); if (err) goto err_cleanup; if (c->use_runtime_pm) { pm_runtime_set_active(dev); pm_suspend_ignore_children(dev, 1); pm_runtime_set_autosuspend_delay(dev, 50); pm_runtime_use_autosuspend(dev); pm_runtime_enable(dev); } device_enable_async_suspend(dev); return 0; err_cleanup: sdhci_cleanup_host(c->host); err_free: if (c->slot && c->slot->free_slot) c->slot->free_slot(pdev); sdhci_free_host(c->host); return err; } static void sdhci_acpi_remove(struct platform_device *pdev) { struct sdhci_acpi_host *c = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; int dead; if (c->use_runtime_pm) { pm_runtime_get_sync(dev); pm_runtime_disable(dev); pm_runtime_put_noidle(dev); } if (c->slot && c->slot->remove_slot) c->slot->remove_slot(pdev); dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); sdhci_remove_host(c->host, dead); if (c->slot && c->slot->free_slot) c->slot->free_slot(pdev); sdhci_free_host(c->host); } static void __maybe_unused sdhci_acpi_reset_signal_voltage_if_needed( struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); struct sdhci_host *host = c->host; if (c->is_intel && c->reset_signal_volt_on_suspend && host->mmc->ios.signal_voltage != MMC_SIGNAL_VOLTAGE_330) { struct intel_host *intel_host = sdhci_acpi_priv(c); unsigned int fn = INTEL_DSM_V33_SWITCH; u32 result = 0; intel_dsm(intel_host, dev, fn, &result); } } #ifdef CONFIG_PM_SLEEP static int sdhci_acpi_suspend(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); struct sdhci_host *host = c->host; int ret; if (host->tuning_mode != SDHCI_TUNING_MODE_3) mmc_retune_needed(host->mmc); ret = sdhci_suspend_host(host); if (ret) return ret; sdhci_acpi_reset_signal_voltage_if_needed(dev); return 0; } static int sdhci_acpi_resume(struct device *dev) { struct sdhci_acpi_host *c = dev_get_drvdata(dev); sdhci_acpi_byt_setting(&c->pdev->dev); return sdhci_resume_host(c->host); } #endif 
#ifdef CONFIG_PM

/*
 * Runtime-suspend the host: mark retuning as needed (unless the controller
 * retains tuning in mode 3), suspend via the SDHCI core, then reset the
 * signal voltage to 3.3V on Intel hosts that require it (see
 * sdhci_acpi_reset_signal_voltage_if_needed() earlier in this file).
 */
static int sdhci_acpi_runtime_suspend(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);
	struct sdhci_host *host = c->host;
	int ret;

	if (host->tuning_mode != SDHCI_TUNING_MODE_3)
		mmc_retune_needed(host->mmc);

	ret = sdhci_runtime_suspend_host(host);
	if (ret)
		return ret;

	/* Must happen after the host is suspended */
	sdhci_acpi_reset_signal_voltage_if_needed(dev);
	return 0;
}

/*
 * Runtime-resume the host.  Re-applies the Bay Trail-specific settings
 * (sdhci_acpi_byt_setting() is defined earlier in this file) before
 * handing control back to the SDHCI core.
 */
static int sdhci_acpi_runtime_resume(struct device *dev)
{
	struct sdhci_acpi_host *c = dev_get_drvdata(dev);

	sdhci_acpi_byt_setting(&c->pdev->dev);

	return sdhci_runtime_resume_host(c->host, 0);
}

#endif

static const struct dev_pm_ops sdhci_acpi_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(sdhci_acpi_suspend, sdhci_acpi_resume)
	SET_RUNTIME_PM_OPS(sdhci_acpi_runtime_suspend,
			   sdhci_acpi_runtime_resume, NULL)
};

static struct platform_driver sdhci_acpi_driver = {
	.driver = {
		.name			= "sdhci-acpi",
		.probe_type		= PROBE_PREFER_ASYNCHRONOUS,
		.acpi_match_table	= sdhci_acpi_ids,
		.pm			= &sdhci_acpi_pm_ops,
	},
	.probe	= sdhci_acpi_probe,
	.remove	= sdhci_acpi_remove,
};

module_platform_driver(sdhci_acpi_driver);

MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver");
MODULE_AUTHOR("Adrian Hunter");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0 /* * Based on arch/arm/kernel/atags_proc.c */ #include <linux/fs.h> #include <linux/init.h> #include <linux/printk.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/string.h> #include <asm/bootinfo.h> #include <asm/byteorder.h> static char bootinfo_tmp[1536] __initdata; static void *bootinfo_copy; static size_t bootinfo_size; static ssize_t bootinfo_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, bootinfo_copy, bootinfo_size); } static const struct proc_ops bootinfo_proc_ops = { .proc_read = bootinfo_read, .proc_lseek = default_llseek, }; void __init save_bootinfo(const struct bi_record *bi) { const void *start = bi; size_t size = sizeof(bi->tag); while (be16_to_cpu(bi->tag) != BI_LAST) { uint16_t n = be16_to_cpu(bi->size); size += n; bi = (struct bi_record *)((unsigned long)bi + n); } if (size > sizeof(bootinfo_tmp)) { pr_err("Cannot save %zu bytes of bootinfo\n", size); return; } pr_info("Saving %zu bytes of bootinfo\n", size); memcpy(bootinfo_tmp, start, size); bootinfo_size = size; } static int __init init_bootinfo_procfs(void) { /* * This cannot go into save_bootinfo() because kmalloc and proc don't * work yet when it is called. */ struct proc_dir_entry *pde; if (!bootinfo_size) return -EINVAL; bootinfo_copy = kmemdup(bootinfo_tmp, bootinfo_size, GFP_KERNEL); if (!bootinfo_copy) return -ENOMEM; pde = proc_create_data("bootinfo", 0400, NULL, &bootinfo_proc_ops, NULL); if (!pde) { kfree(bootinfo_copy); return -ENOMEM; } return 0; } arch_initcall(init_bootinfo_procfs);
// SPDX-License-Identifier: GPL-2.0+ /* * comedi/drivers/ni_routing/ni_device_routes/pci-6713.c * List of valid routes for specific NI boards. * * COMEDI - Linux Control and Measurement Device Interface * Copyright (C) 2016 Spencer E. Olson <[email protected]> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * The contents of this file are generated using the tools in * comedi/drivers/ni_routing/tools * * Please use those tools to help maintain the contents of this file. */ #include "../ni_device_routes.h" #include "all.h" struct ni_device_routes ni_pci_6713_device_routes = { .device = "pci-6713", .routes = (struct ni_route_set[]){ { .dest = NI_PFI(3), .src = (int[]){ NI_CtrSource(1), 0, /* Termination */ } }, { .dest = NI_PFI(4), .src = (int[]){ NI_CtrGate(1), 0, /* Termination */ } }, { .dest = NI_PFI(5), .src = (int[]){ NI_AO_SampleClock, 0, /* Termination */ } }, { .dest = NI_PFI(6), .src = (int[]){ NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = NI_PFI(8), .src = (int[]){ NI_CtrSource(0), 0, /* Termination */ } }, { .dest = NI_PFI(9), .src = (int[]){ NI_CtrGate(0), 0, /* Termination */ } }, { .dest = TRIGGER_LINE(0), .src = (int[]){ NI_CtrSource(0), NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(1), .src = (int[]){ NI_CtrSource(0), NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(2), .src = (int[]){ NI_CtrSource(0), 
NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(3), .src = (int[]){ NI_CtrSource(0), NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(4), .src = (int[]){ NI_CtrSource(0), NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(5), .src = (int[]){ NI_CtrSource(0), NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(6), .src = (int[]){ NI_CtrSource(0), NI_CtrGate(0), NI_CtrInternalOutput(0), NI_CtrOut(0), NI_AO_SampleClock, NI_AO_StartTrigger, 0, /* Termination */ } }, { .dest = TRIGGER_LINE(7), .src = (int[]){ NI_20MHzTimebase, 0, /* Termination */ } }, { .dest = NI_CtrSource(0), .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), TRIGGER_LINE(7), NI_MasterTimebase, NI_20MHzTimebase, NI_100kHzTimebase, 0, /* Termination */ } }, { .dest = NI_CtrSource(1), .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), TRIGGER_LINE(7), NI_MasterTimebase, NI_20MHzTimebase, NI_100kHzTimebase, 0, /* Termination */ } }, { .dest = NI_CtrGate(0), .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), NI_CtrInternalOutput(1), 0, /* Termination */ } }, { .dest = 
NI_CtrGate(1), .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), NI_CtrInternalOutput(0), 0, /* Termination */ } }, { .dest = NI_CtrOut(0), .src = (int[]){ TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), NI_CtrInternalOutput(0), 0, /* Termination */ } }, { .dest = NI_CtrOut(1), .src = (int[]){ NI_CtrInternalOutput(1), 0, /* Termination */ } }, { .dest = NI_AO_SampleClock, .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), NI_CtrInternalOutput(1), NI_AO_SampleClockTimebase, 0, /* Termination */ } }, { .dest = NI_AO_SampleClockTimebase, .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), TRIGGER_LINE(7), NI_MasterTimebase, NI_20MHzTimebase, NI_100kHzTimebase, 0, /* Termination */ } }, { .dest = NI_AO_StartTrigger, .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), 0, /* Termination */ } }, { .dest = NI_AO_PauseTrigger, .src = (int[]){ NI_PFI(0), NI_PFI(1), NI_PFI(2), NI_PFI(3), NI_PFI(4), NI_PFI(5), NI_PFI(6), NI_PFI(7), NI_PFI(8), NI_PFI(9), TRIGGER_LINE(0), TRIGGER_LINE(1), TRIGGER_LINE(2), TRIGGER_LINE(3), TRIGGER_LINE(4), TRIGGER_LINE(5), TRIGGER_LINE(6), 0, /* Termination */ } }, { .dest = NI_MasterTimebase, .src = (int[]){ 
TRIGGER_LINE(7), NI_20MHzTimebase, 0, /* Termination */ } }, { /* Termination of list */ .dest = 0, }, }, };
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Abilis Systems Single DVB-T Receiver
 * Copyright (C) 2008 Pierrick Hascoet <[email protected]>
 * Copyright (C) 2010 Devin Heitmueller <[email protected]>
 */

#include <media/dvb_frontend.h>
#include "as102_fe.h"

/* Per-frontend private state; allocated in as102_attach(), freed in release. */
struct as102_state {
	struct dvb_frontend frontend;
	struct as10x_demod_stats demod_stats;

	const struct as102_fe_ops *ops;	/* bus-access ops from the attach caller */
	void *priv;			/* opaque cookie passed to every op */
	uint8_t elna_cfg;		/* external LNA config, used by ts_bus_ctrl */

	/* signal strength (cached from the last GET_TUNE_STATUS) */
	uint16_t signal_strength;
	/* bit error rate (cached from the last GET_TUNE_STATUS) */
	uint32_t ber;
};

/*
 * Translate a DVB API code rate into the AS10x firmware encoding.
 * Anything outside 1/2..7/8 maps to CODE_RATE_UNKNOWN.
 */
static uint8_t as102_fe_get_code_rate(enum fe_code_rate arg)
{
	uint8_t c;

	switch (arg) {
	case FEC_1_2:
		c = CODE_RATE_1_2;
		break;
	case FEC_2_3:
		c = CODE_RATE_2_3;
		break;
	case FEC_3_4:
		c = CODE_RATE_3_4;
		break;
	case FEC_5_6:
		c = CODE_RATE_5_6;
		break;
	case FEC_7_8:
		c = CODE_RATE_7_8;
		break;
	default:
		c = CODE_RATE_UNKNOWN;
		break;
	}

	return c;
}

/*
 * Convert the DVB property cache into AS10x tune arguments and forward the
 * tune request to the demod.  Auto/unsupported values are passed down as the
 * firmware's *_UNKNOWN codes.
 */
static int as102_fe_set_frontend(struct dvb_frontend *fe)
{
	struct as102_state *state = fe->demodulator_priv;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct as10x_tune_args tune_args = { 0 };

	/* set frequency (firmware expects kHz, the cache holds Hz) */
	tune_args.freq = c->frequency / 1000;

	/* fix interleaving_mode */
	tune_args.interleaving_mode = INTLV_NATIVE;

	switch (c->bandwidth_hz) {
	case 8000000:
		tune_args.bandwidth = BW_8_MHZ;
		break;
	case 7000000:
		tune_args.bandwidth = BW_7_MHZ;
		break;
	case 6000000:
		tune_args.bandwidth = BW_6_MHZ;
		break;
	default:
		/* fall back to 8 MHz for any other bandwidth */
		tune_args.bandwidth = BW_8_MHZ;
	}

	switch (c->guard_interval) {
	case GUARD_INTERVAL_1_32:
		tune_args.guard_interval = GUARD_INT_1_32;
		break;
	case GUARD_INTERVAL_1_16:
		tune_args.guard_interval = GUARD_INT_1_16;
		break;
	case GUARD_INTERVAL_1_8:
		tune_args.guard_interval = GUARD_INT_1_8;
		break;
	case GUARD_INTERVAL_1_4:
		tune_args.guard_interval = GUARD_INT_1_4;
		break;
	case GUARD_INTERVAL_AUTO:
	default:
		tune_args.guard_interval = GUARD_UNKNOWN;
		break;
	}

	switch (c->modulation) {
	case QPSK:
		tune_args.modulation = CONST_QPSK;
		break;
	case QAM_16:
		tune_args.modulation = CONST_QAM16;
		break;
	case QAM_64:
		tune_args.modulation = CONST_QAM64;
		break;
	default:
		tune_args.modulation = CONST_UNKNOWN;
		break;
	}

	switch (c->transmission_mode) {
	case TRANSMISSION_MODE_2K:
		tune_args.transmission_mode = TRANS_MODE_2K;
		break;
	case TRANSMISSION_MODE_8K:
		tune_args.transmission_mode = TRANS_MODE_8K;
		break;
	default:
		tune_args.transmission_mode = TRANS_MODE_UNKNOWN;
	}

	switch (c->hierarchy) {
	case HIERARCHY_NONE:
		tune_args.hierarchy = HIER_NONE;
		break;
	case HIERARCHY_1:
		tune_args.hierarchy = HIER_ALPHA_1;
		break;
	case HIERARCHY_2:
		tune_args.hierarchy = HIER_ALPHA_2;
		break;
	case HIERARCHY_4:
		tune_args.hierarchy = HIER_ALPHA_4;
		break;
	case HIERARCHY_AUTO:
		tune_args.hierarchy = HIER_UNKNOWN;
		break;
	}

	pr_debug("as102: tuner parameters: freq: %d bw: 0x%02x gi: 0x%02x\n",
		 c->frequency, tune_args.bandwidth, tune_args.guard_interval);

	/*
	 * Detect a hierarchy selection
	 * if HP/LP are both set to FEC_NONE, HP will be selected.
	 *
	 * NOTE(review): when BOTH rates are FEC_NONE, the second branch below
	 * executes last, so LP (not HP, as stated above) actually wins —
	 * confirm which behavior is intended.
	 */
	if ((tune_args.hierarchy != HIER_NONE) &&
	    ((c->code_rate_LP == FEC_NONE) || (c->code_rate_HP == FEC_NONE))) {
		if (c->code_rate_LP == FEC_NONE) {
			tune_args.hier_select = HIER_HIGH_PRIORITY;
			tune_args.code_rate =
				as102_fe_get_code_rate(c->code_rate_HP);
		}

		if (c->code_rate_HP == FEC_NONE) {
			tune_args.hier_select = HIER_LOW_PRIORITY;
			tune_args.code_rate =
				as102_fe_get_code_rate(c->code_rate_LP);
		}

		pr_debug("as102: \thierarchy: 0x%02x selected: %s code_rate_%s: 0x%02x\n",
			 tune_args.hierarchy,
			 tune_args.hier_select == HIER_HIGH_PRIORITY ?
			 "HP" : "LP",
			 tune_args.hier_select == HIER_HIGH_PRIORITY ?
			 "HP" : "LP",
			 tune_args.code_rate);
	} else {
		tune_args.code_rate =
			as102_fe_get_code_rate(c->code_rate_HP);
	}

	/* Set frontend arguments */
	return state->ops->set_tune(state->priv, &tune_args);
}

/*
 * Read the TPS parameters back from the demod and fill the DVB property
 * cache.  The switches below have no default case, so unrecognized firmware
 * codes leave the corresponding cached property unchanged.
 */
static int as102_fe_get_frontend(struct dvb_frontend *fe,
				 struct dtv_frontend_properties *c)
{
	struct as102_state *state = fe->demodulator_priv;
	int ret = 0;
	struct as10x_tps tps = { 0 };

	/* send abilis command: GET_TPS */
	ret = state->ops->get_tps(state->priv, &tps);
	if (ret < 0)
		return ret;

	/* extract constellation */
	switch (tps.modulation) {
	case CONST_QPSK:
		c->modulation = QPSK;
		break;
	case CONST_QAM16:
		c->modulation = QAM_16;
		break;
	case CONST_QAM64:
		c->modulation = QAM_64;
		break;
	}

	/* extract hierarchy */
	switch (tps.hierarchy) {
	case HIER_NONE:
		c->hierarchy = HIERARCHY_NONE;
		break;
	case HIER_ALPHA_1:
		c->hierarchy = HIERARCHY_1;
		break;
	case HIER_ALPHA_2:
		c->hierarchy = HIERARCHY_2;
		break;
	case HIER_ALPHA_4:
		c->hierarchy = HIERARCHY_4;
		break;
	}

	/* extract code rate HP */
	switch (tps.code_rate_HP) {
	case CODE_RATE_1_2:
		c->code_rate_HP = FEC_1_2;
		break;
	case CODE_RATE_2_3:
		c->code_rate_HP = FEC_2_3;
		break;
	case CODE_RATE_3_4:
		c->code_rate_HP = FEC_3_4;
		break;
	case CODE_RATE_5_6:
		c->code_rate_HP = FEC_5_6;
		break;
	case CODE_RATE_7_8:
		c->code_rate_HP = FEC_7_8;
		break;
	}

	/* extract code rate LP */
	switch (tps.code_rate_LP) {
	case CODE_RATE_1_2:
		c->code_rate_LP = FEC_1_2;
		break;
	case CODE_RATE_2_3:
		c->code_rate_LP = FEC_2_3;
		break;
	case CODE_RATE_3_4:
		c->code_rate_LP = FEC_3_4;
		break;
	case CODE_RATE_5_6:
		c->code_rate_LP = FEC_5_6;
		break;
	case CODE_RATE_7_8:
		c->code_rate_LP = FEC_7_8;
		break;
	}

	/* extract guard interval */
	switch (tps.guard_interval) {
	case GUARD_INT_1_32:
		c->guard_interval = GUARD_INTERVAL_1_32;
		break;
	case GUARD_INT_1_16:
		c->guard_interval = GUARD_INTERVAL_1_16;
		break;
	case GUARD_INT_1_8:
		c->guard_interval = GUARD_INTERVAL_1_8;
		break;
	case GUARD_INT_1_4:
		c->guard_interval = GUARD_INTERVAL_1_4;
		break;
	}

	/* extract transmission mode */
	switch (tps.transmission_mode) {
	case TRANS_MODE_2K:
		c->transmission_mode = TRANSMISSION_MODE_2K;
		break;
	case TRANS_MODE_8K:
		c->transmission_mode = TRANSMISSION_MODE_8K;
		break;
	}

	return 0;
}

/* Ask the DVB core to wait at least 1s between tune attempts. */
static int as102_fe_get_tune_settings(struct dvb_frontend *fe,
				      struct dvb_frontend_tune_settings *settings)
{
	settings->min_delay_ms = 1000;

	return 0;
}

/*
 * Query the tune state from the demod, translate it into fe_status flags,
 * cache signal strength/BER, and refresh the demod statistics once locked.
 */
static int as102_fe_read_status(struct dvb_frontend *fe,
				enum fe_status *status)
{
	int ret = 0;
	struct as102_state *state = fe->demodulator_priv;
	struct as10x_tune_status tstate = { 0 };

	/* send abilis command: GET_TUNE_STATUS */
	ret = state->ops->get_status(state->priv, &tstate);
	if (ret < 0)
		return ret;

	/* cache raw values for the read_signal_strength/read_ber callbacks */
	state->signal_strength = tstate.signal_strength;
	state->ber = tstate.BER;

	switch (tstate.tune_state) {
	case TUNE_STATUS_SIGNAL_DVB_OK:
		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER;
		break;
	case TUNE_STATUS_STREAM_DETECTED:
		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC |
			  FE_HAS_VITERBI;
		break;
	case TUNE_STATUS_STREAM_TUNED:
		*status = FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_SYNC |
			  FE_HAS_LOCK | FE_HAS_VITERBI;
		break;
	default:
		/*
		 * NOTE(review): this assigns a tuner-state constant to an
		 * fe_status bitmask; it is only correct if
		 * TUNE_STATUS_NOT_TUNED is 0 ("no status bits") — verify.
		 */
		*status = TUNE_STATUS_NOT_TUNED;
	}

	pr_debug("as102: tuner status: 0x%02x, strength %d, per: %d, ber: %d\n",
		 tstate.tune_state, tstate.signal_strength,
		 tstate.PER, tstate.BER);

	/* without lock the stats are meaningless: clear the cache */
	if (!(*status & FE_HAS_LOCK)) {
		memset(&state->demod_stats, 0, sizeof(state->demod_stats));
		return 0;
	}

	ret = state->ops->get_stats(state->priv, &state->demod_stats);
	if (ret < 0)
		memset(&state->demod_stats, 0, sizeof(state->demod_stats));

	return ret;
}

/*
 * Note:
 * - in AS102 SNR=MER
 *   - the SNR will be returned in linear terms, i.e. not in dB
 *   - the accuracy equals ±2dB for a SNR range from 4dB to 30dB
 *   - the accuracy is >2dB for SNR values outside this range
 */
static int as102_fe_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct as102_state *state = fe->demodulator_priv;

	*snr = state->demod_stats.mer;

	return 0;
}

/* Report the BER cached by the last read_status call. */
static int as102_fe_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct as102_state *state = fe->demodulator_priv;

	*ber = state->ber;

	return 0;
}

/*
 * Scale the cached raw signal strength into the 0..0xffff DVB range.
 *
 * NOTE(review): `0xffff * 400` looks suspicious — a divide (to scale what is
 * presumably a percentage) seems more plausible, and the product can exceed
 * the u16 range before truncation.  Verify against the vendor's formula.
 */
static int as102_fe_read_signal_strength(struct dvb_frontend *fe,
					 u16 *strength)
{
	struct as102_state *state = fe->demodulator_priv;

	*strength = (((0xffff * 400) * state->signal_strength + 41000) * 2);

	return 0;
}

/* Report uncorrected blocks; 0 until the demod has produced statistics. */
static int as102_fe_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct as102_state *state = fe->demodulator_priv;

	if (state->demod_stats.has_started)
		*ucblocks = state->demod_stats.bad_frame_count;
	else
		*ucblocks = 0;

	return 0;
}

/* Start/stop the transport stream, forwarding the stored eLNA config. */
static int as102_fe_ts_bus_ctrl(struct dvb_frontend *fe, int acquire)
{
	struct as102_state *state = fe->demodulator_priv;

	return state->ops->stream_ctrl(state->priv, acquire,
				       state->elna_cfg);
}

/* Free the state allocated in as102_attach(). */
static void as102_fe_release(struct dvb_frontend *fe)
{
	struct as102_state *state = fe->demodulator_priv;

	kfree(state);
}

static const struct dvb_frontend_ops as102_fe_ops = {
	.delsys = { SYS_DVBT },
	.info = {
		.name			= "Abilis AS102 DVB-T",
		.frequency_min_hz	= 174 * MHz,
		.frequency_max_hz	= 862 * MHz,
		.frequency_stepsize_hz	= 166667,
		.caps = FE_CAN_INVERSION_AUTO |
			FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 |
			FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO |
			FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QPSK |
			FE_CAN_QAM_AUTO |
			FE_CAN_TRANSMISSION_MODE_AUTO |
			FE_CAN_GUARD_INTERVAL_AUTO |
			FE_CAN_HIERARCHY_AUTO |
			FE_CAN_RECOVER |
			FE_CAN_MUTE_TS
	},
	.set_frontend		= as102_fe_set_frontend,
	.get_frontend		= as102_fe_get_frontend,
	.get_tune_settings	= as102_fe_get_tune_settings,
	.read_status		= as102_fe_read_status,
	.read_snr		= as102_fe_read_snr,
	.read_ber		= as102_fe_read_ber,
	.read_signal_strength	= as102_fe_read_signal_strength,
	.read_ucblocks		= as102_fe_read_ucblocks,
	.ts_bus_ctrl		= as102_fe_ts_bus_ctrl,
	.release		= as102_fe_release,
};

/*
 * Allocate an as102 frontend bound to the given bus ops.
 * @name:     frontend name copied into dvb_frontend_ops.info
 * @ops:      bus-access callbacks (tune/status/stats/stream control)
 * @priv:     opaque cookie passed back to every @ops callback
 * @elna_cfg: external LNA configuration forwarded to stream_ctrl
 *
 * Returns the embedded dvb_frontend, or NULL on allocation failure.
 * The state is freed by as102_fe_release() when the frontend is released.
 */
struct dvb_frontend *as102_attach(const char *name,
				  const struct as102_fe_ops *ops,
				  void *priv,
				  uint8_t elna_cfg)
{
	struct as102_state *state;
	struct dvb_frontend *fe;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	fe = &state->frontend;
	fe->demodulator_priv = state;
	state->ops = ops;
	state->priv = priv;
	state->elna_cfg = elna_cfg;

	/* init frontend callback ops */
	memcpy(&fe->ops, &as102_fe_ops, sizeof(struct dvb_frontend_ops));
	strscpy(fe->ops.info.name, name, sizeof(fe->ops.info.name));

	return fe;
}
EXPORT_SYMBOL_GPL(as102_attach);

MODULE_DESCRIPTION("as102-fe");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pierrick Hascoet <[email protected]>");
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2022 Yassine Oudjana <[email protected]> */ #include <linux/clk-provider.h> #include <linux/platform_device.h> #include "clk-gate.h" #include "clk-mtk.h" #include <dt-bindings/clock/mediatek,mt6735-mfgcfg.h> #define MFG_CG_CON 0x00 #define MFG_CG_SET 0x04 #define MFG_CG_CLR 0x08 #define MFG_RESET 0x0c static struct mtk_gate_regs mfgcfg_cg_regs = { .set_ofs = MFG_CG_SET, .clr_ofs = MFG_CG_CLR, .sta_ofs = MFG_CG_CON, }; static const struct mtk_gate mfgcfg_gates[] = { GATE_MTK(CLK_MFG_BG3D, "bg3d", "mfg_sel", &mfgcfg_cg_regs, 0, &mtk_clk_gate_ops_setclr), }; static u16 mfgcfg_rst_ofs[] = { MFG_RESET }; static const struct mtk_clk_rst_desc mfgcfg_resets = { .version = MTK_RST_SIMPLE, .rst_bank_ofs = mfgcfg_rst_ofs, .rst_bank_nr = ARRAY_SIZE(mfgcfg_rst_ofs) }; static const struct mtk_clk_desc mfgcfg_clks = { .clks = mfgcfg_gates, .num_clks = ARRAY_SIZE(mfgcfg_gates), .rst_desc = &mfgcfg_resets }; static const struct of_device_id of_match_mt6735_mfgcfg[] = { { .compatible = "mediatek,mt6735-mfgcfg", .data = &mfgcfg_clks }, { /* sentinel */ } }; static struct platform_driver clk_mt6735_mfgcfg = { .probe = mtk_clk_simple_probe, .remove = mtk_clk_simple_remove, .driver = { .name = "clk-mt6735-mfgcfg", .of_match_table = of_match_mt6735_mfgcfg, }, }; module_platform_driver(clk_mt6735_mfgcfg); MODULE_AUTHOR("Yassine Oudjana <[email protected]>"); MODULE_DESCRIPTION("Mediatek MT6735 mfgcfg clock and reset driver"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0 /* * Test module for stress and analyze performance of vmalloc allocator. * (C) 2018 Uladzislau Rezki (Sony) <[email protected]> */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/vmalloc.h> #include <linux/random.h> #include <linux/kthread.h> #include <linux/moduleparam.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/rwsem.h> #include <linux/mm.h> #include <linux/rcupdate.h> #include <linux/slab.h> #define __param(type, name, init, msg) \ static type name = init; \ module_param(name, type, 0444); \ MODULE_PARM_DESC(name, msg) \ __param(int, nr_threads, 0, "Number of workers to perform tests(min: 1 max: USHRT_MAX)"); __param(bool, sequential_test_order, false, "Use sequential stress tests order"); __param(int, test_repeat_count, 1, "Set test repeat counter"); __param(int, test_loop_count, 1000000, "Set test loop counter"); __param(int, nr_pages, 0, "Set number of pages for fix_size_alloc_test(default: 1)"); __param(bool, use_huge, false, "Use vmalloc_huge in fix_size_alloc_test"); __param(int, run_test_mask, INT_MAX, "Set tests specified in the mask.\n\n" "\t\tid: 1, name: fix_size_alloc_test\n" "\t\tid: 2, name: full_fit_alloc_test\n" "\t\tid: 4, name: long_busy_list_alloc_test\n" "\t\tid: 8, name: random_size_alloc_test\n" "\t\tid: 16, name: fix_align_alloc_test\n" "\t\tid: 32, name: random_size_align_alloc_test\n" "\t\tid: 64, name: align_shift_alloc_test\n" "\t\tid: 128, name: pcpu_alloc_test\n" "\t\tid: 256, name: kvfree_rcu_1_arg_vmalloc_test\n" "\t\tid: 512, name: kvfree_rcu_2_arg_vmalloc_test\n" "\t\tid: 1024, name: vm_map_ram_test\n" /* Add a new test case description here. */ ); /* * Read write semaphore for synchronization of setup * phase that is done in main thread and workers. */ static DECLARE_RWSEM(prepare_for_test_rwsem); /* * Completion tracking for worker threads. 
*/ static DECLARE_COMPLETION(test_all_done_comp); static atomic_t test_n_undone = ATOMIC_INIT(0); static inline void test_report_one_done(void) { if (atomic_dec_and_test(&test_n_undone)) complete(&test_all_done_comp); } static int random_size_align_alloc_test(void) { unsigned long size, align; unsigned int rnd; void *ptr; int i; for (i = 0; i < test_loop_count; i++) { rnd = get_random_u8(); /* * Maximum 1024 pages, if PAGE_SIZE is 4096. */ align = 1 << (rnd % 23); /* * Maximum 10 pages. */ size = ((rnd % 10) + 1) * PAGE_SIZE; ptr = __vmalloc_node(size, align, GFP_KERNEL | __GFP_ZERO, 0, __builtin_return_address(0)); if (!ptr) return -1; vfree(ptr); } return 0; } /* * This test case is supposed to be failed. */ static int align_shift_alloc_test(void) { unsigned long align; void *ptr; int i; for (i = 0; i < BITS_PER_LONG; i++) { align = 1UL << i; ptr = __vmalloc_node(PAGE_SIZE, align, GFP_KERNEL|__GFP_ZERO, 0, __builtin_return_address(0)); if (!ptr) return -1; vfree(ptr); } return 0; } static int fix_align_alloc_test(void) { void *ptr; int i; for (i = 0; i < test_loop_count; i++) { ptr = __vmalloc_node(5 * PAGE_SIZE, THREAD_ALIGN << 1, GFP_KERNEL | __GFP_ZERO, 0, __builtin_return_address(0)); if (!ptr) return -1; vfree(ptr); } return 0; } static int random_size_alloc_test(void) { unsigned int n; void *p; int i; for (i = 0; i < test_loop_count; i++) { n = get_random_u32_inclusive(1, 100); p = vmalloc(n * PAGE_SIZE); if (!p) return -1; *((__u8 *)p) = 1; vfree(p); } return 0; } static int long_busy_list_alloc_test(void) { void *ptr_1, *ptr_2; void **ptr; int rv = -1; int i; ptr = vmalloc(sizeof(void *) * 15000); if (!ptr) return rv; for (i = 0; i < 15000; i++) ptr[i] = vmalloc(1 * PAGE_SIZE); for (i = 0; i < test_loop_count; i++) { ptr_1 = vmalloc(100 * PAGE_SIZE); if (!ptr_1) goto leave; ptr_2 = vmalloc(1 * PAGE_SIZE); if (!ptr_2) { vfree(ptr_1); goto leave; } *((__u8 *)ptr_1) = 0; *((__u8 *)ptr_2) = 1; vfree(ptr_1); vfree(ptr_2); } /* Success */ rv = 0; leave: for (i 
= 0; i < 15000; i++) vfree(ptr[i]); vfree(ptr); return rv; } static int full_fit_alloc_test(void) { void **ptr, **junk_ptr, *tmp; int junk_length; int rv = -1; int i; junk_length = fls(num_online_cpus()); junk_length *= (32 * 1024 * 1024 / PAGE_SIZE); ptr = vmalloc(sizeof(void *) * junk_length); if (!ptr) return rv; junk_ptr = vmalloc(sizeof(void *) * junk_length); if (!junk_ptr) { vfree(ptr); return rv; } for (i = 0; i < junk_length; i++) { ptr[i] = vmalloc(1 * PAGE_SIZE); junk_ptr[i] = vmalloc(1 * PAGE_SIZE); } for (i = 0; i < junk_length; i++) vfree(junk_ptr[i]); for (i = 0; i < test_loop_count; i++) { tmp = vmalloc(1 * PAGE_SIZE); if (!tmp) goto error; *((__u8 *)tmp) = 1; vfree(tmp); } /* Success */ rv = 0; error: for (i = 0; i < junk_length; i++) vfree(ptr[i]); vfree(ptr); vfree(junk_ptr); return rv; } static int fix_size_alloc_test(void) { void *ptr; int i; for (i = 0; i < test_loop_count; i++) { if (use_huge) ptr = vmalloc_huge((nr_pages > 0 ? nr_pages:1) * PAGE_SIZE, GFP_KERNEL); else ptr = vmalloc((nr_pages > 0 ? 
nr_pages:1) * PAGE_SIZE); if (!ptr) return -1; *((__u8 *)ptr) = 0; vfree(ptr); } return 0; } static int pcpu_alloc_test(void) { int rv = 0; #ifndef CONFIG_NEED_PER_CPU_KM void __percpu **pcpu; size_t size, align; int i; pcpu = vmalloc(sizeof(void __percpu *) * 35000); if (!pcpu) return -1; for (i = 0; i < 35000; i++) { size = get_random_u32_inclusive(1, PAGE_SIZE / 4); /* * Maximum PAGE_SIZE */ align = 1 << get_random_u32_inclusive(1, 11); pcpu[i] = __alloc_percpu(size, align); if (!pcpu[i]) rv = -1; } for (i = 0; i < 35000; i++) free_percpu(pcpu[i]); vfree(pcpu); #endif return rv; } struct test_kvfree_rcu { struct rcu_head rcu; unsigned char array[20]; }; static int kvfree_rcu_1_arg_vmalloc_test(void) { struct test_kvfree_rcu *p; int i; for (i = 0; i < test_loop_count; i++) { p = vmalloc(1 * PAGE_SIZE); if (!p) return -1; p->array[0] = 'a'; kvfree_rcu_mightsleep(p); } return 0; } static int kvfree_rcu_2_arg_vmalloc_test(void) { struct test_kvfree_rcu *p; int i; for (i = 0; i < test_loop_count; i++) { p = vmalloc(1 * PAGE_SIZE); if (!p) return -1; p->array[0] = 'a'; kvfree_rcu(p, rcu); } return 0; } static int vm_map_ram_test(void) { unsigned long nr_allocated; unsigned int map_nr_pages; unsigned char *v_ptr; struct page **pages; int i; map_nr_pages = nr_pages > 0 ? nr_pages:1; pages = kcalloc(map_nr_pages, sizeof(struct page *), GFP_KERNEL); if (!pages) return -1; nr_allocated = alloc_pages_bulk_array(GFP_KERNEL, map_nr_pages, pages); if (nr_allocated != map_nr_pages) goto cleanup; /* Run the test loop. */ for (i = 0; i < test_loop_count; i++) { v_ptr = vm_map_ram(pages, map_nr_pages, NUMA_NO_NODE); *v_ptr = 'a'; vm_unmap_ram(v_ptr, map_nr_pages); } cleanup: for (i = 0; i < nr_allocated; i++) __free_page(pages[i]); kfree(pages); /* 0 indicates success. 
*/ return nr_allocated != map_nr_pages; } struct test_case_desc { const char *test_name; int (*test_func)(void); }; static struct test_case_desc test_case_array[] = { { "fix_size_alloc_test", fix_size_alloc_test }, { "full_fit_alloc_test", full_fit_alloc_test }, { "long_busy_list_alloc_test", long_busy_list_alloc_test }, { "random_size_alloc_test", random_size_alloc_test }, { "fix_align_alloc_test", fix_align_alloc_test }, { "random_size_align_alloc_test", random_size_align_alloc_test }, { "align_shift_alloc_test", align_shift_alloc_test }, { "pcpu_alloc_test", pcpu_alloc_test }, { "kvfree_rcu_1_arg_vmalloc_test", kvfree_rcu_1_arg_vmalloc_test }, { "kvfree_rcu_2_arg_vmalloc_test", kvfree_rcu_2_arg_vmalloc_test }, { "vm_map_ram_test", vm_map_ram_test }, /* Add a new test case here. */ }; struct test_case_data { int test_failed; int test_passed; u64 time; }; static struct test_driver { struct task_struct *task; struct test_case_data data[ARRAY_SIZE(test_case_array)]; unsigned long start; unsigned long stop; } *tdriver; static void shuffle_array(int *arr, int n) { int i, j; for (i = n - 1; i > 0; i--) { /* Cut the range. */ j = get_random_u32_below(i); /* Swap indexes. */ swap(arr[i], arr[j]); } } static int test_func(void *private) { struct test_driver *t = private; int random_array[ARRAY_SIZE(test_case_array)]; int index, i, j; ktime_t kt; u64 delta; for (i = 0; i < ARRAY_SIZE(test_case_array); i++) random_array[i] = i; if (!sequential_test_order) shuffle_array(random_array, ARRAY_SIZE(test_case_array)); /* * Block until initialization is done. */ down_read(&prepare_for_test_rwsem); t->start = get_cycles(); for (i = 0; i < ARRAY_SIZE(test_case_array); i++) { index = random_array[i]; /* * Skip tests if run_test_mask has been specified. 
*/ if (!((run_test_mask & (1 << index)) >> index)) continue; kt = ktime_get(); for (j = 0; j < test_repeat_count; j++) { if (!test_case_array[index].test_func()) t->data[index].test_passed++; else t->data[index].test_failed++; } /* * Take an average time that test took. */ delta = (u64) ktime_us_delta(ktime_get(), kt); do_div(delta, (u32) test_repeat_count); t->data[index].time = delta; } t->stop = get_cycles(); up_read(&prepare_for_test_rwsem); test_report_one_done(); /* * Wait for the kthread_stop() call. */ while (!kthread_should_stop()) msleep(10); return 0; } static int init_test_configuration(void) { /* * A maximum number of workers is defined as hard-coded * value and set to USHRT_MAX. We add such gap just in * case and for potential heavy stressing. */ nr_threads = clamp(nr_threads, 1, (int) USHRT_MAX); /* Allocate the space for test instances. */ tdriver = kvcalloc(nr_threads, sizeof(*tdriver), GFP_KERNEL); if (tdriver == NULL) return -1; if (test_repeat_count <= 0) test_repeat_count = 1; if (test_loop_count <= 0) test_loop_count = 1; return 0; } static void do_concurrent_test(void) { int i, ret; /* * Set some basic configurations plus sanity check. */ ret = init_test_configuration(); if (ret < 0) return; /* * Put on hold all workers. */ down_write(&prepare_for_test_rwsem); for (i = 0; i < nr_threads; i++) { struct test_driver *t = &tdriver[i]; t->task = kthread_run(test_func, t, "vmalloc_test/%d", i); if (!IS_ERR(t->task)) /* Success. */ atomic_inc(&test_n_undone); else pr_err("Failed to start %d kthread\n", i); } /* * Now let the workers do their job. */ up_write(&prepare_for_test_rwsem); /* * Sleep quiet until all workers are done with 1 second * interval. Since the test can take a lot of time we * can run into a stack trace of the hung task. That is * why we go with completion_timeout and HZ value. 
*/ do { ret = wait_for_completion_timeout(&test_all_done_comp, HZ); } while (!ret); for (i = 0; i < nr_threads; i++) { struct test_driver *t = &tdriver[i]; int j; if (!IS_ERR(t->task)) kthread_stop(t->task); for (j = 0; j < ARRAY_SIZE(test_case_array); j++) { if (!((run_test_mask & (1 << j)) >> j)) continue; pr_info( "Summary: %s passed: %d failed: %d repeat: %d loops: %d avg: %llu usec\n", test_case_array[j].test_name, t->data[j].test_passed, t->data[j].test_failed, test_repeat_count, test_loop_count, t->data[j].time); } pr_info("All test took worker%d=%lu cycles\n", i, t->stop - t->start); } kvfree(tdriver); } static int vmalloc_test_init(void) { do_concurrent_test(); return -EAGAIN; /* Fail will directly unload the module */ } module_init(vmalloc_test_init) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Uladzislau Rezki"); MODULE_DESCRIPTION("vmalloc test module");
/*
 * Copyright (C) 2016 Icenowy Zheng <[email protected]>
 *
 * This file is dual-licensed: you can use it either under the terms
 * of the GPL or the X11 license, at your option. Note that this dual
 * licensing only applies to this file, and not this project as a
 * whole.
 *
 *  a) This file is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of the
 *     License, or (at your option) any later version.
 *
 *     This file is distributed in the hope that it will be useful,
 *     but WITHOUT ANY WARRANTY; without even the implied warranty of
 *     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *     GNU General Public License for more details.
 *
 * Or, alternatively,
 *
 *  b) Permission is hereby granted, free of charge, to any person
 *     obtaining a copy of this software and associated documentation
 *     files (the "Software"), to deal in the Software without
 *     restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or
 *     sell copies of the Software, and to permit persons to whom the
 *     Software is furnished to do so, subject to the following
 *     conditions:
 *
 *     The above copyright notice and this permission notice shall be
 *     included in all copies or substantial portions of the Software.
 *
 *     THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 *     EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 *     OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 *     NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 *     HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 *     WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 *     FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 *     OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Reset indices for the Allwinner sun8i PRCM (R_) clock control unit,
 * shared between the device tree and the CCU driver.
 */
#ifndef _DT_BINDINGS_RST_SUN8I_R_CCU_H_
#define _DT_BINDINGS_RST_SUN8I_R_CCU_H_

#define RST_APB0_IR		0
#define RST_APB0_TIMER		1
#define RST_APB0_RSB		2
#define RST_APB0_UART		3
/* 4 is reserved for RST_APB0_W1 on A31 */
#define RST_APB0_I2C		5

#endif /* _DT_BINDINGS_RST_SUN8I_R_CCU_H_ */
// SPDX-License-Identifier: GPL-2.0
/*
 * r8a7796 (R-Car M3-W/W+) Clock Pulse Generator / Module Standby and Software
 * Reset
 *
 * Copyright (C) 2016-2019 Glider bvba
 * Copyright (C) 2018-2019 Renesas Electronics Corp.
 *
 * Based on r8a7795-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/soc/renesas/rcar-rst.h>

#include <dt-bindings/clock/r8a7796-cpg-mssr.h>

#include "renesas-cpg-mssr.h"
#include "rcar-gen3-cpg.h"

enum clk_ids {
	/* Core Clock Outputs exported to DT */
	LAST_DT_CORE_CLK = R8A7796_CLK_OSC,

	/* External Input Clocks */
	CLK_EXTAL,
	CLK_EXTALR,

	/* Internal Core Clocks */
	CLK_MAIN,
	CLK_PLL0,
	CLK_PLL1,
	CLK_PLL2,
	CLK_PLL3,
	CLK_PLL4,
	CLK_PLL1_DIV2,
	CLK_PLL1_DIV4,
	CLK_S0,
	CLK_S1,
	CLK_S2,
	CLK_S3,
	CLK_SDSRC,
	CLK_SSPSRC,
	CLK_RPCSRC,
	CLK_RINT,

	/* Module Clocks */
	MOD_CLK_BASE
};

static const struct cpg_core_clk r8a7796_core_clks[] __initconst = {
	/* External Clock Inputs */
	DEF_INPUT("extal",  CLK_EXTAL),
	DEF_INPUT("extalr", CLK_EXTALR),

	/* Internal Core Clocks */
	DEF_BASE(".main",       CLK_MAIN, CLK_TYPE_GEN3_MAIN, CLK_EXTAL),
	DEF_BASE(".pll0",       CLK_PLL0, CLK_TYPE_GEN3_PLL0, CLK_MAIN),
	DEF_BASE(".pll1",       CLK_PLL1, CLK_TYPE_GEN3_PLL1, CLK_MAIN),
	DEF_BASE(".pll2",       CLK_PLL2, CLK_TYPE_GEN3_PLL2, CLK_MAIN),
	DEF_BASE(".pll3",       CLK_PLL3, CLK_TYPE_GEN3_PLL3, CLK_MAIN),
	DEF_BASE(".pll4",       CLK_PLL4, CLK_TYPE_GEN3_PLL4, CLK_MAIN),

	DEF_FIXED(".pll1_div2", CLK_PLL1_DIV2,     CLK_PLL1,       2, 1),
	DEF_FIXED(".pll1_div4", CLK_PLL1_DIV4,     CLK_PLL1_DIV2,  2, 1),
	DEF_FIXED(".s0",        CLK_S0,            CLK_PLL1_DIV2,  2, 1),
	DEF_FIXED(".s1",        CLK_S1,            CLK_PLL1_DIV2,  3, 1),
	DEF_FIXED(".s2",        CLK_S2,            CLK_PLL1_DIV2,  4, 1),
	DEF_FIXED(".s3",        CLK_S3,            CLK_PLL1_DIV2,  6, 1),
	DEF_FIXED(".sdsrc",     CLK_SDSRC,         CLK_PLL1_DIV2,  2, 1),
	DEF_BASE(".rpcsrc",     CLK_RPCSRC, CLK_TYPE_GEN3_RPCSRC, CLK_PLL1),

	DEF_GEN3_OSC(".r",      CLK_RINT,          CLK_EXTAL,      32),

	/* Core Clock Outputs */
	DEF_GEN3_Z("z",         R8A7796_CLK_Z,     CLK_TYPE_GEN3_Z,  CLK_PLL0, 2, 8),
	DEF_GEN3_Z("z2",        R8A7796_CLK_Z2,    CLK_TYPE_GEN3_Z,  CLK_PLL2, 2, 0),
	DEF_GEN3_Z("zg",        R8A7796_CLK_ZG,    CLK_TYPE_GEN3_ZG, CLK_PLL4, 4, 24),
	DEF_FIXED("ztr",        R8A7796_CLK_ZTR,   CLK_PLL1_DIV2,  6, 1),
	DEF_FIXED("ztrd2",      R8A7796_CLK_ZTRD2, CLK_PLL1_DIV2, 12, 1),
	DEF_FIXED("zt",         R8A7796_CLK_ZT,    CLK_PLL1_DIV2,  4, 1),
	DEF_FIXED("zx",         R8A7796_CLK_ZX,    CLK_PLL1_DIV2,  2, 1),
	DEF_FIXED("s0d1",       R8A7796_CLK_S0D1,  CLK_S0,         1, 1),
	DEF_FIXED("s0d2",       R8A7796_CLK_S0D2,  CLK_S0,         2, 1),
	DEF_FIXED("s0d3",       R8A7796_CLK_S0D3,  CLK_S0,         3, 1),
	DEF_FIXED("s0d4",       R8A7796_CLK_S0D4,  CLK_S0,         4, 1),
	DEF_FIXED("s0d6",       R8A7796_CLK_S0D6,  CLK_S0,         6, 1),
	DEF_FIXED("s0d8",       R8A7796_CLK_S0D8,  CLK_S0,         8, 1),
	DEF_FIXED("s0d12",      R8A7796_CLK_S0D12, CLK_S0,        12, 1),
	DEF_FIXED("s1d1",       R8A7796_CLK_S1D1,  CLK_S1,         1, 1),
	DEF_FIXED("s1d2",       R8A7796_CLK_S1D2,  CLK_S1,         2, 1),
	DEF_FIXED("s1d4",       R8A7796_CLK_S1D4,  CLK_S1,         4, 1),
	DEF_FIXED("s2d1",       R8A7796_CLK_S2D1,  CLK_S2,         1, 1),
	DEF_FIXED("s2d2",       R8A7796_CLK_S2D2,  CLK_S2,         2, 1),
	DEF_FIXED("s2d4",       R8A7796_CLK_S2D4,  CLK_S2,         4, 1),
	DEF_FIXED("s3d1",       R8A7796_CLK_S3D1,  CLK_S3,         1, 1),
	DEF_FIXED("s3d2",       R8A7796_CLK_S3D2,  CLK_S3,         2, 1),
	DEF_FIXED("s3d4",       R8A7796_CLK_S3D4,  CLK_S3,         4, 1),

	DEF_GEN3_SDH("sd0h",    R8A7796_CLK_SD0H,  CLK_SDSRC,        0x074),
	DEF_GEN3_SDH("sd1h",    R8A7796_CLK_SD1H,  CLK_SDSRC,        0x078),
	DEF_GEN3_SDH("sd2h",    R8A7796_CLK_SD2H,  CLK_SDSRC,        0x268),
	DEF_GEN3_SDH("sd3h",    R8A7796_CLK_SD3H,  CLK_SDSRC,        0x26c),
	DEF_GEN3_SD("sd0",      R8A7796_CLK_SD0,   R8A7796_CLK_SD0H, 0x074),
	DEF_GEN3_SD("sd1",      R8A7796_CLK_SD1,   R8A7796_CLK_SD1H, 0x078),
	DEF_GEN3_SD("sd2",      R8A7796_CLK_SD2,   R8A7796_CLK_SD2H, 0x268),
	DEF_GEN3_SD("sd3",      R8A7796_CLK_SD3,   R8A7796_CLK_SD3H, 0x26c),

	DEF_BASE("rpc",         R8A7796_CLK_RPC, CLK_TYPE_GEN3_RPC,
		 CLK_RPCSRC),
	DEF_BASE("rpcd2",       R8A7796_CLK_RPCD2, CLK_TYPE_GEN3_RPCD2,
		 R8A7796_CLK_RPC),

	DEF_FIXED("cl",         R8A7796_CLK_CL,    CLK_PLL1_DIV2, 48, 1),
	DEF_FIXED("cr",         R8A7796_CLK_CR,    CLK_PLL1_DIV4,  2, 1),
	DEF_FIXED("cp",         R8A7796_CLK_CP,    CLK_EXTAL,      2, 1),
	DEF_FIXED("cpex",       R8A7796_CLK_CPEX,  CLK_EXTAL,      2, 1),

	DEF_DIV6P1("canfd",     R8A7796_CLK_CANFD, CLK_PLL1_DIV4, 0x244),
	DEF_DIV6P1("csi0",      R8A7796_CLK_CSI0,  CLK_PLL1_DIV4, 0x00c),
	DEF_DIV6P1("mso",       R8A7796_CLK_MSO,   CLK_PLL1_DIV4, 0x014),
	DEF_DIV6P1("hdmi",      R8A7796_CLK_HDMI,  CLK_PLL1_DIV4, 0x250),

	DEF_GEN3_OSC("osc",     R8A7796_CLK_OSC,   CLK_EXTAL,     8),

	DEF_BASE("r",           R8A7796_CLK_R,     CLK_TYPE_GEN3_R, CLK_RINT),
};

static struct mssr_mod_clk r8a7796_mod_clks[] __initdata = {
	DEF_MOD("3dge",			 112,	R8A7796_CLK_ZG),
	DEF_MOD("fdp1-0",		 119,	R8A7796_CLK_S0D1),
	DEF_MOD("tmu4",			 121,	R8A7796_CLK_S0D6),
	DEF_MOD("tmu3",			 122,	R8A7796_CLK_S3D2),
	DEF_MOD("tmu2",			 123,	R8A7796_CLK_S3D2),
	DEF_MOD("tmu1",			 124,	R8A7796_CLK_S3D2),
	DEF_MOD("tmu0",			 125,	R8A7796_CLK_CP),
	DEF_MOD("scif5",		 202,	R8A7796_CLK_S3D4),
	DEF_MOD("scif4",		 203,	R8A7796_CLK_S3D4),
	DEF_MOD("scif3",		 204,	R8A7796_CLK_S3D4),
	DEF_MOD("scif1",		 206,	R8A7796_CLK_S3D4),
	DEF_MOD("scif0",		 207,	R8A7796_CLK_S3D4),
	DEF_MOD("msiof3",		 208,	R8A7796_CLK_MSO),
	DEF_MOD("msiof2",		 209,	R8A7796_CLK_MSO),
	DEF_MOD("msiof1",		 210,	R8A7796_CLK_MSO),
	DEF_MOD("msiof0",		 211,	R8A7796_CLK_MSO),
	DEF_MOD("sys-dmac2",		 217,	R8A7796_CLK_S3D1),
	DEF_MOD("sys-dmac1",		 218,	R8A7796_CLK_S3D1),
	DEF_MOD("sys-dmac0",		 219,	R8A7796_CLK_S0D3),
	DEF_MOD("sceg-pub",		 229,	R8A7796_CLK_CR),
	DEF_MOD("cmt3",			 300,	R8A7796_CLK_R),
	DEF_MOD("cmt2",			 301,	R8A7796_CLK_R),
	DEF_MOD("cmt1",			 302,	R8A7796_CLK_R),
	DEF_MOD("cmt0",			 303,	R8A7796_CLK_R),
	DEF_MOD("tpu0",			 304,	R8A7796_CLK_S3D4),
	DEF_MOD("scif2",		 310,	R8A7796_CLK_S3D4),
	DEF_MOD("sdif3",		 311,	R8A7796_CLK_SD3),
	DEF_MOD("sdif2",		 312,	R8A7796_CLK_SD2),
	DEF_MOD("sdif1",		 313,	R8A7796_CLK_SD1),
	DEF_MOD("sdif0",		 314,	R8A7796_CLK_SD0),
	DEF_MOD("pcie1",		 318,	R8A7796_CLK_S3D1),
	DEF_MOD("pcie0",		 319,	R8A7796_CLK_S3D1),
	DEF_MOD("usb3-if0",		 328,	R8A7796_CLK_S3D1),
	DEF_MOD("usb-dmac0",		 330,	R8A7796_CLK_S3D1),
	DEF_MOD("usb-dmac1",		 331,	R8A7796_CLK_S3D1),
	DEF_MOD("rwdt",			 402,	R8A7796_CLK_R),
	DEF_MOD("intc-ex",		 407,	R8A7796_CLK_CP),
	DEF_MOD("intc-ap",		 408,	R8A7796_CLK_S0D3),
	DEF_MOD("audmac1",		 501,	R8A7796_CLK_S1D2),
	DEF_MOD("audmac0",		 502,	R8A7796_CLK_S1D2),
	DEF_MOD("drif31",		 508,	R8A7796_CLK_S3D2),
	DEF_MOD("drif30",		 509,	R8A7796_CLK_S3D2),
	DEF_MOD("drif21",		 510,	R8A7796_CLK_S3D2),
	DEF_MOD("drif20",		 511,	R8A7796_CLK_S3D2),
	DEF_MOD("drif11",		 512,	R8A7796_CLK_S3D2),
	DEF_MOD("drif10",		 513,	R8A7796_CLK_S3D2),
	DEF_MOD("drif01",		 514,	R8A7796_CLK_S3D2),
	DEF_MOD("drif00",		 515,	R8A7796_CLK_S3D2),
	DEF_MOD("hscif4",		 516,	R8A7796_CLK_S3D1),
	DEF_MOD("hscif3",		 517,	R8A7796_CLK_S3D1),
	DEF_MOD("hscif2",		 518,	R8A7796_CLK_S3D1),
	DEF_MOD("hscif1",		 519,	R8A7796_CLK_S3D1),
	DEF_MOD("hscif0",		 520,	R8A7796_CLK_S3D1),
	DEF_MOD("thermal",		 522,	R8A7796_CLK_CP),
	DEF_MOD("pwm",			 523,	R8A7796_CLK_S0D12),
	DEF_MOD("fcpvd2",		 601,	R8A7796_CLK_S0D2),
	DEF_MOD("fcpvd1",		 602,	R8A7796_CLK_S0D2),
	DEF_MOD("fcpvd0",		 603,	R8A7796_CLK_S0D2),
	DEF_MOD("fcpvb0",		 607,	R8A7796_CLK_S0D1),
	DEF_MOD("fcpvi0",		 611,	R8A7796_CLK_S0D1),
	DEF_MOD("fcpf0",		 615,	R8A7796_CLK_S0D1),
	DEF_MOD("fcpci0",		 617,	R8A7796_CLK_S0D2),
	DEF_MOD("fcpcs",		 619,	R8A7796_CLK_S0D2),
	DEF_MOD("vspd2",		 621,	R8A7796_CLK_S0D2),
	DEF_MOD("vspd1",		 622,	R8A7796_CLK_S0D2),
	DEF_MOD("vspd0",		 623,	R8A7796_CLK_S0D2),
	DEF_MOD("vspb",			 626,	R8A7796_CLK_S0D1),
	DEF_MOD("vspi0",		 631,	R8A7796_CLK_S0D1),
	DEF_MOD("ehci1",		 702,	R8A7796_CLK_S3D2),
	DEF_MOD("ehci0",		 703,	R8A7796_CLK_S3D2),
	DEF_MOD("hsusb",		 704,	R8A7796_CLK_S3D2),
	DEF_MOD("cmm2",			 709,	R8A7796_CLK_S2D1),
	DEF_MOD("cmm1",			 710,	R8A7796_CLK_S2D1),
	DEF_MOD("cmm0",			 711,	R8A7796_CLK_S2D1),
	DEF_MOD("csi20",		 714,	R8A7796_CLK_CSI0),
	DEF_MOD("csi40",		 716,	R8A7796_CLK_CSI0),
	DEF_MOD("du2",			 722,	R8A7796_CLK_S2D1),
	DEF_MOD("du1",			 723,	R8A7796_CLK_S2D1),
	DEF_MOD("du0",			 724,	R8A7796_CLK_S2D1),
	DEF_MOD("lvds",			 727,	R8A7796_CLK_S2D1),
	DEF_MOD("hdmi0",		 729,	R8A7796_CLK_HDMI),
	DEF_MOD("mlp",			 802,	R8A7796_CLK_S2D1),
	DEF_MOD("vin7",			 804,	R8A7796_CLK_S0D2),
	DEF_MOD("vin6",			 805,	R8A7796_CLK_S0D2),
	DEF_MOD("vin5",			 806,	R8A7796_CLK_S0D2),
	DEF_MOD("vin4",			 807,	R8A7796_CLK_S0D2),
	DEF_MOD("vin3",			 808,	R8A7796_CLK_S0D2),
	DEF_MOD("vin2",			 809,	R8A7796_CLK_S0D2),
	DEF_MOD("vin1",			 810,	R8A7796_CLK_S0D2),
	DEF_MOD("vin0",			 811,	R8A7796_CLK_S0D2),
	DEF_MOD("etheravb",		 812,	R8A7796_CLK_S0D6),
	DEF_MOD("imr1",			 822,	R8A7796_CLK_S0D2),
	DEF_MOD("imr0",			 823,	R8A7796_CLK_S0D2),
	DEF_MOD("gpio7",		 905,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio6",		 906,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio5",		 907,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio4",		 908,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio3",		 909,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio2",		 910,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio1",		 911,	R8A7796_CLK_S3D4),
	DEF_MOD("gpio0",		 912,	R8A7796_CLK_S3D4),
	DEF_MOD("can-fd",		 914,	R8A7796_CLK_S3D2),
	DEF_MOD("can-if1",		 915,	R8A7796_CLK_S3D4),
	DEF_MOD("can-if0",		 916,	R8A7796_CLK_S3D4),
	DEF_MOD("rpc-if",		 917,	R8A7796_CLK_RPCD2),
	DEF_MOD("i2c6",			 918,	R8A7796_CLK_S0D6),
	DEF_MOD("i2c5",			 919,	R8A7796_CLK_S0D6),
	DEF_MOD("adg",			 922,	R8A7796_CLK_S0D4),
	DEF_MOD("i2c-dvfs",		 926,	R8A7796_CLK_CP),
	DEF_MOD("i2c4",			 927,	R8A7796_CLK_S0D6),
	DEF_MOD("i2c3",			 928,	R8A7796_CLK_S0D6),
	DEF_MOD("i2c2",			 929,	R8A7796_CLK_S3D2),
	DEF_MOD("i2c1",			 930,	R8A7796_CLK_S3D2),
	DEF_MOD("i2c0",			 931,	R8A7796_CLK_S3D2),
	DEF_MOD("ssi-all",		1005,	R8A7796_CLK_S3D4),
	DEF_MOD("ssi9",			1006,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi8",			1007,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi7",			1008,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi6",			1009,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi5",			1010,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi4",			1011,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi3",			1012,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi2",			1013,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi1",			1014,	MOD_CLK_ID(1005)),
	DEF_MOD("ssi0",			1015,	MOD_CLK_ID(1005)),
	DEF_MOD("scu-all",		1017,	R8A7796_CLK_S3D4),
	DEF_MOD("scu-dvc1",		1018,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-dvc0",		1019,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-ctu1-mix1",	1020,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-ctu0-mix0",	1021,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src9",		1022,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src8",		1023,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src7",		1024,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src6",		1025,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src5",		1026,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src4",		1027,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src3",		1028,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src2",		1029,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src1",		1030,	MOD_CLK_ID(1017)),
	DEF_MOD("scu-src0",		1031,	MOD_CLK_ID(1017)),
};

static const unsigned int r8a7796_crit_mod_clks[] __initconst = {
	MOD_CLK_ID(402),	/* RWDT */
	MOD_CLK_ID(408),	/* INTC-AP (GIC) */
};

/*
 * CPG Clock Data
 */

/*
 *   MD		EXTAL		PLL0	PLL1	PLL2	PLL3	PLL4	OSC
 * 14 13 19 17	(MHz)
 *-------------------------------------------------------------------------
 * 0  0  0  0	16.66 x 1	x180	x192	x144	x192	x144	/16
 * 0  0  0  1	16.66 x 1	x180	x192	x144	x128	x144	/16
 * 0  0  1  0	Prohibited setting
 * 0  0  1  1	16.66 x 1	x180	x192	x144	x192	x144	/16
 * 0  1  0  0	20    x 1	x150	x160	x120	x160	x120	/19
 * 0  1  0  1	20    x 1	x150	x160	x120	x106	x120	/19
 * 0  1  1  0	Prohibited setting
 * 0  1  1  1	20    x 1	x150	x160	x120	x160	x120	/19
 * 1  0  0  0	25    x 1	x120	x128	x96	x128	x96	/24
 * 1  0  0  1	25    x 1	x120	x128	x96	x84	x96	/24
 * 1  0  1  0	Prohibited setting
 * 1  0  1  1	25    x 1	x120	x128	x96	x128	x96	/24
 * 1  1  0  0	33.33 / 2	x180	x192	x144	x192	x144	/32
 * 1  1  0  1	33.33 / 2	x180	x192	x144	x128	x144	/32
 * 1  1  1  0	Prohibited setting
 * 1  1  1  1	33.33 / 2	x180	x192	x144	x192	x144	/32
 */
/* Pack mode pins MD14/MD13/MD19/MD17 into a 4-bit table index. */
#define CPG_PLL_CONFIG_INDEX(md)	((((md) & BIT(14)) >> 11) | \
					 (((md) & BIT(13)) >> 11) | \
					 (((md) & BIT(19)) >> 18) | \
					 (((md) & BIT(17)) >> 17))

static const struct rcar_gen3_cpg_pll_config cpg_pll_configs[16] __initconst = {
	/* EXTAL div	PLL1 mult/div	PLL3 mult/div	OSC prediv */
	{ 1,		192,	1,	192,	1,	16,	},
	{ 1,		192,	1,	128,	1,	16,	},
	{ 0, /* Prohibited setting */				},
	{ 1,		192,	1,	192,	1,	16,	},
	{ 1,		160,	1,	160,	1,	19,	},
	{ 1,		160,	1,	106,	1,	19,	},
	{ 0, /* Prohibited setting */				},
	{ 1,		160,	1,	160,	1,	19,	},
	{ 1,		128,	1,	128,	1,	24,	},
	{ 1,		128,	1,	84,	1,	24,	},
	{ 0, /* Prohibited setting */				},
	{ 1,		128,	1,	128,	1,	24,	},
	{ 2,		192,	1,	192,	1,	32,	},
	{ 2,		192,	1,	128,	1,	32,	},
	{ 0, /* Prohibited setting */				},
	{ 2,		192,	1,	192,	1,	32,	},
};

/*
 * Fixups for R-Car M3-W+
 */

/* Module clocks that do not exist on R-Car M3-W+ (r8a77961). */
static const unsigned int r8a77961_mod_nullify[] __initconst = {
	MOD_CLK_ID(617),			/* FCPCI0 */
};

/*
 * Read the mode pins, reject prohibited PLL configurations and apply
 * the M3-W+ module-clock fixups before handing over to the generic
 * Gen3 CPG initialization.
 */
static int __init r8a7796_cpg_mssr_init(struct device *dev)
{
	const struct rcar_gen3_cpg_pll_config *cpg_pll_config;
	u32 cpg_mode;
	int error;

	error = rcar_rst_read_mode_pins(&cpg_mode);
	if (error)
		return error;

	cpg_pll_config = &cpg_pll_configs[CPG_PLL_CONFIG_INDEX(cpg_mode)];
	if (!cpg_pll_config->extal_div) {
		dev_err(dev, "Prohibited setting (cpg_mode=0x%x)\n", cpg_mode);
		return -EINVAL;
	}

	if (of_device_is_compatible(dev->of_node, "renesas,r8a77961-cpg-mssr"))
		mssr_mod_nullify(r8a7796_mod_clks,
				 ARRAY_SIZE(r8a7796_mod_clks),
				 r8a77961_mod_nullify,
				 ARRAY_SIZE(r8a77961_mod_nullify));

	return rcar_gen3_cpg_init(cpg_pll_config, CLK_EXTALR, cpg_mode);
}

const struct cpg_mssr_info r8a7796_cpg_mssr_info __initconst = {
	/* Core Clocks */
	.core_clks = r8a7796_core_clks,
	.num_core_clks = ARRAY_SIZE(r8a7796_core_clks),
	.last_dt_core_clk = LAST_DT_CORE_CLK,
	.num_total_core_clks = MOD_CLK_BASE,

	/* Module Clocks */
	.mod_clks = r8a7796_mod_clks,
	.num_mod_clks = ARRAY_SIZE(r8a7796_mod_clks),
	.num_hw_mod_clks = 12 * 32,

	/* Critical Module Clocks */
	.crit_mod_clks = r8a7796_crit_mod_clks,
	.num_crit_mod_clks = ARRAY_SIZE(r8a7796_crit_mod_clks),

	/* Callbacks */
	.init = r8a7796_cpg_mssr_init,
	.cpg_clk_register = rcar_gen3_cpg_clk_register,
};
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * pervasive backend for the cbe_cpufreq driver
 *
 * This driver makes use of the pervasive unit to
 * engage the desired frequency.
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005-2007
 *
 * Author: Christian Krafft <[email protected]>
 */

#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <asm/machdep.h>
#include <asm/hw_irq.h>
#include <asm/cell-regs.h>

#include "ppc_cbe_cpufreq.h"

/* to write to MIC register */
static u64 MIC_Slow_Fast_Timer_table[] = {
	[0 ... 7] = 0x007fc00000000000ull,
};

/* more values for the MIC */
static u64 MIC_Slow_Next_Timer_table[] = {
	0x0000240000000000ull,
	0x0000268000000000ull,
	0x000029C000000000ull,
	0x00002D0000000000ull,
	0x0000300000000000ull,
	0x0000334000000000ull,
	0x000039C000000000ull,
	0x00003FC000000000ull,
};

/*
 * cbe_cpufreq_set_pmode - engage power mode @pmode on @cpu.
 *
 * Programs the MIC slow/fast timers and then writes the pmode into the
 * low three bits of the PMD power management control register, with
 * interrupts disabled around the whole register sequence.  Always
 * returns 0; with DEBUG it busy-waits until the status register
 * reflects the new pmode and logs the transition time.
 */
int cbe_cpufreq_set_pmode(int cpu, unsigned int pmode)
{
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	unsigned long flags;
	u64 value;
#ifdef DEBUG
	long time;
#endif

	local_irq_save(flags);

	mic_tm_regs = cbe_get_cpu_mic_tm_regs(cpu);
	pmd_regs = cbe_get_cpu_pmd_regs(cpu);

#ifdef DEBUG
	time = jiffies;
#endif

	out_be64(&mic_tm_regs->slow_fast_timer_0, MIC_Slow_Fast_Timer_table[pmode]);
	out_be64(&mic_tm_regs->slow_fast_timer_1, MIC_Slow_Fast_Timer_table[pmode]);

	out_be64(&mic_tm_regs->slow_next_timer_0, MIC_Slow_Next_Timer_table[pmode]);
	out_be64(&mic_tm_regs->slow_next_timer_1, MIC_Slow_Next_Timer_table[pmode]);

	value = in_be64(&pmd_regs->pmcr);
	/* set bits to zero */
	value &= 0xFFFFFFFFFFFFFFF8ull;
	/* set bits to next pmode */
	value |= pmode;

	out_be64(&pmd_regs->pmcr, value);

#ifdef DEBUG
	/* wait until new pmode appears in status register */
	value = in_be64(&pmd_regs->pmsr) & 0x07;
	while (value != pmode) {
		cpu_relax();
		value = in_be64(&pmd_regs->pmsr) & 0x07;
	}

	time = jiffies - time;
	time = jiffies_to_msecs(time);
	pr_debug("had to wait %lu ms for a transition using " \
		"pervasive unit\n", time);
#endif
	local_irq_restore(flags);

	return 0;
}

/*
 * cbe_cpufreq_get_pmode - read the current power mode of @cpu from the
 * low three bits of the PMD power management status register.
 */
int cbe_cpufreq_get_pmode(int cpu)
{
	int ret;
	struct cbe_pmd_regs __iomem *pmd_regs;

	pmd_regs = cbe_get_cpu_pmd_regs(cpu);
	ret = in_be64(&pmd_regs->pmsr) & 0x07;

	return ret;
}
// SPDX-License-Identifier: GPL-2.0 /* * Conexant Digicolor timer driver * * Author: Baruch Siach <[email protected]> * * Copyright (C) 2014 Paradox Innovation Ltd. * * Based on: * Allwinner SoCs hstimer driver * * Copyright (C) 2013 Maxime Ripard * * Maxime Ripard <[email protected]> */ /* * Conexant Digicolor SoCs have 8 configurable timers, named from "Timer A" to * "Timer H". Timer A is the only one with watchdog support, so it is dedicated * to the watchdog driver. This driver uses Timer B for sched_clock(), and * Timer C for clockevents. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/irqreturn.h> #include <linux/sched/clock.h> #include <linux/sched_clock.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> enum { TIMER_A, TIMER_B, TIMER_C, TIMER_D, TIMER_E, TIMER_F, TIMER_G, TIMER_H, }; #define CONTROL(t) ((t)*8) #define COUNT(t) ((t)*8 + 4) #define CONTROL_DISABLE 0 #define CONTROL_ENABLE BIT(0) #define CONTROL_MODE(m) ((m) << 4) #define CONTROL_MODE_ONESHOT CONTROL_MODE(1) #define CONTROL_MODE_PERIODIC CONTROL_MODE(2) struct digicolor_timer { struct clock_event_device ce; void __iomem *base; u32 ticks_per_jiffy; int timer_id; /* one of TIMER_* */ }; static struct digicolor_timer *dc_timer(struct clock_event_device *ce) { return container_of(ce, struct digicolor_timer, ce); } static inline void dc_timer_disable(struct clock_event_device *ce) { struct digicolor_timer *dt = dc_timer(ce); writeb(CONTROL_DISABLE, dt->base + CONTROL(dt->timer_id)); } static inline void dc_timer_enable(struct clock_event_device *ce, u32 mode) { struct digicolor_timer *dt = dc_timer(ce); writeb(CONTROL_ENABLE | mode, dt->base + CONTROL(dt->timer_id)); } static inline void dc_timer_set_count(struct clock_event_device *ce, unsigned long count) { struct digicolor_timer *dt = dc_timer(ce); writel(count, dt->base + COUNT(dt->timer_id)); } 
/* Clockevent shutdown state callback: stop the tick timer. */
static int digicolor_clkevt_shutdown(struct clock_event_device *ce)
{
	dc_timer_disable(ce);
	return 0;
}

/* Switch the clockevent timer to one-shot mode. */
static int digicolor_clkevt_set_oneshot(struct clock_event_device *ce)
{
	dc_timer_disable(ce);
	dc_timer_enable(ce, CONTROL_MODE_ONESHOT);
	return 0;
}

/*
 * Switch to periodic mode: the reload value is one jiffy worth of
 * timer ticks, computed at init time from the timer clock rate.
 */
static int digicolor_clkevt_set_periodic(struct clock_event_device *ce)
{
	struct digicolor_timer *dt = dc_timer(ce);

	dc_timer_disable(ce);
	dc_timer_set_count(ce, dt->ticks_per_jiffy);
	dc_timer_enable(ce, CONTROL_MODE_PERIODIC);
	return 0;
}

/* Arm a one-shot event @evt timer ticks in the future. */
static int digicolor_clkevt_next_event(unsigned long evt,
				       struct clock_event_device *ce)
{
	dc_timer_disable(ce);
	dc_timer_set_count(ce, evt);
	dc_timer_enable(ce, CONTROL_MODE_ONESHOT);

	return 0;
}

/* Clockevent device backed by Timer C (see file header comment). */
static struct digicolor_timer dc_timer_dev = {
	.ce = {
		.name = "digicolor_tick",
		.rating = 340,
		.features = CLOCK_EVT_FEAT_PERIODIC |
			CLOCK_EVT_FEAT_ONESHOT,
		.set_state_shutdown = digicolor_clkevt_shutdown,
		.set_state_periodic = digicolor_clkevt_set_periodic,
		.set_state_oneshot = digicolor_clkevt_set_oneshot,
		.tick_resume = digicolor_clkevt_shutdown,
		.set_next_event = digicolor_clkevt_next_event,
	},
	.timer_id = TIMER_C,
};

/* Timer C interrupt: forward the tick to the clockevent framework. */
static irqreturn_t digicolor_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = dev_id;

	evt->event_handler(evt);

	return IRQ_HANDLED;
}

/*
 * sched_clock() read callback. Timer B counts down (it is registered
 * below with clocksource_mmio_readl_down), so invert the raw value to
 * obtain a monotonically increasing clock.
 */
static u64 notrace digicolor_timer_sched_read(void)
{
	return ~readl(dc_timer_dev.base + COUNT(TIMER_B));
}

/*
 * digicolor_timer_init - probe callback for the "cnxt,cx92755-timer" node
 * @node: the timer device tree node
 *
 * Maps the shared timer register block, sets up Timer B as a free
 * running 32-bit clocksource/sched_clock, and registers Timer C as the
 * clockevent device with its interrupt.  Returns 0 on success or a
 * negative errno.
 */
static int __init digicolor_timer_init(struct device_node *node)
{
	unsigned long rate;
	struct clk *clk;
	int ret, irq;

	/*
	 * timer registers are shared with the watchdog timer;
	 * don't map exclusively
	 */
	dc_timer_dev.base = of_iomap(node, 0);
	if (!dc_timer_dev.base) {
		pr_err("Can't map registers\n");
		return -ENXIO;
	}

	/* one interrupt per hardware timer; pick Timer C's entry */
	irq = irq_of_parse_and_map(node, dc_timer_dev.timer_id);
	if (irq <= 0) {
		pr_err("Can't parse IRQ\n");
		return -EINVAL;
	}

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_err("Can't get timer clock\n");
		return PTR_ERR(clk);
	}
	clk_prepare_enable(clk);
	rate = clk_get_rate(clk);
	dc_timer_dev.ticks_per_jiffy = DIV_ROUND_UP(rate, HZ);

	/* start Timer B as a free-running down counter from UINT_MAX */
	writeb(CONTROL_DISABLE, dc_timer_dev.base + CONTROL(TIMER_B));
	writel(UINT_MAX, dc_timer_dev.base + COUNT(TIMER_B));
	writeb(CONTROL_ENABLE, dc_timer_dev.base + CONTROL(TIMER_B));

	sched_clock_register(digicolor_timer_sched_read, 32, rate);
	clocksource_mmio_init(dc_timer_dev.base + COUNT(TIMER_B), node->name,
			      rate, 340, 32, clocksource_mmio_readl_down);

	ret = request_irq(irq, digicolor_timer_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL, "digicolor_timerC",
			  &dc_timer_dev.ce);
	if (ret) {
		pr_warn("request of timer irq %d failed (%d)\n", irq, ret);
		return ret;
	}

	dc_timer_dev.ce.cpumask = cpu_possible_mask;
	dc_timer_dev.ce.irq = irq;

	clockevents_config_and_register(&dc_timer_dev.ce, rate, 0, 0xffffffff);

	return 0;
}
TIMER_OF_DECLARE(conexant_digicolor, "cnxt,cx92755-timer",
		 digicolor_timer_init);
/* SPDX-License-Identifier: GPL-2.0 */ /* * Defines for the Maxlinear MX58x family of tuners/demods * * Copyright (C) 2014 Digital Devices GmbH * * based on code: * Copyright (c) 2011-2013 MaxLinear, Inc. All rights reserved * which was released under GPL V2 */ enum MXL_BOOL_E { MXL_DISABLE = 0, MXL_ENABLE = 1, MXL_FALSE = 0, MXL_TRUE = 1, MXL_INVALID = 0, MXL_VALID = 1, MXL_NO = 0, MXL_YES = 1, MXL_OFF = 0, MXL_ON = 1 }; /* Firmware-Host Command IDs */ enum MXL_HYDRA_HOST_CMD_ID_E { /* --Device command IDs-- */ MXL_HYDRA_DEV_NO_OP_CMD = 0, /* No OP */ MXL_HYDRA_DEV_SET_POWER_MODE_CMD = 1, MXL_HYDRA_DEV_SET_OVERWRITE_DEF_CMD = 2, /* Host-used CMD, not used by firmware */ MXL_HYDRA_DEV_FIRMWARE_DOWNLOAD_CMD = 3, /* Additional CONTROL types from DTV */ MXL_HYDRA_DEV_SET_BROADCAST_PID_STB_ID_CMD = 4, MXL_HYDRA_DEV_GET_PMM_SLEEP_CMD = 5, /* --Tuner command IDs-- */ MXL_HYDRA_TUNER_TUNE_CMD = 6, MXL_HYDRA_TUNER_GET_STATUS_CMD = 7, /* --Demod command IDs-- */ MXL_HYDRA_DEMOD_SET_PARAM_CMD = 8, MXL_HYDRA_DEMOD_GET_STATUS_CMD = 9, MXL_HYDRA_DEMOD_RESET_FEC_COUNTER_CMD = 10, MXL_HYDRA_DEMOD_SET_PKT_NUM_CMD = 11, MXL_HYDRA_DEMOD_SET_IQ_SOURCE_CMD = 12, MXL_HYDRA_DEMOD_GET_IQ_DATA_CMD = 13, MXL_HYDRA_DEMOD_GET_M68HC05_VER_CMD = 14, MXL_HYDRA_DEMOD_SET_ERROR_COUNTER_MODE_CMD = 15, /* --- ABORT channel tune */ MXL_HYDRA_ABORT_TUNE_CMD = 16, /* Abort current tune command. 
*/ /* --SWM/FSK command IDs-- */ MXL_HYDRA_FSK_RESET_CMD = 17, MXL_HYDRA_FSK_MSG_CMD = 18, MXL_HYDRA_FSK_SET_OP_MODE_CMD = 19, /* --DiSeqC command IDs-- */ MXL_HYDRA_DISEQC_MSG_CMD = 20, MXL_HYDRA_DISEQC_COPY_MSG_TO_MAILBOX = 21, MXL_HYDRA_DISEQC_CFG_MSG_CMD = 22, /* --- FFT Debug Command IDs-- */ MXL_HYDRA_REQ_FFT_SPECTRUM_CMD = 23, /* -- Demod scramblle code */ MXL_HYDRA_DEMOD_SCRAMBLE_CODE_CMD = 24, /* ---For host to know how many commands in total */ MXL_HYDRA_LAST_HOST_CMD = 25, MXL_HYDRA_DEMOD_INTR_TYPE_CMD = 47, MXL_HYDRA_DEV_INTR_CLEAR_CMD = 48, MXL_HYDRA_TUNER_SPECTRUM_REQ_CMD = 53, MXL_HYDRA_TUNER_ACTIVATE_CMD = 55, MXL_HYDRA_DEV_CFG_POWER_MODE_CMD = 56, MXL_HYDRA_DEV_XTAL_CAP_CMD = 57, MXL_HYDRA_DEV_CFG_SKU_CMD = 58, MXL_HYDRA_TUNER_SPECTRUM_MIN_GAIN_CMD = 59, MXL_HYDRA_DISEQC_CONT_TONE_CFG = 60, MXL_HYDRA_DEV_RF_WAKE_UP_CMD = 61, MXL_HYDRA_DEMOD_CFG_EQ_CTRL_PARAM_CMD = 62, MXL_HYDRA_DEMOD_FREQ_OFFSET_SEARCH_RANGE_CMD = 63, MXL_HYDRA_DEV_REQ_PWR_FROM_ADCRSSI_CMD = 64, MXL_XCPU_PID_FLT_CFG_CMD = 65, MXL_XCPU_SHMEM_TEST_CMD = 66, MXL_XCPU_ABORT_TUNE_CMD = 67, MXL_XCPU_CHAN_TUNE_CMD = 68, MXL_XCPU_FLT_BOND_HDRS_CMD = 69, MXL_HYDRA_DEV_BROADCAST_WAKE_UP_CMD = 70, MXL_HYDRA_FSK_CFG_FSK_FREQ_CMD = 71, MXL_HYDRA_FSK_POWER_DOWN_CMD = 72, MXL_XCPU_CLEAR_CB_STATS_CMD = 73, MXL_XCPU_CHAN_BOND_RESTART_CMD = 74 }; #define MXL_ENABLE_BIG_ENDIAN (0) #define MXL_HYDRA_OEM_MAX_BLOCK_WRITE_LENGTH 248 #define MXL_HYDRA_OEM_MAX_CMD_BUFF_LEN (248) #define MXL_HYDRA_CAP_MIN 10 #define MXL_HYDRA_CAP_MAX 33 #define MXL_HYDRA_PLID_REG_READ 0xFB /* Read register PLID */ #define MXL_HYDRA_PLID_REG_WRITE 0xFC /* Write register PLID */ #define MXL_HYDRA_PLID_CMD_READ 0xFD /* Command Read PLID */ #define MXL_HYDRA_PLID_CMD_WRITE 0xFE /* Command Write PLID */ #define MXL_HYDRA_REG_SIZE_IN_BYTES 4 /* Hydra register size in bytes */ #define MXL_HYDRA_I2C_HDR_SIZE (2 * sizeof(u8)) /* PLID + LEN(0xFF) */ #define MXL_HYDRA_CMD_HEADER_SIZE (MXL_HYDRA_REG_SIZE_IN_BYTES + 
MXL_HYDRA_I2C_HDR_SIZE) #define MXL_HYDRA_SKU_ID_581 0 #define MXL_HYDRA_SKU_ID_584 1 #define MXL_HYDRA_SKU_ID_585 2 #define MXL_HYDRA_SKU_ID_544 3 #define MXL_HYDRA_SKU_ID_561 4 #define MXL_HYDRA_SKU_ID_582 5 #define MXL_HYDRA_SKU_ID_568 6 /* macro for register write data buffer size * (PLID + LEN (0xFF) + RegAddr + RegData) */ #define MXL_HYDRA_REG_WRITE_LEN (MXL_HYDRA_I2C_HDR_SIZE + (2 * MXL_HYDRA_REG_SIZE_IN_BYTES)) /* macro to extract a single byte from 4-byte(32-bit) data */ #define GET_BYTE(x, n) (((x) >> (8*(n))) & 0xFF) #define MAX_CMD_DATA 512 #define MXL_GET_REG_MASK_32(lsb_loc, num_of_bits) ((0xFFFFFFFF >> (32 - (num_of_bits))) << (lsb_loc)) #define FW_DL_SIGN (0xDEADBEEF) #define MBIN_FORMAT_VERSION '1' #define MBIN_FILE_HEADER_ID 'M' #define MBIN_SEGMENT_HEADER_ID 'S' #define MBIN_MAX_FILE_LENGTH (1<<23) struct MBIN_FILE_HEADER_T { u8 id; u8 fmt_version; u8 header_len; u8 num_segments; u8 entry_address[4]; u8 image_size24[3]; u8 image_checksum; u8 reserved[4]; }; struct MBIN_FILE_T { struct MBIN_FILE_HEADER_T header; u8 data[]; }; struct MBIN_SEGMENT_HEADER_T { u8 id; u8 len24[3]; u8 address[4]; }; struct MBIN_SEGMENT_T { struct MBIN_SEGMENT_HEADER_T header; u8 data[]; }; enum MXL_CMD_TYPE_E { MXL_CMD_WRITE = 0, MXL_CMD_READ }; #define BUILD_HYDRA_CMD(cmd_id, req_type, size, data_ptr, cmd_buff) \ do { \ cmd_buff[0] = ((req_type == MXL_CMD_WRITE) ? MXL_HYDRA_PLID_CMD_WRITE : MXL_HYDRA_PLID_CMD_READ); \ cmd_buff[1] = (size > 251) ? 
0xff : (u8) (size + 4); \ cmd_buff[2] = size; \ cmd_buff[3] = cmd_id; \ cmd_buff[4] = 0x00; \ cmd_buff[5] = 0x00; \ convert_endian(MXL_ENABLE_BIG_ENDIAN, size, (u8 *)data_ptr); \ memcpy((void *)&cmd_buff[6], data_ptr, size); \ } while (0) struct MXL_REG_FIELD_T { u32 reg_addr; u8 lsb_pos; u8 num_of_bits; }; struct MXL_DEV_CMD_DATA_T { u32 data_size; u8 data[MAX_CMD_DATA]; }; enum MXL_HYDRA_SKU_TYPE_E { MXL_HYDRA_SKU_TYPE_MIN = 0x00, MXL_HYDRA_SKU_TYPE_581 = 0x00, MXL_HYDRA_SKU_TYPE_584 = 0x01, MXL_HYDRA_SKU_TYPE_585 = 0x02, MXL_HYDRA_SKU_TYPE_544 = 0x03, MXL_HYDRA_SKU_TYPE_561 = 0x04, MXL_HYDRA_SKU_TYPE_5XX = 0x05, MXL_HYDRA_SKU_TYPE_5YY = 0x06, MXL_HYDRA_SKU_TYPE_511 = 0x07, MXL_HYDRA_SKU_TYPE_561_DE = 0x08, MXL_HYDRA_SKU_TYPE_582 = 0x09, MXL_HYDRA_SKU_TYPE_541 = 0x0A, MXL_HYDRA_SKU_TYPE_568 = 0x0B, MXL_HYDRA_SKU_TYPE_542 = 0x0C, MXL_HYDRA_SKU_TYPE_MAX = 0x0D, }; struct MXL_HYDRA_SKU_COMMAND_T { enum MXL_HYDRA_SKU_TYPE_E sku_type; }; enum MXL_HYDRA_DEMOD_ID_E { MXL_HYDRA_DEMOD_ID_0 = 0, MXL_HYDRA_DEMOD_ID_1, MXL_HYDRA_DEMOD_ID_2, MXL_HYDRA_DEMOD_ID_3, MXL_HYDRA_DEMOD_ID_4, MXL_HYDRA_DEMOD_ID_5, MXL_HYDRA_DEMOD_ID_6, MXL_HYDRA_DEMOD_ID_7, MXL_HYDRA_DEMOD_MAX }; #define MXL_DEMOD_SCRAMBLE_SEQ_LEN 12 #define MAX_STEP_SIZE_24_XTAL_102_05_KHZ 195 #define MAX_STEP_SIZE_24_XTAL_204_10_KHZ 215 #define MAX_STEP_SIZE_24_XTAL_306_15_KHZ 203 #define MAX_STEP_SIZE_24_XTAL_408_20_KHZ 177 #define MAX_STEP_SIZE_27_XTAL_102_05_KHZ 195 #define MAX_STEP_SIZE_27_XTAL_204_10_KHZ 215 #define MAX_STEP_SIZE_27_XTAL_306_15_KHZ 203 #define MAX_STEP_SIZE_27_XTAL_408_20_KHZ 177 #define MXL_HYDRA_SPECTRUM_MIN_FREQ_KHZ 300000 #define MXL_HYDRA_SPECTRUM_MAX_FREQ_KHZ 2350000 enum MXL_DEMOD_CHAN_PARAMS_OFFSET_E { DMD_STANDARD_ADDR = 0, DMD_SPECTRUM_INVERSION_ADDR, DMD_SPECTRUM_ROLL_OFF_ADDR, DMD_SYMBOL_RATE_ADDR, DMD_MODULATION_SCHEME_ADDR, DMD_FEC_CODE_RATE_ADDR, DMD_SNR_ADDR, DMD_FREQ_OFFSET_ADDR, DMD_CTL_FREQ_OFFSET_ADDR, DMD_STR_FREQ_OFFSET_ADDR, DMD_FTL_FREQ_OFFSET_ADDR, 
DMD_STR_NBC_SYNC_LOCK_ADDR, DMD_CYCLE_SLIP_COUNT_ADDR, DMD_DISPLAY_IQ_ADDR, DMD_DVBS2_CRC_ERRORS_ADDR, DMD_DVBS2_PER_COUNT_ADDR, DMD_DVBS2_PER_WINDOW_ADDR, DMD_DVBS_CORR_RS_ERRORS_ADDR, DMD_DVBS_UNCORR_RS_ERRORS_ADDR, DMD_DVBS_BER_COUNT_ADDR, DMD_DVBS_BER_WINDOW_ADDR, DMD_TUNER_ID_ADDR, DMD_DVBS2_PILOT_ON_OFF_ADDR, DMD_FREQ_SEARCH_RANGE_IN_KHZ_ADDR, MXL_DEMOD_CHAN_PARAMS_BUFF_SIZE, }; enum MXL_HYDRA_TUNER_ID_E { MXL_HYDRA_TUNER_ID_0 = 0, MXL_HYDRA_TUNER_ID_1, MXL_HYDRA_TUNER_ID_2, MXL_HYDRA_TUNER_ID_3, MXL_HYDRA_TUNER_MAX }; enum MXL_HYDRA_BCAST_STD_E { MXL_HYDRA_DSS = 0, MXL_HYDRA_DVBS, MXL_HYDRA_DVBS2, }; enum MXL_HYDRA_FEC_E { MXL_HYDRA_FEC_AUTO = 0, MXL_HYDRA_FEC_1_2, MXL_HYDRA_FEC_3_5, MXL_HYDRA_FEC_2_3, MXL_HYDRA_FEC_3_4, MXL_HYDRA_FEC_4_5, MXL_HYDRA_FEC_5_6, MXL_HYDRA_FEC_6_7, MXL_HYDRA_FEC_7_8, MXL_HYDRA_FEC_8_9, MXL_HYDRA_FEC_9_10, }; enum MXL_HYDRA_MODULATION_E { MXL_HYDRA_MOD_AUTO = 0, MXL_HYDRA_MOD_QPSK, MXL_HYDRA_MOD_8PSK }; enum MXL_HYDRA_SPECTRUM_E { MXL_HYDRA_SPECTRUM_AUTO = 0, MXL_HYDRA_SPECTRUM_INVERTED, MXL_HYDRA_SPECTRUM_NON_INVERTED, }; enum MXL_HYDRA_ROLLOFF_E { MXL_HYDRA_ROLLOFF_AUTO = 0, MXL_HYDRA_ROLLOFF_0_20, MXL_HYDRA_ROLLOFF_0_25, MXL_HYDRA_ROLLOFF_0_35 }; enum MXL_HYDRA_PILOTS_E { MXL_HYDRA_PILOTS_OFF = 0, MXL_HYDRA_PILOTS_ON, MXL_HYDRA_PILOTS_AUTO }; enum MXL_HYDRA_CONSTELLATION_SRC_E { MXL_HYDRA_FORMATTER = 0, MXL_HYDRA_LEGACY_FEC, MXL_HYDRA_FREQ_RECOVERY, MXL_HYDRA_NBC, MXL_HYDRA_CTL, MXL_HYDRA_EQ, }; struct MXL_HYDRA_DEMOD_LOCK_T { int agc_lock; /* AGC lock info */ int fec_lock; /* Demod FEC block lock info */ }; struct MXL_HYDRA_DEMOD_STATUS_DVBS_T { u32 rs_errors; /* RS decoder err counter */ u32 ber_window; /* Ber Windows */ u32 ber_count; /* BER count */ u32 ber_window_iter1; /* Ber Windows - post viterbi */ u32 ber_count_iter1; /* BER count - post viterbi */ }; struct MXL_HYDRA_DEMOD_STATUS_DSS_T { u32 rs_errors; /* RS decoder err counter */ u32 ber_window; /* Ber Windows */ u32 ber_count; /* BER count */ }; struct 
MXL_HYDRA_DEMOD_STATUS_DVBS2_T { u32 crc_errors; /* CRC error counter */ u32 packet_error_count; /* Number of packet errors */ u32 total_packets; /* Total packets */ }; struct MXL_HYDRA_DEMOD_STATUS_T { enum MXL_HYDRA_BCAST_STD_E standard_mask; /* Standard DVB-S, DVB-S2 or DSS */ union { struct MXL_HYDRA_DEMOD_STATUS_DVBS_T demod_status_dvbs; /* DVB-S demod status */ struct MXL_HYDRA_DEMOD_STATUS_DVBS2_T demod_status_dvbs2; /* DVB-S2 demod status */ struct MXL_HYDRA_DEMOD_STATUS_DSS_T demod_status_dss; /* DSS demod status */ } u; }; struct MXL_HYDRA_DEMOD_SIG_OFFSET_INFO_T { s32 carrier_offset_in_hz; /* CRL offset info */ s32 symbol_offset_in_symbol; /* SRL offset info */ }; struct MXL_HYDRA_DEMOD_SCRAMBLE_INFO_T { u8 scramble_sequence[MXL_DEMOD_SCRAMBLE_SEQ_LEN]; /* scramble sequence */ u32 scramble_code; /* scramble gold code */ }; enum MXL_HYDRA_SPECTRUM_STEP_SIZE_E { MXL_HYDRA_STEP_SIZE_24_XTAL_102_05KHZ, /* 102.05 KHz for 24 MHz XTAL */ MXL_HYDRA_STEP_SIZE_24_XTAL_204_10KHZ, /* 204.10 KHz for 24 MHz XTAL */ MXL_HYDRA_STEP_SIZE_24_XTAL_306_15KHZ, /* 306.15 KHz for 24 MHz XTAL */ MXL_HYDRA_STEP_SIZE_24_XTAL_408_20KHZ, /* 408.20 KHz for 24 MHz XTAL */ MXL_HYDRA_STEP_SIZE_27_XTAL_102_05KHZ, /* 102.05 KHz for 27 MHz XTAL */ MXL_HYDRA_STEP_SIZE_27_XTAL_204_35KHZ, /* 204.35 KHz for 27 MHz XTAL */ MXL_HYDRA_STEP_SIZE_27_XTAL_306_52KHZ, /* 306.52 KHz for 27 MHz XTAL */ MXL_HYDRA_STEP_SIZE_27_XTAL_408_69KHZ, /* 408.69 KHz for 27 MHz XTAL */ }; enum MXL_HYDRA_SPECTRUM_RESOLUTION_E { MXL_HYDRA_SPECTRUM_RESOLUTION_00_1_DB, /* 0.1 dB */ MXL_HYDRA_SPECTRUM_RESOLUTION_01_0_DB, /* 1.0 dB */ MXL_HYDRA_SPECTRUM_RESOLUTION_05_0_DB, /* 5.0 dB */ MXL_HYDRA_SPECTRUM_RESOLUTION_10_0_DB, /* 10 dB */ }; enum MXL_HYDRA_SPECTRUM_ERROR_CODE_E { MXL_SPECTRUM_NO_ERROR, MXL_SPECTRUM_INVALID_PARAMETER, MXL_SPECTRUM_INVALID_STEP_SIZE, MXL_SPECTRUM_BW_CANNOT_BE_COVERED, MXL_SPECTRUM_DEMOD_BUSY, MXL_SPECTRUM_TUNER_NOT_ENABLED, }; struct MXL_HYDRA_SPECTRUM_REQ_T { u32 tuner_index; /* TUNER Ctrl: 
one of MXL58x_TUNER_ID_E */ u32 demod_index; /* DEMOD Ctrl: one of MXL58x_DEMOD_ID_E */ enum MXL_HYDRA_SPECTRUM_STEP_SIZE_E step_size_in_khz; u32 starting_freq_ink_hz; u32 total_steps; enum MXL_HYDRA_SPECTRUM_RESOLUTION_E spectrum_division; }; enum MXL_HYDRA_SEARCH_FREQ_OFFSET_TYPE_E { MXL_HYDRA_SEARCH_MAX_OFFSET = 0, /* DMD searches for max freq offset (i.e. 5MHz) */ MXL_HYDRA_SEARCH_BW_PLUS_ROLLOFF, /* DMD searches for BW + ROLLOFF/2 */ }; struct MXL58X_CFG_FREQ_OFF_SEARCH_RANGE_T { u32 demod_index; enum MXL_HYDRA_SEARCH_FREQ_OFFSET_TYPE_E search_type; }; /* there are two slices * slice0 - TS0, TS1, TS2 & TS3 * slice1 - TS4, TS5, TS6 & TS7 */ #define MXL_HYDRA_TS_SLICE_MAX 2 #define MAX_FIXED_PID_NUM 32 #define MXL_HYDRA_NCO_CLK 418 /* 418 MHz */ #define MXL_HYDRA_MAX_TS_CLOCK 139 /* 139 MHz */ #define MXL_HYDRA_TS_FIXED_PID_FILT_SIZE 32 #define MXL_HYDRA_SHARED_PID_FILT_SIZE_DEFAULT 33 /* Shared PID filter size in 1-1 mux mode */ #define MXL_HYDRA_SHARED_PID_FILT_SIZE_2_TO_1 66 /* Shared PID filter size in 2-1 mux mode */ #define MXL_HYDRA_SHARED_PID_FILT_SIZE_4_TO_1 132 /* Shared PID filter size in 4-1 mux mode */ enum MXL_HYDRA_PID_BANK_TYPE_E { MXL_HYDRA_SOFTWARE_PID_BANK = 0, MXL_HYDRA_HARDWARE_PID_BANK, }; enum MXL_HYDRA_TS_MUX_MODE_E { MXL_HYDRA_TS_MUX_PID_REMAP = 0, MXL_HYDRA_TS_MUX_PREFIX_EXTRA_HEADER = 1, }; enum MXL_HYDRA_TS_MUX_TYPE_E { MXL_HYDRA_TS_MUX_DISABLE = 0, /* No Mux ( 1 TSIF to 1 TSIF) */ MXL_HYDRA_TS_MUX_2_TO_1, /* Mux 2 TSIF to 1 TSIF */ MXL_HYDRA_TS_MUX_4_TO_1, /* Mux 4 TSIF to 1 TSIF */ }; enum MXL_HYDRA_TS_GROUP_E { MXL_HYDRA_TS_GROUP_0_3 = 0, /* TS group 0 to 3 (TS0, TS1, TS2 & TS3) */ MXL_HYDRA_TS_GROUP_4_7, /* TS group 0 to 3 (TS4, TS5, TS6 & TS7) */ }; enum MXL_HYDRA_TS_PID_FLT_CTRL_E { MXL_HYDRA_TS_PIDS_ALLOW_ALL = 0, /* Allow all pids */ MXL_HYDRA_TS_PIDS_DROP_ALL, /* Drop all pids */ MXL_HYDRA_TS_INVALIDATE_PID_FILTER, /* Delete current PD filter in the device */ }; enum MXL_HYDRA_TS_PID_TYPE_E { MXL_HYDRA_TS_PID_FIXED = 0, 
MXL_HYDRA_TS_PID_REGULAR, }; struct MXL_HYDRA_TS_PID_T { u16 original_pid; /* pid from TS */ u16 remapped_pid; /* remapped pid */ enum MXL_BOOL_E enable; /* enable or disable pid */ enum MXL_BOOL_E allow_or_drop; /* allow or drop pid */ enum MXL_BOOL_E enable_pid_remap; /* enable or disable pid remap */ u8 bond_id; /* Bond ID in A0 always 0 - Only for 568 Sku */ u8 dest_id; /* Output port ID for the PID - Only for 568 Sku */ }; struct MXL_HYDRA_TS_MUX_PREFIX_HEADER_T { enum MXL_BOOL_E enable; u8 num_byte; u8 header[12]; }; enum MXL_HYDRA_PID_FILTER_BANK_E { MXL_HYDRA_PID_BANK_A = 0, MXL_HYDRA_PID_BANK_B, }; enum MXL_HYDRA_MPEG_DATA_FMT_E { MXL_HYDRA_MPEG_SERIAL_MSB_1ST = 0, MXL_HYDRA_MPEG_SERIAL_LSB_1ST, MXL_HYDRA_MPEG_SYNC_WIDTH_BIT = 0, MXL_HYDRA_MPEG_SYNC_WIDTH_BYTE }; enum MXL_HYDRA_MPEG_MODE_E { MXL_HYDRA_MPEG_MODE_SERIAL_4_WIRE = 0, /* MPEG 4 Wire serial mode */ MXL_HYDRA_MPEG_MODE_SERIAL_3_WIRE, /* MPEG 3 Wire serial mode */ MXL_HYDRA_MPEG_MODE_SERIAL_2_WIRE, /* MPEG 2 Wire serial mode */ MXL_HYDRA_MPEG_MODE_PARALLEL /* MPEG parallel mode - valid only for MxL581 */ }; enum MXL_HYDRA_MPEG_CLK_TYPE_E { MXL_HYDRA_MPEG_CLK_CONTINUOUS = 0, /* Continuous MPEG clock */ MXL_HYDRA_MPEG_CLK_GAPPED, /* Gapped (gated) MPEG clock */ }; enum MXL_HYDRA_MPEG_CLK_FMT_E { MXL_HYDRA_MPEG_ACTIVE_LOW = 0, MXL_HYDRA_MPEG_ACTIVE_HIGH, MXL_HYDRA_MPEG_CLK_NEGATIVE = 0, MXL_HYDRA_MPEG_CLK_POSITIVE, MXL_HYDRA_MPEG_CLK_IN_PHASE = 0, MXL_HYDRA_MPEG_CLK_INVERTED, }; enum MXL_HYDRA_MPEG_CLK_PHASE_E { MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_0_DEG = 0, MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_90_DEG, MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_180_DEG, MXL_HYDRA_MPEG_CLK_PHASE_SHIFT_270_DEG }; enum MXL_HYDRA_MPEG_ERR_INDICATION_E { MXL_HYDRA_MPEG_ERR_REPLACE_SYNC = 0, MXL_HYDRA_MPEG_ERR_REPLACE_VALID, MXL_HYDRA_MPEG_ERR_INDICATION_DISABLED }; struct MXL_HYDRA_MPEGOUT_PARAM_T { int enable; /* Enable or Disable MPEG OUT */ enum MXL_HYDRA_MPEG_CLK_TYPE_E mpeg_clk_type; /* Continuous or gapped */ enum MXL_HYDRA_MPEG_CLK_FMT_E 
mpeg_clk_pol; /* MPEG Clk polarity */ u8 max_mpeg_clk_rate; /* Max MPEG Clk rate (0 - 104 MHz, 139 MHz) */ enum MXL_HYDRA_MPEG_CLK_PHASE_E mpeg_clk_phase; /* MPEG Clk phase */ enum MXL_HYDRA_MPEG_DATA_FMT_E lsb_or_msb_first; /* LSB first or MSB first in TS transmission */ enum MXL_HYDRA_MPEG_DATA_FMT_E mpeg_sync_pulse_width; /* MPEG SYNC pulse width (1-bit or 1-byte) */ enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_valid_pol; /* MPEG VALID polarity */ enum MXL_HYDRA_MPEG_CLK_FMT_E mpeg_sync_pol; /* MPEG SYNC polarity */ enum MXL_HYDRA_MPEG_MODE_E mpeg_mode; /* config 4/3/2-wire serial or parallel TS out */ enum MXL_HYDRA_MPEG_ERR_INDICATION_E mpeg_error_indication; /* Enable or Disable MPEG error indication */ }; enum MXL_HYDRA_EXT_TS_IN_ID_E { MXL_HYDRA_EXT_TS_IN_0 = 0, MXL_HYDRA_EXT_TS_IN_1, MXL_HYDRA_EXT_TS_IN_2, MXL_HYDRA_EXT_TS_IN_3, MXL_HYDRA_EXT_TS_IN_MAX }; enum MXL_HYDRA_TS_OUT_ID_E { MXL_HYDRA_TS_OUT_0 = 0, MXL_HYDRA_TS_OUT_1, MXL_HYDRA_TS_OUT_2, MXL_HYDRA_TS_OUT_3, MXL_HYDRA_TS_OUT_4, MXL_HYDRA_TS_OUT_5, MXL_HYDRA_TS_OUT_6, MXL_HYDRA_TS_OUT_7, MXL_HYDRA_TS_OUT_MAX }; enum MXL_HYDRA_TS_DRIVE_STRENGTH_E { MXL_HYDRA_TS_DRIVE_STRENGTH_1X = 0, MXL_HYDRA_TS_DRIVE_STRENGTH_2X, MXL_HYDRA_TS_DRIVE_STRENGTH_3X, MXL_HYDRA_TS_DRIVE_STRENGTH_4X, MXL_HYDRA_TS_DRIVE_STRENGTH_5X, MXL_HYDRA_TS_DRIVE_STRENGTH_6X, MXL_HYDRA_TS_DRIVE_STRENGTH_7X, MXL_HYDRA_TS_DRIVE_STRENGTH_8X }; enum MXL_HYDRA_DEVICE_E { MXL_HYDRA_DEVICE_581 = 0, MXL_HYDRA_DEVICE_584, MXL_HYDRA_DEVICE_585, MXL_HYDRA_DEVICE_544, MXL_HYDRA_DEVICE_561, MXL_HYDRA_DEVICE_TEST, MXL_HYDRA_DEVICE_582, MXL_HYDRA_DEVICE_541, MXL_HYDRA_DEVICE_568, MXL_HYDRA_DEVICE_542, MXL_HYDRA_DEVICE_541S, MXL_HYDRA_DEVICE_561S, MXL_HYDRA_DEVICE_581S, MXL_HYDRA_DEVICE_MAX }; /* Demod IQ data */ struct MXL_HYDRA_DEMOD_IQ_SRC_T { u32 demod_id; u32 source_of_iq; /* == 0, it means I/Q comes from Formatter * == 1, Legacy FEC * == 2, Frequency Recovery * == 3, NBC * == 4, CTL * == 5, EQ * == 6, FPGA */ }; struct MXL_HYDRA_DEMOD_ABORT_TUNE_T { u32 
demod_id; }; struct MXL_HYDRA_TUNER_CMD { u8 tuner_id; u8 enable; }; /* Demod Para for Channel Tune */ struct MXL_HYDRA_DEMOD_PARAM_T { u32 tuner_index; u32 demod_index; u32 frequency_in_hz; /* Frequency */ u32 standard; /* one of MXL_HYDRA_BCAST_STD_E */ u32 spectrum_inversion; /* Input : Spectrum inversion. */ u32 roll_off; /* rollOff (alpha) factor */ u32 symbol_rate_in_hz; /* Symbol rate */ u32 pilots; /* TRUE = pilots enabled */ u32 modulation_scheme; /* Input : Modulation Scheme is one of MXL_HYDRA_MODULATION_E */ u32 fec_code_rate; /* Input : Forward error correction rate. Is one of MXL_HYDRA_FEC_E */ u32 max_carrier_offset_in_mhz; /* Maximum carrier freq offset in MHz. Same as freqSearchRangeKHz, but in unit of MHz. */ }; struct MXL_HYDRA_DEMOD_SCRAMBLE_CODE_T { u32 demod_index; u8 scramble_sequence[12]; /* scramble sequence */ u32 scramble_code; /* scramble gold code */ }; struct MXL_INTR_CFG_T { u32 intr_type; u32 intr_duration_in_nano_secs; u32 intr_mask; }; struct MXL_HYDRA_POWER_MODE_CMD { u8 power_mode; /* enumeration values are defined in MXL_HYDRA_PWR_MODE_E (device API.h) */ }; struct MXL_HYDRA_RF_WAKEUP_PARAM_T { u32 time_interval_in_seconds; /* in seconds */ u32 tuner_index; s32 rssi_threshold; }; struct MXL_HYDRA_RF_WAKEUP_CFG_T { u32 tuner_count; struct MXL_HYDRA_RF_WAKEUP_PARAM_T params; }; enum MXL_HYDRA_AUX_CTRL_MODE_E { MXL_HYDRA_AUX_CTRL_MODE_FSK = 0, /* Select FSK controller */ MXL_HYDRA_AUX_CTRL_MODE_DISEQC, /* Select DiSEqC controller */ }; enum MXL_HYDRA_DISEQC_OPMODE_E { MXL_HYDRA_DISEQC_ENVELOPE_MODE = 0, MXL_HYDRA_DISEQC_TONE_MODE, }; enum MXL_HYDRA_DISEQC_VER_E { MXL_HYDRA_DISEQC_1_X = 0, /* Config DiSEqC 1.x mode */ MXL_HYDRA_DISEQC_2_X, /* Config DiSEqC 2.x mode */ MXL_HYDRA_DISEQC_DISABLE /* Disable DiSEqC */ }; enum MXL_HYDRA_DISEQC_CARRIER_FREQ_E { MXL_HYDRA_DISEQC_CARRIER_FREQ_22KHZ = 0, /* DiSEqC signal frequency of 22 KHz */ MXL_HYDRA_DISEQC_CARRIER_FREQ_33KHZ, /* DiSEqC signal frequency of 33 KHz */ 
MXL_HYDRA_DISEQC_CARRIER_FREQ_44KHZ /* DiSEqC signal frequency of 44 KHz */ }; enum MXL_HYDRA_DISEQC_ID_E { MXL_HYDRA_DISEQC_ID_0 = 0, MXL_HYDRA_DISEQC_ID_1, MXL_HYDRA_DISEQC_ID_2, MXL_HYDRA_DISEQC_ID_3 }; enum MXL_HYDRA_FSK_OP_MODE_E { MXL_HYDRA_FSK_CFG_TYPE_39KPBS = 0, /* 39.0kbps */ MXL_HYDRA_FSK_CFG_TYPE_39_017KPBS, /* 39.017kbps */ MXL_HYDRA_FSK_CFG_TYPE_115_2KPBS /* 115.2kbps */ }; struct MXL58X_DSQ_OP_MODE_T { u32 diseqc_id; /* DSQ 0, 1, 2 or 3 */ u32 op_mode; /* Envelope mode (0) or internal tone mode (1) */ u32 version; /* 0: 1.0, 1: 1.1, 2: Disable */ u32 center_freq; /* 0: 22KHz, 1: 33KHz and 2: 44 KHz */ }; struct MXL_HYDRA_DISEQC_CFG_CONT_TONE_T { u32 diseqc_id; u32 cont_tone_flag; /* 1: Enable , 0: Disable */ };
/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Ke Yu
 *    Zhiyuan Lv <[email protected]>
 *
 * Contributors:
 *    Terrence Xu <[email protected]>
 *    Changbin Du <[email protected]>
 *    Bing Niu <[email protected]>
 *    Zhi Wang <[email protected]>
 *
 */

#ifndef _GVT_EDID_H_
#define _GVT_EDID_H_

#include <linux/types.h>

struct intel_vgpu;

#define EDID_SIZE		128
#define EDID_ADDR		0x50 /* Linux hvm EDID addr */

/* One cached EDID block for a virtual display of a vGPU. */
struct intel_vgpu_edid_data {
	bool data_valid;	/* true once edid_block holds a valid EDID */
	unsigned char edid_block[EDID_SIZE];
};

/* GMBUS cycle types as encoded in the GMBUS1 command register. */
enum gmbus_cycle_type {
	GMBUS_NOCYCLE	= 0x0,
	NIDX_NS_W	= 0x1,
	IDX_NS_W	= 0x3,
	GMBUS_STOP	= 0x4,
	NIDX_STOP	= 0x5,
	IDX_STOP	= 0x7
};

/*
 * States of GMBUS
 *
 * GMBUS0-3 could be related to the EDID virtualization. Another two GMBUS
 * registers, GMBUS4 (interrupt mask) and GMBUS5 (2 byte indes register), are
 * not considered here. Below describes the usage of GMBUS registers that are
 * cared by the EDID virtualization
 *
 * GMBUS0:
 *	R/W
 *	port selection. value of bit0 - bit2 corresponds to the GPIO registers.
 *
 * GMBUS1:
 *	R/W Protect
 *	Command and Status.
 *	bit0 is the direction bit: 1 is read; 0 is write.
 *	bit1 - bit7 is target 7-bit address.
 *	bit16 - bit24 total byte count (ignore?)
 *
 * GMBUS2:
 *	Most of bits are read only except bit 15 (IN_USE)
 *	Status register
 *	bit0 - bit8 current byte count
 *	bit 11: hardware ready;
 *
 * GMBUS3:
 *	Read/Write
 *	Data for transfer
 */

/* From hw specs, Other phases like START, ADDRESS, INDEX
 * are invisible to GMBUS MMIO interface. So no definitions
 * in below enum types
 */
enum gvt_gmbus_phase {
	GMBUS_IDLE_PHASE = 0,
	GMBUS_DATA_PHASE,
	GMBUS_WAIT_PHASE,
	//GMBUS_STOP_PHASE,
	GMBUS_MAX_PHASE
};

/* Per-vGPU state of an emulated GMBUS transaction. */
struct intel_vgpu_i2c_gmbus {
	unsigned int total_byte_count; /* from GMBUS1 */
	enum gmbus_cycle_type cycle_type;
	enum gvt_gmbus_phase phase;
};

/* Per-vGPU state of I2C-over-AUX-channel (DP) EDID access. */
struct intel_vgpu_i2c_aux_ch {
	bool i2c_over_aux_ch;
	bool aux_ch_mot;
};

/* Which I2C transport the current EDID sequence is using. */
enum i2c_state {
	I2C_NOT_SPECIFIED = 0,
	I2C_GMBUS = 1,
	I2C_AUX_CH = 2
};

/* I2C sequences cannot interleave.
 * GMBUS and AUX_CH sequences cannot interleave.
 */
struct intel_vgpu_i2c_edid {
	enum i2c_state state;

	unsigned int port;	/* display port the sequence targets */
	bool target_selected;
	bool edid_available;
	unsigned int current_edid_read; /* read offset into the EDID block */

	struct intel_vgpu_i2c_gmbus gmbus;
	struct intel_vgpu_i2c_aux_ch aux_ch;
};

void intel_vgpu_init_i2c_edid(struct intel_vgpu *vgpu);

int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes);

int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
	unsigned int offset, void *p_data, unsigned int bytes);

void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
	int port_idx,
	unsigned int offset,
	void *p_data);

#endif /*_GVT_EDID_H_*/
// SPDX-License-Identifier: GPL-2.0-only

/dts-v1/;

#include "msm8916-pm8916.dtsi"
#include "msm8916-modem-qdsp6.dtsi"

#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/irq.h>

/ {
	model = "Asus Zenfone 2 Laser";
	compatible = "asus,z00l", "qcom,msm8916";
	chassis-type = "handset";

	aliases {
		mmc0 = &sdhc_1; /* eMMC */
		mmc1 = &sdhc_2; /* SD card */
		serial0 = &blsp_uart2;
	};

	chosen {
		stdout-path = "serial0";
	};

	gpio-keys {
		compatible = "gpio-keys";

		pinctrl-names = "default";
		pinctrl-0 = <&gpio_keys_default>;

		label = "GPIO Buttons";

		button-volume-up {
			label = "Volume Up";
			gpios = <&tlmm 107 GPIO_ACTIVE_LOW>;
			linux,code = <KEY_VOLUMEUP>;
			debounce-interval = <15>;
		};

		button-volume-down {
			label = "Volume Down";
			gpios = <&tlmm 117 GPIO_ACTIVE_LOW>;
			linux,code = <KEY_VOLUMEDOWN>;
			debounce-interval = <15>;
		};
	};

	/* GPIO-controlled supply for the SD card slot */
	reg_sd_vmmc: regulator-sdcard-vmmc {
		compatible = "regulator-fixed";
		regulator-name = "sdcard-vmmc";
		regulator-min-microvolt = <2950000>;
		regulator-max-microvolt = <2950000>;

		gpio = <&tlmm 87 GPIO_ACTIVE_HIGH>;
		enable-active-high;

		startup-delay-us = <200>;

		pinctrl-names = "default";
		pinctrl-0 = <&sd_vmmc_en_default>;
	};

	/* USB ID pin, used to detect host vs device mode */
	usb_id: usb-id {
		compatible = "linux,extcon-usb-gpio";
		id-gpios = <&tlmm 110 GPIO_ACTIVE_HIGH>;
		pinctrl-names = "default";
		pinctrl-0 = <&usb_id_default>;
	};
};

&blsp_i2c2 {
	status = "okay";

	magnetometer@c {
		compatible = "asahi-kasei,ak09911";
		reg = <0x0c>;

		vdd-supply = <&pm8916_l8>;
		vid-supply = <&pm8916_l6>;

		reset-gpios = <&tlmm 112 GPIO_ACTIVE_LOW>;

		pinctrl-names = "default";
		pinctrl-0 = <&mag_reset_default>;
	};

	imu@68 {
		compatible = "invensense,mpu6515";
		reg = <0x68>;

		interrupt-parent = <&tlmm>;
		interrupts = <36 IRQ_TYPE_EDGE_RISING>;

		vdd-supply = <&pm8916_l17>;
		vddio-supply = <&pm8916_l6>;

		pinctrl-names = "default";
		pinctrl-0 = <&imu_default>;

		mount-matrix = "1",  "0",  "0",
			       "0", "-1",  "0",
			       "0",  "0",  "1";
	};
};

&blsp_i2c5 {
	status = "okay";

	touchscreen@38 {
		compatible = "edt,edt-ft5306";
		reg = <0x38>;

		interrupt-parent = <&tlmm>;
		interrupts = <13 IRQ_TYPE_EDGE_FALLING>;

		reset-gpios = <&tlmm 12 GPIO_ACTIVE_LOW>;

		vcc-supply = <&pm8916_l11>;
		iovcc-supply = <&pm8916_l6>;

		touchscreen-size-x = <720>;
		touchscreen-size-y = <1280>;

		pinctrl-names = "default";
		pinctrl-0 = <&touchscreen_default>;
	};
};

&blsp_uart2 {
	status = "okay";
};

&mpss_mem {
	reg = <0x0 0x86800000 0x0 0x5500000>;
};

&pm8916_codec {
	qcom,micbias-lvl = <2800>;
	qcom,mbhc-vthreshold-low = <75 150 237 450 500>;
	qcom,mbhc-vthreshold-high = <75 150 237 450 500>;
	qcom,micbias1-ext-cap;
	qcom,hphl-jack-type-normally-open;
};

&pm8916_rpm_regulators {
	pm8916_l17: l17 {
		regulator-min-microvolt = <2850000>;
		regulator-max-microvolt = <2850000>;
	};
};

&sdhc_1 {
	status = "okay";
};

&sdhc_2 {
	status = "okay";

	vmmc-supply = <&reg_sd_vmmc>;

	pinctrl-names = "default", "sleep";
	pinctrl-0 = <&sdc2_default &sdc2_cd_default>;
	pinctrl-1 = <&sdc2_sleep &sdc2_cd_default>;

	cd-gpios = <&tlmm 38 GPIO_ACTIVE_LOW>;
};

&sound {
	audio-routing =
		"AMIC1", "MIC BIAS External1",
		"AMIC2", "MIC BIAS Internal2",
		"AMIC3", "MIC BIAS External1";
};

&usb {
	status = "okay";
	extcon = <&usb_id>, <&usb_id>;
};

&usb_hs_phy {
	extcon = <&usb_id>;
};

&venus {
	status = "okay";
};

&venus_mem {
	status = "okay";
};

&wcnss {
	status = "okay";
};

&wcnss_iris {
	compatible = "qcom,wcn3620";
};

&wcnss_mem {
	status = "okay";
};

&tlmm {
	gpio_keys_default: gpio-keys-default-state {
		pins = "gpio107", "gpio117";
		function = "gpio";

		drive-strength = <2>;
		bias-pull-up;
	};

	imu_default: imu-default-state {
		pins = "gpio36";
		function = "gpio";

		drive-strength = <2>;
		bias-disable;
	};

	mag_reset_default: mag-reset-default-state {
		pins = "gpio112";
		function = "gpio";

		drive-strength = <2>;
		bias-disable;
	};

	sd_vmmc_en_default: sd-vmmc-en-default-state {
		pins = "gpio87";
		function = "gpio";

		drive-strength = <2>;
		bias-disable;
	};

	sdc2_cd_default: sdc2-cd-default-state {
		pins = "gpio38";
		function = "gpio";

		drive-strength = <2>;
		bias-disable;
	};

	touchscreen_default: touchscreen-default-state {
		touch-pins {
			pins = "gpio13";
			function = "gpio";

			drive-strength = <2>;
			bias-pull-up;
		};

		reset-pins {
			pins = "gpio12";
			function = "gpio";

			drive-strength = <2>;
			bias-disable;
		};
	};

	usb_id_default: usb-id-default-state {
		pins = "gpio110";
		function = "gpio";

		drive-strength = <8>;
		bias-pull-up;
	};
};