code
stringlengths
0
23.9M
// SPDX-License-Identifier: GPL-2.0 /* * Xilinx ZynqMP OCM ECC Driver * * Copyright (C) 2022 Advanced Micro Devices, Inc. */ #include <linux/edac.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include "edac_module.h" #define ZYNQMP_OCM_EDAC_MSG_SIZE 256 #define ZYNQMP_OCM_EDAC_STRING "zynqmp_ocm" /* Error/Interrupt registers */ #define ERR_CTRL_OFST 0x0 #define OCM_ISR_OFST 0x04 #define OCM_IMR_OFST 0x08 #define OCM_IEN_OFST 0x0C #define OCM_IDS_OFST 0x10 /* ECC control register */ #define ECC_CTRL_OFST 0x14 /* Correctable error info registers */ #define CE_FFA_OFST 0x1C #define CE_FFD0_OFST 0x20 #define CE_FFD1_OFST 0x24 #define CE_FFD2_OFST 0x28 #define CE_FFD3_OFST 0x2C #define CE_FFE_OFST 0x30 /* Uncorrectable error info registers */ #define UE_FFA_OFST 0x34 #define UE_FFD0_OFST 0x38 #define UE_FFD1_OFST 0x3C #define UE_FFD2_OFST 0x40 #define UE_FFD3_OFST 0x44 #define UE_FFE_OFST 0x48 /* ECC control register bit field definitions */ #define ECC_CTRL_CLR_CE_ERR 0x40 #define ECC_CTRL_CLR_UE_ERR 0x80 /* Fault injection data and count registers */ #define OCM_FID0_OFST 0x4C #define OCM_FID1_OFST 0x50 #define OCM_FID2_OFST 0x54 #define OCM_FID3_OFST 0x58 #define OCM_FIC_OFST 0x74 #define UE_MAX_BITPOS_LOWER 31 #define UE_MIN_BITPOS_UPPER 32 #define UE_MAX_BITPOS_UPPER 63 /* Interrupt masks */ #define OCM_CEINTR_MASK BIT(6) #define OCM_UEINTR_MASK BIT(7) #define OCM_ECC_ENABLE_MASK BIT(0) #define OCM_FICOUNT_MASK GENMASK(23, 0) #define OCM_NUM_UE_BITPOS 2 #define OCM_BASEVAL 0xFFFC0000 #define EDAC_DEVICE "ZynqMP-OCM" /** * struct ecc_error_info - ECC error log information * @addr: Fault generated at this address * @fault_lo: Generated fault data (lower 32-bit) * @fault_hi: Generated fault data (upper 32-bit) */ struct ecc_error_info { u32 addr; u32 fault_lo; u32 fault_hi; }; /** * struct ecc_status - ECC status information to report * @ce_cnt: Correctable error count 
* @ue_cnt: Uncorrectable error count * @ceinfo: Correctable error log information * @ueinfo: Uncorrectable error log information */ struct ecc_status { u32 ce_cnt; u32 ue_cnt; struct ecc_error_info ceinfo; struct ecc_error_info ueinfo; }; /** * struct edac_priv - OCM private instance data * @baseaddr: Base address of the OCM * @message: Buffer for framing the event specific info * @stat: ECC status information * @ce_cnt: Correctable Error count * @ue_cnt: Uncorrectable Error count * @debugfs_dir: Directory entry for debugfs * @ce_bitpos: Bit position for Correctable Error * @ue_bitpos: Array to store UnCorrectable Error bit positions * @fault_injection_cnt: Fault Injection Counter value */ struct edac_priv { void __iomem *baseaddr; char message[ZYNQMP_OCM_EDAC_MSG_SIZE]; struct ecc_status stat; u32 ce_cnt; u32 ue_cnt; #ifdef CONFIG_EDAC_DEBUG struct dentry *debugfs_dir; u8 ce_bitpos; u8 ue_bitpos[OCM_NUM_UE_BITPOS]; u32 fault_injection_cnt; #endif }; /** * get_error_info - Get the current ECC error info * @base: Pointer to the base address of the OCM * @p: Pointer to the OCM ECC status structure * @mask: Status register mask value * * Determines there is any ECC error or not * */ static void get_error_info(void __iomem *base, struct ecc_status *p, int mask) { if (mask & OCM_CEINTR_MASK) { p->ce_cnt++; p->ceinfo.fault_lo = readl(base + CE_FFD0_OFST); p->ceinfo.fault_hi = readl(base + CE_FFD1_OFST); p->ceinfo.addr = (OCM_BASEVAL | readl(base + CE_FFA_OFST)); writel(ECC_CTRL_CLR_CE_ERR, base + OCM_ISR_OFST); } else if (mask & OCM_UEINTR_MASK) { p->ue_cnt++; p->ueinfo.fault_lo = readl(base + UE_FFD0_OFST); p->ueinfo.fault_hi = readl(base + UE_FFD1_OFST); p->ueinfo.addr = (OCM_BASEVAL | readl(base + UE_FFA_OFST)); writel(ECC_CTRL_CLR_UE_ERR, base + OCM_ISR_OFST); } } /** * handle_error - Handle error types CE and UE * @dci: Pointer to the EDAC device instance * @p: Pointer to the OCM ECC status structure * * Handles correctable and uncorrectable errors. 
*/ static void handle_error(struct edac_device_ctl_info *dci, struct ecc_status *p) { struct edac_priv *priv = dci->pvt_info; struct ecc_error_info *pinf; if (p->ce_cnt) { pinf = &p->ceinfo; snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE, "\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]", "CE", pinf->addr, pinf->fault_hi, pinf->fault_lo); edac_device_handle_ce(dci, 0, 0, priv->message); } if (p->ue_cnt) { pinf = &p->ueinfo; snprintf(priv->message, ZYNQMP_OCM_EDAC_MSG_SIZE, "\nOCM ECC error type :%s\nAddr: [0x%x]\nFault Data[0x%08x%08x]", "UE", pinf->addr, pinf->fault_hi, pinf->fault_lo); edac_device_handle_ue(dci, 0, 0, priv->message); } memset(p, 0, sizeof(*p)); } /** * intr_handler - ISR routine * @irq: irq number * @dev_id: device id pointer * * Return: IRQ_NONE, if CE/UE interrupt not set or IRQ_HANDLED otherwise */ static irqreturn_t intr_handler(int irq, void *dev_id) { struct edac_device_ctl_info *dci = dev_id; struct edac_priv *priv = dci->pvt_info; int regval; regval = readl(priv->baseaddr + OCM_ISR_OFST); if (!(regval & (OCM_CEINTR_MASK | OCM_UEINTR_MASK))) { WARN_ONCE(1, "Unhandled IRQ%d, ISR: 0x%x", irq, regval); return IRQ_NONE; } get_error_info(priv->baseaddr, &priv->stat, regval); priv->ce_cnt += priv->stat.ce_cnt; priv->ue_cnt += priv->stat.ue_cnt; handle_error(dci, &priv->stat); return IRQ_HANDLED; } /** * get_eccstate - Return the ECC status * @base: Pointer to the OCM base address * * Get the ECC enable/disable status * * Return: ECC status 0/1. 
*/ static bool get_eccstate(void __iomem *base) { return readl(base + ECC_CTRL_OFST) & OCM_ECC_ENABLE_MASK; } #ifdef CONFIG_EDAC_DEBUG /** * write_fault_count - write fault injection count * @priv: Pointer to the EDAC private struct * * Update the fault injection count register, once the counter reaches * zero, it injects errors */ static void write_fault_count(struct edac_priv *priv) { u32 ficount = priv->fault_injection_cnt; if (ficount & ~OCM_FICOUNT_MASK) { ficount &= OCM_FICOUNT_MASK; edac_printk(KERN_INFO, EDAC_DEVICE, "Fault injection count value truncated to %d\n", ficount); } writel(ficount, priv->baseaddr + OCM_FIC_OFST); } /* * To get the Correctable Error injected, the following steps are needed: * - Setup the optional Fault Injection Count: * echo <fault_count val> > /sys/kernel/debug/edac/ocm/inject_fault_count * - Write the Correctable Error bit position value: * echo <bit_pos val> > /sys/kernel/debug/edac/ocm/inject_ce_bitpos */ static ssize_t inject_ce_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct edac_device_ctl_info *edac_dev = file->private_data; struct edac_priv *priv = edac_dev->pvt_info; int ret; if (!data) return -EFAULT; ret = kstrtou8_from_user(data, count, 0, &priv->ce_bitpos); if (ret) return ret; if (priv->ce_bitpos > UE_MAX_BITPOS_UPPER) return -EINVAL; if (priv->ce_bitpos <= UE_MAX_BITPOS_LOWER) { writel(BIT(priv->ce_bitpos), priv->baseaddr + OCM_FID0_OFST); writel(0, priv->baseaddr + OCM_FID1_OFST); } else { writel(BIT(priv->ce_bitpos - UE_MIN_BITPOS_UPPER), priv->baseaddr + OCM_FID1_OFST); writel(0, priv->baseaddr + OCM_FID0_OFST); } write_fault_count(priv); return count; } static const struct file_operations inject_ce_fops = { .open = simple_open, .write = inject_ce_write, .llseek = generic_file_llseek, }; /* * To get the Uncorrectable Error injected, the following steps are needed: * - Setup the optional Fault Injection Count: * echo <fault_count val> > 
/sys/kernel/debug/edac/ocm/inject_fault_count * - Write the Uncorrectable Error bit position values: * echo <bit_pos0 val>,<bit_pos1 val> > /sys/kernel/debug/edac/ocm/inject_ue_bitpos */ static ssize_t inject_ue_write(struct file *file, const char __user *data, size_t count, loff_t *ppos) { struct edac_device_ctl_info *edac_dev = file->private_data; struct edac_priv *priv = edac_dev->pvt_info; char buf[6], *pbuf, *token[2]; u64 ue_bitpos; int i, ret; u8 len; if (!data) return -EFAULT; len = min_t(size_t, count, sizeof(buf)); if (copy_from_user(buf, data, len)) return -EFAULT; buf[len] = '\0'; pbuf = &buf[0]; for (i = 0; i < OCM_NUM_UE_BITPOS; i++) token[i] = strsep(&pbuf, ","); ret = kstrtou8(token[0], 0, &priv->ue_bitpos[0]); if (ret) return ret; ret = kstrtou8(token[1], 0, &priv->ue_bitpos[1]); if (ret) return ret; if (priv->ue_bitpos[0] > UE_MAX_BITPOS_UPPER || priv->ue_bitpos[1] > UE_MAX_BITPOS_UPPER) return -EINVAL; if (priv->ue_bitpos[0] == priv->ue_bitpos[1]) { edac_printk(KERN_ERR, EDAC_DEVICE, "Bit positions should not be equal\n"); return -EINVAL; } ue_bitpos = BIT(priv->ue_bitpos[0]) | BIT(priv->ue_bitpos[1]); writel((u32)ue_bitpos, priv->baseaddr + OCM_FID0_OFST); writel((u32)(ue_bitpos >> 32), priv->baseaddr + OCM_FID1_OFST); write_fault_count(priv); return count; } static const struct file_operations inject_ue_fops = { .open = simple_open, .write = inject_ue_write, .llseek = generic_file_llseek, }; static void setup_debugfs(struct edac_device_ctl_info *edac_dev) { struct edac_priv *priv = edac_dev->pvt_info; priv->debugfs_dir = edac_debugfs_create_dir("ocm"); if (!priv->debugfs_dir) return; edac_debugfs_create_x32("inject_fault_count", 0644, priv->debugfs_dir, &priv->fault_injection_cnt); edac_debugfs_create_file("inject_ue_bitpos", 0644, priv->debugfs_dir, edac_dev, &inject_ue_fops); edac_debugfs_create_file("inject_ce_bitpos", 0644, priv->debugfs_dir, edac_dev, &inject_ce_fops); } #endif static int edac_probe(struct platform_device *pdev) { struct 
edac_device_ctl_info *dci; struct edac_priv *priv; void __iomem *baseaddr; struct resource *res; int irq, ret; baseaddr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); if (IS_ERR(baseaddr)) return PTR_ERR(baseaddr); if (!get_eccstate(baseaddr)) { edac_printk(KERN_INFO, EDAC_DEVICE, "ECC not enabled\n"); return -ENXIO; } dci = edac_device_alloc_ctl_info(sizeof(*priv), ZYNQMP_OCM_EDAC_STRING, 1, ZYNQMP_OCM_EDAC_STRING, 1, 0, edac_device_alloc_index()); if (!dci) return -ENOMEM; priv = dci->pvt_info; platform_set_drvdata(pdev, dci); dci->dev = &pdev->dev; priv->baseaddr = baseaddr; dci->mod_name = pdev->dev.driver->name; dci->ctl_name = ZYNQMP_OCM_EDAC_STRING; dci->dev_name = dev_name(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto free_dev_ctl; } ret = devm_request_irq(&pdev->dev, irq, intr_handler, 0, dev_name(&pdev->dev), dci); if (ret) { edac_printk(KERN_ERR, EDAC_DEVICE, "Failed to request Irq\n"); goto free_dev_ctl; } /* Enable UE, CE interrupts */ writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IEN_OFST); #ifdef CONFIG_EDAC_DEBUG setup_debugfs(dci); #endif ret = edac_device_add_device(dci); if (ret) goto free_dev_ctl; return 0; free_dev_ctl: edac_device_free_ctl_info(dci); return ret; } static void edac_remove(struct platform_device *pdev) { struct edac_device_ctl_info *dci = platform_get_drvdata(pdev); struct edac_priv *priv = dci->pvt_info; /* Disable UE, CE interrupts */ writel((OCM_CEINTR_MASK | OCM_UEINTR_MASK), priv->baseaddr + OCM_IDS_OFST); #ifdef CONFIG_EDAC_DEBUG debugfs_remove_recursive(priv->debugfs_dir); #endif edac_device_del_device(&pdev->dev); edac_device_free_ctl_info(dci); } static const struct of_device_id zynqmp_ocm_edac_match[] = { { .compatible = "xlnx,zynqmp-ocmc-1.0"}, { /* end of table */ } }; MODULE_DEVICE_TABLE(of, zynqmp_ocm_edac_match); static struct platform_driver zynqmp_ocm_edac_driver = { .driver = { .name = "zynqmp-ocm-edac", .of_match_table = zynqmp_ocm_edac_match, }, 
.probe = edac_probe, .remove = edac_remove, }; module_platform_driver(zynqmp_ocm_edac_driver); MODULE_AUTHOR("Advanced Micro Devices, Inc"); MODULE_DESCRIPTION("Xilinx ZynqMP OCM ECC driver"); MODULE_LICENSE("GPL");
// SPDX-License-Identifier: MIT /* * Copyright © 2014 Intel Corporation */ #include "gem/i915_gem_internal.h" #include "i915_drv.h" #include "intel_renderstate.h" #include "intel_context.h" #include "intel_gpu_commands.h" #include "intel_ring.h" static const struct intel_renderstate_rodata * render_state_get_rodata(const struct intel_engine_cs *engine) { if (engine->class != RENDER_CLASS) return NULL; switch (GRAPHICS_VER(engine->i915)) { case 6: return &gen6_null_state; case 7: return &gen7_null_state; case 8: return &gen8_null_state; case 9: return &gen9_null_state; } return NULL; } /* * Macro to add commands to auxiliary batch. * This macro only checks for page overflow before inserting the commands, * this is sufficient as the null state generator makes the final batch * with two passes to build command and state separately. At this point * the size of both are known and it compacts them by relocating the state * right after the commands taking care of alignment so we should sufficient * space below them for adding new commands. 
*/ #define OUT_BATCH(batch, i, val) \ do { \ if ((i) >= PAGE_SIZE / sizeof(u32)) \ goto out; \ (batch)[(i)++] = (val); \ } while (0) static int render_state_setup(struct intel_renderstate *so, struct drm_i915_private *i915) { const struct intel_renderstate_rodata *rodata = so->rodata; unsigned int i = 0, reloc_index = 0; int ret = -EINVAL; u32 *d; d = i915_gem_object_pin_map(so->vma->obj, I915_MAP_WB); if (IS_ERR(d)) return PTR_ERR(d); while (i < rodata->batch_items) { u32 s = rodata->batch[i]; if (i * 4 == rodata->reloc[reloc_index]) { u64 r = s + i915_vma_offset(so->vma); s = lower_32_bits(r); if (HAS_64BIT_RELOC(i915)) { if (i + 1 >= rodata->batch_items || rodata->batch[i + 1] != 0) goto out; d[i++] = s; s = upper_32_bits(r); } reloc_index++; } d[i++] = s; } if (rodata->reloc[reloc_index] != -1) { drm_err(&i915->drm, "only %d relocs resolved\n", reloc_index); goto out; } so->batch_offset = i915_ggtt_offset(so->vma); so->batch_size = rodata->batch_items * sizeof(u32); while (i % CACHELINE_DWORDS) OUT_BATCH(d, i, MI_NOOP); so->aux_offset = i * sizeof(u32); if (HAS_POOLED_EU(i915)) { /* * We always program 3x6 pool config but depending upon which * subslice is disabled HW drops down to appropriate config * shown below. 
* * In the below table 2x6 config always refers to * fused-down version, native 2x6 is not available and can * be ignored * * SNo subslices config eu pool configuration * ----------------------------------------------------------- * 1 3 subslices enabled (3x6) - 0x00777000 (9+9) * 2 ss0 disabled (2x6) - 0x00777000 (3+9) * 3 ss1 disabled (2x6) - 0x00770000 (6+6) * 4 ss2 disabled (2x6) - 0x00007000 (9+3) */ u32 eu_pool_config = 0x00777000; OUT_BATCH(d, i, GEN9_MEDIA_POOL_STATE); OUT_BATCH(d, i, GEN9_MEDIA_POOL_ENABLE); OUT_BATCH(d, i, eu_pool_config); OUT_BATCH(d, i, 0); OUT_BATCH(d, i, 0); OUT_BATCH(d, i, 0); } OUT_BATCH(d, i, MI_BATCH_BUFFER_END); so->aux_size = i * sizeof(u32) - so->aux_offset; so->aux_offset += so->batch_offset; /* * Since we are sending length, we need to strictly conform to * all requirements. For Gen2 this must be a multiple of 8. */ so->aux_size = ALIGN(so->aux_size, 8); ret = 0; out: __i915_gem_object_flush_map(so->vma->obj, 0, i * sizeof(u32)); __i915_gem_object_release_map(so->vma->obj); return ret; } #undef OUT_BATCH int intel_renderstate_init(struct intel_renderstate *so, struct intel_context *ce) { struct intel_engine_cs *engine = ce->engine; struct drm_i915_gem_object *obj = NULL; int err; memset(so, 0, sizeof(*so)); so->rodata = render_state_get_rodata(engine); if (so->rodata) { if (so->rodata->batch_items * 4 > PAGE_SIZE) return -EINVAL; obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE); if (IS_ERR(obj)) return PTR_ERR(obj); so->vma = i915_vma_instance(obj, &engine->gt->ggtt->vm, NULL); if (IS_ERR(so->vma)) { err = PTR_ERR(so->vma); goto err_obj; } } i915_gem_ww_ctx_init(&so->ww, true); retry: err = intel_context_pin_ww(ce, &so->ww); if (err) goto err_fini; /* return early if there's nothing to setup */ if (!err && !so->rodata) return 0; err = i915_gem_object_lock(so->vma->obj, &so->ww); if (err) goto err_context; err = i915_vma_pin_ww(so->vma, &so->ww, 0, 0, PIN_GLOBAL | PIN_HIGH); if (err) goto err_context; err = 
render_state_setup(so, engine->i915); if (err) goto err_unpin; return 0; err_unpin: i915_vma_unpin(so->vma); err_context: intel_context_unpin(ce); err_fini: if (err == -EDEADLK) { err = i915_gem_ww_ctx_backoff(&so->ww); if (!err) goto retry; } i915_gem_ww_ctx_fini(&so->ww); err_obj: if (obj) i915_gem_object_put(obj); so->vma = NULL; return err; } int intel_renderstate_emit(struct intel_renderstate *so, struct i915_request *rq) { struct intel_engine_cs *engine = rq->engine; int err; if (!so->vma) return 0; err = i915_vma_move_to_active(so->vma, rq, 0); if (err) return err; err = engine->emit_bb_start(rq, so->batch_offset, so->batch_size, I915_DISPATCH_SECURE); if (err) return err; if (so->aux_size > 8) { err = engine->emit_bb_start(rq, so->aux_offset, so->aux_size, I915_DISPATCH_SECURE); if (err) return err; } return 0; } void intel_renderstate_fini(struct intel_renderstate *so, struct intel_context *ce) { if (so->vma) { i915_vma_unpin(so->vma); i915_vma_close(so->vma); } intel_context_unpin(ce); i915_gem_ww_ctx_fini(&so->ww); if (so->vma) i915_gem_object_put(so->vma->obj); }
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef BTRFS_SUPER_H #define BTRFS_SUPER_H #include <linux/types.h> #include <linux/fs.h> #include "fs.h" struct super_block; struct btrfs_fs_info; bool btrfs_check_options(const struct btrfs_fs_info *info, unsigned long long *mount_opt, unsigned long flags); int btrfs_sync_fs(struct super_block *sb, int wait); char *btrfs_get_subvol_name_from_objectid(struct btrfs_fs_info *fs_info, u64 subvol_objectid); void btrfs_set_free_space_cache_settings(struct btrfs_fs_info *fs_info); static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) { return sb->s_fs_info; } static inline void btrfs_set_sb_rdonly(struct super_block *sb) { sb->s_flags |= SB_RDONLY; set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state); } static inline void btrfs_clear_sb_rdonly(struct super_block *sb) { sb->s_flags &= ~SB_RDONLY; clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state); } #endif
// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/dts-v1/;

#include <dt-bindings/gpio/gpio.h>
#include "bcm2712.dtsi"

/ {
	compatible = "raspberrypi,5-model-b", "brcm,bcm2712";
	model = "Raspberry Pi 5";

	aliases {
		serial10 = &uart10;
	};

	chosen: chosen {
		stdout-path = "serial10:115200n8";
	};

	/* Will be filled by the bootloader */
	memory@0 {
		device_type = "memory";
		reg = <0 0 0 0x28000000>;
	};

	/* SD card I/O voltage rail, switchable 1.8V/3.3V via AON GPIO 3 */
	sd_io_1v8_reg: sd-io-1v8-reg {
		compatible = "regulator-gpio";
		regulator-name = "vdd-sd-io";
		regulator-min-microvolt = <1800000>;
		regulator-max-microvolt = <3300000>;
		regulator-boot-on;
		regulator-always-on;
		regulator-settling-time-us = <5000>;
		gpios = <&gio_aon 3 GPIO_ACTIVE_HIGH>;
		states = <1800000 1>,
			 <3300000 0>;
	};

	/* Fixed 3.3V SD card supply, enabled through AON GPIO 4 */
	sd_vcc_reg: sd-vcc-reg {
		compatible = "regulator-fixed";
		regulator-name = "vcc-sd";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		regulator-boot-on;
		enable-active-high;
		gpios = <&gio_aon 4 GPIO_ACTIVE_HIGH>;
	};
};

/* The Debug UART, on Rpi5 it's on JST-SH 1.0mm 3-pin connector
 * labeled "UART", i.e. the interface with the system console.
 */
&uart10 {
	status = "okay";
};

/* SDIO1 is used to drive the SD card */
&sdio1 {
	vqmmc-supply = <&sd_io_1v8_reg>;
	vmmc-supply = <&sd_vcc_reg>;
	bus-width = <4>;
	sd-uhs-sdr50;
	sd-uhs-ddr50;
	sd-uhs-sdr104;
};
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2012 Stefan Roese <[email protected]> */ #include <linux/device.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/spi/spi.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/unaligned.h> #define FIRMWARE_NAME "lattice-ecp3.bit" /* * The JTAG ID's of the supported FPGA's. The ID is 32bit wide * reversed as noted in the manual. */ #define ID_ECP3_17 0xc2088080 #define ID_ECP3_35 0xc2048080 /* FPGA commands */ #define FPGA_CMD_READ_ID 0x07 /* plus 24 bits */ #define FPGA_CMD_READ_STATUS 0x09 /* plus 24 bits */ #define FPGA_CMD_CLEAR 0x70 #define FPGA_CMD_REFRESH 0x71 #define FPGA_CMD_WRITE_EN 0x4a /* plus 2 bits */ #define FPGA_CMD_WRITE_DIS 0x4f /* plus 8 bits */ #define FPGA_CMD_WRITE_INC 0x41 /* plus 0 bits */ /* * The status register is 32bit revered, DONE is bit 17 from the TN1222.pdf * (LatticeECP3 Slave SPI Port User's Guide) */ #define FPGA_STATUS_DONE 0x00004000 #define FPGA_STATUS_CLEARED 0x00010000 #define FPGA_CLEAR_TIMEOUT 5000 /* max. 
5000ms for FPGA clear */ #define FPGA_CLEAR_MSLEEP 10 #define FPGA_CLEAR_LOOP_COUNT (FPGA_CLEAR_TIMEOUT / FPGA_CLEAR_MSLEEP) struct fpga_data { struct completion fw_loaded; }; struct ecp3_dev { u32 jedec_id; char *name; }; static const struct ecp3_dev ecp3_dev[] = { { .jedec_id = ID_ECP3_17, .name = "Lattice ECP3-17", }, { .jedec_id = ID_ECP3_35, .name = "Lattice ECP3-35", }, }; static void firmware_load(const struct firmware *fw, void *context) { struct spi_device *spi = (struct spi_device *)context; struct fpga_data *data = spi_get_drvdata(spi); u8 *buffer; u8 txbuf[8]; u8 rxbuf[8]; int rx_len = 8; int i; u32 jedec_id; u32 status; if (fw == NULL) { dev_err(&spi->dev, "Cannot load firmware, aborting\n"); goto out; } if (fw->size == 0) { dev_err(&spi->dev, "Error: Firmware size is 0!\n"); goto out; } /* Fill dummy data (24 stuffing bits for commands) */ txbuf[1] = 0x00; txbuf[2] = 0x00; txbuf[3] = 0x00; /* Trying to speak with the FPGA via SPI... */ txbuf[0] = FPGA_CMD_READ_ID; spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); jedec_id = get_unaligned_be32(&rxbuf[4]); dev_dbg(&spi->dev, "FPGA JTAG ID=%08x\n", jedec_id); for (i = 0; i < ARRAY_SIZE(ecp3_dev); i++) { if (jedec_id == ecp3_dev[i].jedec_id) break; } if (i == ARRAY_SIZE(ecp3_dev)) { dev_err(&spi->dev, "Error: No supported FPGA detected (JEDEC_ID=%08x)!\n", jedec_id); goto out; } dev_info(&spi->dev, "FPGA %s detected\n", ecp3_dev[i].name); txbuf[0] = FPGA_CMD_READ_STATUS; spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); status = get_unaligned_be32(&rxbuf[4]); dev_dbg(&spi->dev, "FPGA Status=%08x\n", status); buffer = kzalloc(fw->size + 8, GFP_KERNEL); if (!buffer) { dev_err(&spi->dev, "Error: Can't allocate memory!\n"); goto out; } /* * Insert WRITE_INC command into stream (one SPI frame) */ buffer[0] = FPGA_CMD_WRITE_INC; buffer[1] = 0xff; buffer[2] = 0xff; buffer[3] = 0xff; memcpy(buffer + 4, fw->data, fw->size); txbuf[0] = FPGA_CMD_REFRESH; spi_write(spi, txbuf, 4); txbuf[0] = FPGA_CMD_WRITE_EN; 
spi_write(spi, txbuf, 4); txbuf[0] = FPGA_CMD_CLEAR; spi_write(spi, txbuf, 4); /* * Wait for FPGA memory to become cleared */ for (i = 0; i < FPGA_CLEAR_LOOP_COUNT; i++) { txbuf[0] = FPGA_CMD_READ_STATUS; spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); status = get_unaligned_be32(&rxbuf[4]); if (status == FPGA_STATUS_CLEARED) break; msleep(FPGA_CLEAR_MSLEEP); } if (i == FPGA_CLEAR_LOOP_COUNT) { dev_err(&spi->dev, "Error: Timeout waiting for FPGA to clear (status=%08x)!\n", status); kfree(buffer); goto out; } dev_info(&spi->dev, "Configuring the FPGA...\n"); spi_write(spi, buffer, fw->size + 8); txbuf[0] = FPGA_CMD_WRITE_DIS; spi_write(spi, txbuf, 4); txbuf[0] = FPGA_CMD_READ_STATUS; spi_write_then_read(spi, txbuf, 8, rxbuf, rx_len); status = get_unaligned_be32(&rxbuf[4]); dev_dbg(&spi->dev, "FPGA Status=%08x\n", status); /* Check result */ if (status & FPGA_STATUS_DONE) dev_info(&spi->dev, "FPGA successfully configured!\n"); else dev_info(&spi->dev, "FPGA not configured (DONE not set)\n"); /* * Don't forget to release the firmware again */ release_firmware(fw); kfree(buffer); out: complete(&data->fw_loaded); } static int lattice_ecp3_probe(struct spi_device *spi) { struct fpga_data *data; int err; data = devm_kzalloc(&spi->dev, sizeof(*data), GFP_KERNEL); if (!data) { dev_err(&spi->dev, "Memory allocation for fpga_data failed\n"); return -ENOMEM; } spi_set_drvdata(spi, data); init_completion(&data->fw_loaded); err = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT, FIRMWARE_NAME, &spi->dev, GFP_KERNEL, spi, firmware_load); if (err) { dev_err(&spi->dev, "Firmware loading failed with %d!\n", err); return err; } dev_info(&spi->dev, "FPGA bitstream configuration driver registered\n"); return 0; } static void lattice_ecp3_remove(struct spi_device *spi) { struct fpga_data *data = spi_get_drvdata(spi); wait_for_completion(&data->fw_loaded); } static const struct spi_device_id lattice_ecp3_id[] = { { "ecp3-17", 0 }, { "ecp3-35", 0 }, { } }; MODULE_DEVICE_TABLE(spi, 
lattice_ecp3_id); static struct spi_driver lattice_ecp3_driver = { .driver = { .name = "lattice-ecp3", }, .probe = lattice_ecp3_probe, .remove = lattice_ecp3_remove, .id_table = lattice_ecp3_id, }; module_spi_driver(lattice_ecp3_driver); MODULE_AUTHOR("Stefan Roese <[email protected]>"); MODULE_DESCRIPTION("Lattice ECP3 FPGA configuration via SPI"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_NAME);
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
 *
 * EVM Board Schematics: https://www.ti.com/lit/zip/sprr458
 */

/dts-v1/;

#include <dt-bindings/net/ti-dp83867.h>
#include <dt-bindings/gpio/gpio.h>
#include "k3-j784s4.dtsi"
#include "k3-j784s4-j742s2-evm-common.dtsi"

/ {
	compatible = "ti,j784s4-evm", "ti,j784s4";
	model = "Texas Instruments J784S4 EVM";

	memory@80000000 {
		/* 32G RAM */
		reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
		      <0x00000008 0x80000000 0x00000007 0x80000000>;
		device_type = "memory";
		bootph-all;
	};

	reserved_memory: reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;

		/* DMA pool for the fourth C71x DSP core (c71_3) */
		c71_3_dma_memory_region: c71-dma-memory@ab000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xab000000 0x00 0x100000>;
			no-map;
		};

		/* Carveout for the c71_3 remoteproc firmware/IPC */
		c71_3_memory_region: c71-memory@ab100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xab100000 0x00 0xf00000>;
			no-map;
		};
	};
};

/* Rx/Tx mailbox pair for IPC with the c71_3 DSP */
&mailbox0_cluster5 {
	mbox_c71_3: mbox-c71-3 {
		ti,mbox-rx = <2 0 0>;
		ti,mbox-tx = <3 0 0>;
	};
};

&c71_3 {
	mboxes = <&mailbox0_cluster5 &mbox_c71_3>;
	memory-region = <&c71_3_dma_memory_region>,
			<&c71_3_memory_region>;
	status = "okay";
};
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2023, Intel Corporation. */

#ifndef _ICE_SF_VSI_VLAN_OPS_H_
#define _ICE_SF_VSI_VLAN_OPS_H_

#include "ice_vsi_vlan_ops.h"

struct ice_vsi;

/* Install the subfunction (SF) VSI specific VLAN operations on @vsi. */
void ice_sf_vsi_init_vlan_ops(struct ice_vsi *vsi);

#endif /* _ICE_SF_VSI_VLAN_OPS_H_ */
// SPDX-License-Identifier: GPL-2.0-only /* * vl6180.c - Support for STMicroelectronics VL6180 ALS, range and proximity * sensor * * Copyright 2017 Peter Meerwald-Stadler <[email protected]> * Copyright 2017 Manivannan Sadhasivam <[email protected]> * * IIO driver for VL6180 (7-bit I2C slave address 0x29) * * Range: 0 to 100mm * ALS: < 1 Lux up to 100 kLux * IR: 850nm * * TODO: irq, threshold events, continuous mode, hardware buffer */ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/util_macros.h> #include <linux/iio/iio.h> #include <linux/iio/sysfs.h> #include <linux/iio/buffer.h> #include <linux/iio/trigger.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #define VL6180_DRV_NAME "vl6180" /* Device identification register and value */ #define VL6180_MODEL_ID 0x000 #define VL6180_MODEL_ID_VAL 0xb4 /* Configuration registers */ #define VL6180_INTR_CONFIG 0x014 #define VL6180_INTR_CLEAR 0x015 #define VL6180_OUT_OF_RESET 0x016 #define VL6180_HOLD 0x017 #define VL6180_RANGE_START 0x018 #define VL6180_RANGE_INTER_MEAS_TIME 0x01b #define VL6180_ALS_START 0x038 #define VL6180_ALS_INTER_MEAS_TIME 0x03e #define VL6180_ALS_GAIN 0x03f #define VL6180_ALS_IT 0x040 /* Status registers */ #define VL6180_RANGE_STATUS 0x04d #define VL6180_ALS_STATUS 0x04e #define VL6180_INTR_STATUS 0x04f /* Result value registers */ #define VL6180_ALS_VALUE 0x050 #define VL6180_RANGE_VALUE 0x062 #define VL6180_RANGE_RATE 0x066 /* bits of the RANGE_START and ALS_START register */ #define VL6180_MODE_CONT BIT(1) /* continuous mode */ #define VL6180_STARTSTOP BIT(0) /* start measurement, auto-reset */ /* bits of the INTR_STATUS and INTR_CONFIG register */ #define VL6180_ALS_READY BIT(5) #define VL6180_RANGE_READY BIT(2) /* bits of the INTR_CLEAR register */ #define VL6180_CLEAR_ERROR BIT(2) #define VL6180_CLEAR_ALS BIT(1) #define 
VL6180_CLEAR_RANGE BIT(0) /* bits of the HOLD register */ #define VL6180_HOLD_ON BIT(0) /* default value for the ALS_IT register */ #define VL6180_ALS_IT_100 0x63 /* 100 ms */ /* values for the ALS_GAIN register */ #define VL6180_ALS_GAIN_1 0x46 #define VL6180_ALS_GAIN_1_25 0x45 #define VL6180_ALS_GAIN_1_67 0x44 #define VL6180_ALS_GAIN_2_5 0x43 #define VL6180_ALS_GAIN_5 0x42 #define VL6180_ALS_GAIN_10 0x41 #define VL6180_ALS_GAIN_20 0x40 #define VL6180_ALS_GAIN_40 0x47 struct vl6180_data { struct i2c_client *client; struct mutex lock; struct completion completion; struct iio_trigger *trig; unsigned int als_gain_milli; unsigned int als_it_ms; unsigned int als_meas_rate; unsigned int range_meas_rate; struct { u16 chan[2]; aligned_s64 timestamp; } scan; }; enum { VL6180_ALS, VL6180_RANGE, VL6180_PROX }; /** * struct vl6180_chan_regs - Registers for accessing channels * @drdy_mask: Data ready bit in status register * @start_reg: Conversion start register * @value_reg: Result value register * @word: Register word length */ struct vl6180_chan_regs { u8 drdy_mask; u16 start_reg, value_reg; bool word; }; static const struct vl6180_chan_regs vl6180_chan_regs_table[] = { [VL6180_ALS] = { .drdy_mask = VL6180_ALS_READY, .start_reg = VL6180_ALS_START, .value_reg = VL6180_ALS_VALUE, .word = true, }, [VL6180_RANGE] = { .drdy_mask = VL6180_RANGE_READY, .start_reg = VL6180_RANGE_START, .value_reg = VL6180_RANGE_VALUE, .word = false, }, [VL6180_PROX] = { .drdy_mask = VL6180_RANGE_READY, .start_reg = VL6180_RANGE_START, .value_reg = VL6180_RANGE_RATE, .word = true, }, }; static int vl6180_read(struct i2c_client *client, u16 cmd, void *databuf, u8 len) { __be16 cmdbuf = cpu_to_be16(cmd); struct i2c_msg msgs[2] = { { .addr = client->addr, .len = sizeof(cmdbuf), .buf = (u8 *) &cmdbuf }, { .addr = client->addr, .len = len, .buf = databuf, .flags = I2C_M_RD } }; int ret; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) dev_err(&client->dev, "failed reading 
register 0x%04x\n", cmd); return ret; } static int vl6180_read_byte(struct i2c_client *client, u16 cmd) { u8 data; int ret; ret = vl6180_read(client, cmd, &data, sizeof(data)); if (ret < 0) return ret; return data; } static int vl6180_read_word(struct i2c_client *client, u16 cmd) { __be16 data; int ret; ret = vl6180_read(client, cmd, &data, sizeof(data)); if (ret < 0) return ret; return be16_to_cpu(data); } static int vl6180_write_byte(struct i2c_client *client, u16 cmd, u8 val) { u8 buf[3]; struct i2c_msg msgs[1] = { { .addr = client->addr, .len = sizeof(buf), .buf = (u8 *) &buf } }; int ret; buf[0] = cmd >> 8; buf[1] = cmd & 0xff; buf[2] = val; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) { dev_err(&client->dev, "failed writing register 0x%04x\n", cmd); return ret; } return 0; } static int vl6180_write_word(struct i2c_client *client, u16 cmd, u16 val) { __be16 buf[2]; struct i2c_msg msgs[1] = { { .addr = client->addr, .len = sizeof(buf), .buf = (u8 *) &buf } }; int ret; buf[0] = cpu_to_be16(cmd); buf[1] = cpu_to_be16(val); ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret < 0) { dev_err(&client->dev, "failed writing register 0x%04x\n", cmd); return ret; } return 0; } static int vl6180_measure(struct vl6180_data *data, int addr) { struct i2c_client *client = data->client; unsigned long time_left; int tries = 20, ret; u16 value; mutex_lock(&data->lock); reinit_completion(&data->completion); /* Start single shot measurement */ ret = vl6180_write_byte(client, vl6180_chan_regs_table[addr].start_reg, VL6180_STARTSTOP); if (ret < 0) goto fail; if (client->irq) { time_left = wait_for_completion_timeout(&data->completion, HZ / 10); if (time_left == 0) { ret = -ETIMEDOUT; goto fail; } } else { while (tries--) { ret = vl6180_read_byte(client, VL6180_INTR_STATUS); if (ret < 0) goto fail; if (ret & vl6180_chan_regs_table[addr].drdy_mask) break; msleep(20); } if (tries < 0) { ret = -EIO; goto fail; } } /* Read result value from 
appropriate registers */ ret = vl6180_chan_regs_table[addr].word ? vl6180_read_word(client, vl6180_chan_regs_table[addr].value_reg) : vl6180_read_byte(client, vl6180_chan_regs_table[addr].value_reg); if (ret < 0) goto fail; value = ret; /* Clear the interrupt flag after data read */ ret = vl6180_write_byte(client, VL6180_INTR_CLEAR, VL6180_CLEAR_ERROR | VL6180_CLEAR_ALS | VL6180_CLEAR_RANGE); if (ret < 0) goto fail; ret = value; fail: mutex_unlock(&data->lock); return ret; } static const struct iio_chan_spec vl6180_channels[] = { { .type = IIO_LIGHT, .address = VL6180_ALS, .scan_index = VL6180_ALS, .scan_type = { .sign = 'u', .realbits = 16, .storagebits = 16, }, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_INT_TIME) | BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_HARDWAREGAIN) | BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_DISTANCE, .address = VL6180_RANGE, .scan_index = VL6180_RANGE, .scan_type = { .sign = 'u', .realbits = 8, .storagebits = 8, }, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_SAMP_FREQ), }, { .type = IIO_PROXIMITY, .address = VL6180_PROX, .scan_index = VL6180_PROX, .scan_type = { .sign = 'u', .realbits = 16, .storagebits = 16, }, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), }, IIO_CHAN_SOFT_TIMESTAMP(3), }; /* * Available Ambient Light Sensor gain settings, 1/1000th, and * corresponding setting for the VL6180_ALS_GAIN register */ static const int vl6180_als_gain_tab[8] = { 1000, 1250, 1670, 2500, 5000, 10000, 20000, 40000 }; static const u8 vl6180_als_gain_tab_bits[8] = { VL6180_ALS_GAIN_1, VL6180_ALS_GAIN_1_25, VL6180_ALS_GAIN_1_67, VL6180_ALS_GAIN_2_5, VL6180_ALS_GAIN_5, VL6180_ALS_GAIN_10, VL6180_ALS_GAIN_20, VL6180_ALS_GAIN_40 }; static int vl6180_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct vl6180_data *data = iio_priv(indio_dev); int ret; switch (mask) { case IIO_CHAN_INFO_RAW: ret = vl6180_measure(data, 
chan->address); if (ret < 0) return ret; *val = ret; return IIO_VAL_INT; case IIO_CHAN_INFO_INT_TIME: *val = data->als_it_ms; *val2 = 1000; return IIO_VAL_FRACTIONAL; case IIO_CHAN_INFO_SCALE: switch (chan->type) { case IIO_LIGHT: /* one ALS count is 0.32 Lux @ gain 1, IT 100 ms */ *val = 32000; /* 0.32 * 1000 * 100 */ *val2 = data->als_gain_milli * data->als_it_ms; return IIO_VAL_FRACTIONAL; case IIO_DISTANCE: *val = 0; /* sensor reports mm, scale to meter */ *val2 = 1000; break; default: return -EINVAL; } return IIO_VAL_INT_PLUS_MICRO; case IIO_CHAN_INFO_HARDWAREGAIN: *val = data->als_gain_milli; *val2 = 1000; return IIO_VAL_FRACTIONAL; case IIO_CHAN_INFO_SAMP_FREQ: switch (chan->type) { case IIO_DISTANCE: *val = data->range_meas_rate; return IIO_VAL_INT; case IIO_LIGHT: *val = data->als_meas_rate; return IIO_VAL_INT; default: return -EINVAL; } default: return -EINVAL; } } static IIO_CONST_ATTR(als_gain_available, "1 1.25 1.67 2.5 5 10 20 40"); static struct attribute *vl6180_attributes[] = { &iio_const_attr_als_gain_available.dev_attr.attr, NULL }; static const struct attribute_group vl6180_attribute_group = { .attrs = vl6180_attributes, }; /* HOLD is needed before updating any config registers */ static int vl6180_hold(struct vl6180_data *data, bool hold) { return vl6180_write_byte(data->client, VL6180_HOLD, hold ? 
VL6180_HOLD_ON : 0); } static int vl6180_set_als_gain(struct vl6180_data *data, int val, int val2) { int i, ret, gain; if (val < 1 || val > 40) return -EINVAL; gain = (val * 1000000 + val2) / 1000; if (gain < 1 || gain > 40000) return -EINVAL; i = find_closest(gain, vl6180_als_gain_tab, ARRAY_SIZE(vl6180_als_gain_tab)); mutex_lock(&data->lock); ret = vl6180_hold(data, true); if (ret < 0) goto fail; ret = vl6180_write_byte(data->client, VL6180_ALS_GAIN, vl6180_als_gain_tab_bits[i]); if (ret >= 0) data->als_gain_milli = vl6180_als_gain_tab[i]; fail: vl6180_hold(data, false); mutex_unlock(&data->lock); return ret; } static int vl6180_set_it(struct vl6180_data *data, int val, int val2) { int ret, it_ms; it_ms = DIV_ROUND_CLOSEST(val2, 1000); /* round to ms */ if (val != 0 || it_ms < 1 || it_ms > 512) return -EINVAL; mutex_lock(&data->lock); ret = vl6180_hold(data, true); if (ret < 0) goto fail; ret = vl6180_write_word(data->client, VL6180_ALS_IT, it_ms - 1); if (ret >= 0) data->als_it_ms = it_ms; fail: vl6180_hold(data, false); mutex_unlock(&data->lock); return ret; } static int vl6180_meas_reg_val_from_mhz(unsigned int mhz) { unsigned int period = DIV_ROUND_CLOSEST(1000 * 1000, mhz); unsigned int reg_val = 0; if (period > 10) reg_val = period < 2550 ? 
(DIV_ROUND_CLOSEST(period, 10) - 1) : 254; return reg_val; } static int vl6180_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct vl6180_data *data = iio_priv(indio_dev); unsigned int reg_val; switch (mask) { case IIO_CHAN_INFO_INT_TIME: return vl6180_set_it(data, val, val2); case IIO_CHAN_INFO_HARDWAREGAIN: if (chan->type != IIO_LIGHT) return -EINVAL; return vl6180_set_als_gain(data, val, val2); case IIO_CHAN_INFO_SAMP_FREQ: { guard(mutex)(&data->lock); switch (chan->type) { case IIO_DISTANCE: data->range_meas_rate = val; reg_val = vl6180_meas_reg_val_from_mhz(val); return vl6180_write_byte(data->client, VL6180_RANGE_INTER_MEAS_TIME, reg_val); case IIO_LIGHT: data->als_meas_rate = val; reg_val = vl6180_meas_reg_val_from_mhz(val); return vl6180_write_byte(data->client, VL6180_ALS_INTER_MEAS_TIME, reg_val); default: return -EINVAL; } } default: return -EINVAL; } } static irqreturn_t vl6180_threaded_irq(int irq, void *priv) { struct iio_dev *indio_dev = priv; struct vl6180_data *data = iio_priv(indio_dev); if (iio_buffer_enabled(indio_dev)) iio_trigger_poll_nested(indio_dev->trig); else complete(&data->completion); return IRQ_HANDLED; } static irqreturn_t vl6180_trigger_handler(int irq, void *priv) { struct iio_poll_func *pf = priv; struct iio_dev *indio_dev = pf->indio_dev; struct vl6180_data *data = iio_priv(indio_dev); s64 time_ns = iio_get_time_ns(indio_dev); int ret, bit, i = 0; iio_for_each_active_channel(indio_dev, bit) { if (vl6180_chan_regs_table[bit].word) ret = vl6180_read_word(data->client, vl6180_chan_regs_table[bit].value_reg); else ret = vl6180_read_byte(data->client, vl6180_chan_regs_table[bit].value_reg); if (ret < 0) { dev_err(&data->client->dev, "failed to read from value regs: %d\n", ret); return IRQ_HANDLED; } data->scan.chan[i++] = ret; } iio_push_to_buffers_with_timestamp(indio_dev, &data->scan, time_ns); iio_trigger_notify_done(indio_dev->trig); /* Clear the interrupt flag after data 
read */ ret = vl6180_write_byte(data->client, VL6180_INTR_CLEAR, VL6180_CLEAR_ERROR | VL6180_CLEAR_ALS | VL6180_CLEAR_RANGE); if (ret < 0) dev_err(&data->client->dev, "failed to clear irq: %d\n", ret); return IRQ_HANDLED; } static const struct iio_info vl6180_info = { .read_raw = vl6180_read_raw, .write_raw = vl6180_write_raw, .attrs = &vl6180_attribute_group, .validate_trigger = iio_validate_own_trigger, }; static int vl6180_buffer_postenable(struct iio_dev *indio_dev) { struct vl6180_data *data = iio_priv(indio_dev); int bit; iio_for_each_active_channel(indio_dev, bit) return vl6180_write_byte(data->client, vl6180_chan_regs_table[bit].start_reg, VL6180_MODE_CONT | VL6180_STARTSTOP); return -EINVAL; } static int vl6180_buffer_postdisable(struct iio_dev *indio_dev) { struct vl6180_data *data = iio_priv(indio_dev); int bit; iio_for_each_active_channel(indio_dev, bit) return vl6180_write_byte(data->client, vl6180_chan_regs_table[bit].start_reg, VL6180_STARTSTOP); return -EINVAL; } static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = { .postenable = &vl6180_buffer_postenable, .postdisable = &vl6180_buffer_postdisable, }; static const struct iio_trigger_ops vl6180_trigger_ops = { .validate_device = iio_trigger_validate_own_device, }; static int vl6180_init(struct vl6180_data *data, struct iio_dev *indio_dev) { struct i2c_client *client = data->client; int ret; ret = vl6180_read_byte(client, VL6180_MODEL_ID); if (ret < 0) return ret; if (ret != VL6180_MODEL_ID_VAL) { dev_err(&client->dev, "invalid model ID %02x\n", ret); return -ENODEV; } ret = vl6180_hold(data, true); if (ret < 0) return ret; ret = vl6180_read_byte(client, VL6180_OUT_OF_RESET); if (ret < 0) return ret; /* * Detect false reset condition here. This bit is always set when the * system comes out of reset. 
*/ if (ret != 0x01) dev_info(&client->dev, "device is not fresh out of reset\n"); /* Enable ALS and Range ready interrupts */ ret = vl6180_write_byte(client, VL6180_INTR_CONFIG, VL6180_ALS_READY | VL6180_RANGE_READY); if (ret < 0) return ret; ret = devm_iio_triggered_buffer_setup(&client->dev, indio_dev, NULL, &vl6180_trigger_handler, &iio_triggered_buffer_setup_ops); if (ret) return ret; /* Default Range inter-measurement time: 50ms or 20000 mHz */ ret = vl6180_write_byte(client, VL6180_RANGE_INTER_MEAS_TIME, vl6180_meas_reg_val_from_mhz(20000)); if (ret < 0) return ret; data->range_meas_rate = 20000; /* Default ALS inter-measurement time: 10ms or 100000 mHz */ ret = vl6180_write_byte(client, VL6180_ALS_INTER_MEAS_TIME, vl6180_meas_reg_val_from_mhz(100000)); if (ret < 0) return ret; data->als_meas_rate = 100000; /* ALS integration time: 100ms */ data->als_it_ms = 100; ret = vl6180_write_word(client, VL6180_ALS_IT, VL6180_ALS_IT_100); if (ret < 0) return ret; /* ALS gain: 1 */ data->als_gain_milli = 1000; ret = vl6180_write_byte(client, VL6180_ALS_GAIN, VL6180_ALS_GAIN_1); if (ret < 0) return ret; ret = vl6180_write_byte(client, VL6180_OUT_OF_RESET, 0x00); if (ret < 0) return ret; return vl6180_hold(data, false); } static int vl6180_probe(struct i2c_client *client) { struct vl6180_data *data; struct iio_dev *indio_dev; int ret; indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); if (!indio_dev) return -ENOMEM; data = iio_priv(indio_dev); i2c_set_clientdata(client, indio_dev); data->client = client; mutex_init(&data->lock); indio_dev->info = &vl6180_info; indio_dev->channels = vl6180_channels; indio_dev->num_channels = ARRAY_SIZE(vl6180_channels); indio_dev->name = VL6180_DRV_NAME; indio_dev->modes = INDIO_DIRECT_MODE; ret = vl6180_init(data, indio_dev); if (ret < 0) return ret; if (client->irq) { ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, vl6180_threaded_irq, IRQF_ONESHOT, indio_dev->name, indio_dev); if (ret) return 
dev_err_probe(&client->dev, ret, "devm_request_irq error \n"); init_completion(&data->completion); data->trig = devm_iio_trigger_alloc(&client->dev, "%s-dev%d", indio_dev->name, iio_device_id(indio_dev)); if (!data->trig) return -ENOMEM; data->trig->ops = &vl6180_trigger_ops; iio_trigger_set_drvdata(data->trig, indio_dev); ret = devm_iio_trigger_register(&client->dev, data->trig); if (ret) return ret; indio_dev->trig = iio_trigger_get(data->trig); } return devm_iio_device_register(&client->dev, indio_dev); } static const struct of_device_id vl6180_of_match[] = { { .compatible = "st,vl6180", }, { }, }; MODULE_DEVICE_TABLE(of, vl6180_of_match); static const struct i2c_device_id vl6180_id[] = { { "vl6180" }, { } }; MODULE_DEVICE_TABLE(i2c, vl6180_id); static struct i2c_driver vl6180_driver = { .driver = { .name = VL6180_DRV_NAME, .of_match_table = vl6180_of_match, }, .probe = vl6180_probe, .id_table = vl6180_id, }; module_i2c_driver(vl6180_driver); MODULE_AUTHOR("Peter Meerwald-Stadler <[email protected]>"); MODULE_AUTHOR("Manivannan Sadhasivam <[email protected]>"); MODULE_DESCRIPTION("STMicro VL6180 ALS, range and proximity sensor driver"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0 */ /* * Support for Intel Camera Imaging ISP subsystem. * Copyright (c) 2015, Intel Corporation. */ #ifndef __IA_CSS_XNR_HOST_H #define __IA_CSS_XNR_HOST_H #include "sh_css_params.h" #include "ia_css_xnr_param.h" #include "ia_css_xnr_table.host.h" extern const struct ia_css_xnr_config default_xnr_config; void ia_css_xnr_table_vamem_encode( struct sh_css_isp_xnr_vamem_params *to, const struct ia_css_xnr_table *from, unsigned int size); void ia_css_xnr_encode( struct sh_css_isp_xnr_params *to, const struct ia_css_xnr_config *from, unsigned int size); void ia_css_xnr_table_debug_dtrace( const struct ia_css_xnr_table *s3a, unsigned int level); void ia_css_xnr_debug_dtrace( const struct ia_css_xnr_config *config, unsigned int level); #endif /* __IA_CSS_XNR_HOST_H */
/* $Id$ * 1993/03/31 * linux/kernel/aha1740.c * * Based loosely on aha1542.c which is * Copyright (C) 1992 Tommy Thorn and * Modified by Eric Youngdale * * This file is aha1740.c, written and * Copyright (C) 1992,1993 Brad McLean * [email protected] or [email protected]. * * Modifications to makecode and queuecommand * for proper handling of multiple devices courteously * provided by Michael Weller, March, 1993 * * Multiple adapter support, extended translation detection, * update to current scsi subsystem changes, proc fs support, * working (!) module support based on patches from Andreas Arens, * by Andreas Degert <[email protected]>, 2/1997 * * aha1740_makecode may still need even more work * if it doesn't work for your devices, take a look. * * Reworked for new_eh and new locking by Alan Cox <[email protected]> * * Converted to EISA and generic DMA APIs by Marc Zyngier * <[email protected]>, 4/2003. * * Shared interrupt support added by Rask Ingemann Lambertsen * <[email protected]>, 10/2003 * * For the avoidance of doubt the "preferred form" of this code is one which * is in an open non patent encumbered format. Where cryptographic key signing * forms part of the process of creating an executable the information * including keys needed to generate an equivalently functional executable * are deemed to be part of the source code. 
*/ #include <linux/blkdev.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/proc_fs.h> #include <linux/stat.h> #include <linux/init.h> #include <linux/device.h> #include <linux/eisa.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <asm/dma.h> #include <asm/io.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_host.h> #include <scsi/scsi_tcq.h> #include "aha1740.h" /* IF YOU ARE HAVING PROBLEMS WITH THIS DRIVER, AND WANT TO WATCH IT WORK, THEN: #define DEBUG */ #ifdef DEBUG #define DEB(x) x #else #define DEB(x) #endif struct aha1740_hostdata { struct eisa_device *edev; unsigned int translation; unsigned int last_ecb_used; dma_addr_t ecb_dma_addr; struct ecb ecb[AHA1740_ECBS]; }; struct aha1740_sg { struct aha1740_chain sg_chain[AHA1740_SCATTER]; dma_addr_t sg_dma_addr; dma_addr_t buf_dma_addr; }; #define HOSTDATA(host) ((struct aha1740_hostdata *) &host->hostdata) static inline struct ecb *ecb_dma_to_cpu (struct Scsi_Host *host, dma_addr_t dma) { struct aha1740_hostdata *hdata = HOSTDATA (host); dma_addr_t offset; offset = dma - hdata->ecb_dma_addr; return (struct ecb *)(((char *) hdata->ecb) + (unsigned int) offset); } static inline dma_addr_t ecb_cpu_to_dma (struct Scsi_Host *host, void *cpu) { struct aha1740_hostdata *hdata = HOSTDATA (host); dma_addr_t offset; offset = (char *) cpu - (char *) hdata->ecb; return hdata->ecb_dma_addr + offset; } static int aha1740_show_info(struct seq_file *m, struct Scsi_Host *shpnt) { struct aha1740_hostdata *host = HOSTDATA(shpnt); seq_printf(m, "aha174x at IO:%lx, IRQ %d, SLOT %d.\n" "Extended translation %sabled.\n", shpnt->io_port, shpnt->irq, host->edev->slot, host->translation ? 
"en" : "dis"); return 0; } static int aha1740_makecode(unchar *sense, unchar *status) { struct statusword { ushort don:1, /* Command Done - No Error */ du:1, /* Data underrun */ :1, qf:1, /* Queue full */ sc:1, /* Specification Check */ dor:1, /* Data overrun */ ch:1, /* Chaining Halted */ intr:1, /* Interrupt issued */ asa:1, /* Additional Status Available */ sns:1, /* Sense information Stored */ :1, ini:1, /* Initialization Required */ me:1, /* Major error or exception */ :1, eca:1, /* Extended Contingent alliance */ :1; } status_word; int retval = DID_OK; status_word = * (struct statusword *) status; #ifdef DEBUG printk("makecode from %x,%x,%x,%x %x,%x,%x,%x", status[0], status[1], status[2], status[3], sense[0], sense[1], sense[2], sense[3]); #endif if (!status_word.don) { /* Anything abnormal was detected */ if ( (status[1]&0x18) || status_word.sc ) { /*Additional info available*/ /* Use the supplied info for further diagnostics */ switch ( status[2] ) { case 0x12: if ( status_word.dor ) retval=DID_ERROR; /* It's an Overrun */ /* If not overrun, assume underrun and * ignore it! 
*/ break; case 0x00: /* No info, assume no error, should * not occur */ break; case 0x11: case 0x21: retval=DID_TIME_OUT; break; case 0x0a: retval=DID_BAD_TARGET; break; case 0x04: case 0x05: retval=DID_ABORT; /* Either by this driver or the * AHA1740 itself */ break; default: retval=DID_ERROR; /* No further * diagnostics * possible */ } } else { /* Michael suggests, and Brad concurs: */ if ( status_word.qf ) { retval = DID_TIME_OUT; /* forces a redo */ /* I think this specific one should * not happen -Brad */ printk("aha1740.c: WARNING: AHA1740 queue overflow!\n"); } else if ( status[0]&0x60 ) { /* Didn't find a better error */ retval = DID_ERROR; } /* In any other case return DID_OK so for example CONDITION_CHECKS make it through to the appropriate device driver */ } } /* Under all circumstances supply the target status -Michael */ return status[3] | retval << 16; } static int aha1740_test_port(unsigned int base) { if ( inb(PORTADR(base)) & PORTADDR_ENH ) return 1; /* Okay, we're all set */ printk("aha174x: Board detected, but not in enhanced mode, so disabled it.\n"); return 0; } /* A "high" level interrupt handler */ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id) { struct Scsi_Host *host = (struct Scsi_Host *) dev_id; void (*my_done)(struct scsi_cmnd *); int errstatus, adapstat; int number_serviced; struct ecb *ecbptr; struct scsi_cmnd *SCtmp; unsigned int base; unsigned long flags; int handled = 0; struct aha1740_sg *sgptr; struct eisa_device *edev; if (!host) panic("aha1740.c: Irq from unknown host!\n"); spin_lock_irqsave(host->host_lock, flags); base = host->io_port; number_serviced = 0; edev = HOSTDATA(host)->edev; while(inb(G2STAT(base)) & G2STAT_INTPEND) { handled = 1; DEB(printk("aha1740_intr top of loop.\n")); adapstat = inb(G2INTST(base)); ecbptr = ecb_dma_to_cpu (host, inl(MBOXIN0(base))); outb(G2CNTRL_IRST,G2CNTRL(base)); /* interrupt reset */ switch ( adapstat & G2INTST_MASK ) { case G2INTST_CCBRETRY: case G2INTST_CCBERROR: case 
G2INTST_CCBGOOD: /* Host Ready -> Mailbox in complete */ outb(G2CNTRL_HRDY,G2CNTRL(base)); if (!ecbptr) { printk("Aha1740 null ecbptr in interrupt (%x,%x,%x,%d)\n", inb(G2STAT(base)),adapstat, inb(G2INTST(base)), number_serviced++); continue; } SCtmp = ecbptr->SCpnt; if (!SCtmp) { printk("Aha1740 null SCtmp in interrupt (%x,%x,%x,%d)\n", inb(G2STAT(base)),adapstat, inb(G2INTST(base)), number_serviced++); continue; } sgptr = (struct aha1740_sg *) SCtmp->host_scribble; scsi_dma_unmap(SCtmp); /* Free the sg block */ dma_free_coherent (&edev->dev, sizeof (struct aha1740_sg), SCtmp->host_scribble, sgptr->sg_dma_addr); /* Fetch the sense data, and tuck it away, in the required slot. The Adaptec automatically fetches it, and there is no guarantee that we will still have it in the cdb when we come back */ if ( (adapstat & G2INTST_MASK) == G2INTST_CCBERROR ) { memcpy_and_pad(SCtmp->sense_buffer, SCSI_SENSE_BUFFERSIZE, ecbptr->sense, sizeof(ecbptr->sense), 0); errstatus = aha1740_makecode(ecbptr->sense,ecbptr->status); } else errstatus = 0; DEB(if (errstatus) printk("aha1740_intr_handle: returning %6x\n", errstatus)); SCtmp->result = errstatus; my_done = ecbptr->done; memset(ecbptr,0,sizeof(struct ecb)); if ( my_done ) my_done(SCtmp); break; case G2INTST_HARDFAIL: printk(KERN_ALERT "aha1740 hardware failure!\n"); panic("aha1740.c"); /* Goodbye */ case G2INTST_ASNEVENT: printk("aha1740 asynchronous event: %02x %02x %02x %02x %02x\n", adapstat, inb(MBOXIN0(base)), inb(MBOXIN1(base)), inb(MBOXIN2(base)), inb(MBOXIN3(base))); /* Say What? 
*/ /* Host Ready -> Mailbox in complete */ outb(G2CNTRL_HRDY,G2CNTRL(base)); break; case G2INTST_CMDGOOD: /* set immediate command success flag here: */ break; case G2INTST_CMDERROR: /* Set immediate command failure flag here: */ break; } number_serviced++; } spin_unlock_irqrestore(host->host_lock, flags); return IRQ_RETVAL(handled); } static int aha1740_queuecommand_lck(struct scsi_cmnd *SCpnt) { void (*done)(struct scsi_cmnd *) = scsi_done; unchar direction; unchar *cmd = (unchar *) SCpnt->cmnd; unchar target = scmd_id(SCpnt); struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host); unsigned long flags; dma_addr_t sg_dma; struct aha1740_sg *sgptr; int ecbno, nseg; DEB(int i); if(*cmd == REQUEST_SENSE) { SCpnt->result = 0; done(SCpnt); return 0; } #ifdef DEBUG if (*cmd == READ_10 || *cmd == WRITE_10) i = xscsi2int(cmd+2); else if (*cmd == READ_6 || *cmd == WRITE_6) i = scsi2int(cmd+2); else i = -1; printk("aha1740_queuecommand: dev %d cmd %02x pos %d len %d ", target, *cmd, i, bufflen); printk("scsi cmd:"); for (i = 0; i < SCpnt->cmd_len; i++) printk("%02x ", cmd[i]); printk("\n"); #endif /* locate an available ecb */ spin_lock_irqsave(SCpnt->device->host->host_lock, flags); ecbno = host->last_ecb_used + 1; /* An optimization */ if (ecbno >= AHA1740_ECBS) ecbno = 0; do { if (!host->ecb[ecbno].cmdw) break; ecbno++; if (ecbno >= AHA1740_ECBS) ecbno = 0; } while (ecbno != host->last_ecb_used); if (host->ecb[ecbno].cmdw) panic("Unable to find empty ecb for aha1740.\n"); host->ecb[ecbno].cmdw = AHA1740CMD_INIT; /* SCSI Initiator Command doubles as reserved flag */ host->last_ecb_used = ecbno; spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); #ifdef DEBUG printk("Sending command (%d %x)...", ecbno, done); #endif host->ecb[ecbno].cdblen = SCpnt->cmd_len; /* SCSI Command * Descriptor Block * Length */ direction = 0; if (*cmd == READ_10 || *cmd == READ_6) direction = 1; else if (*cmd == WRITE_10 || *cmd == WRITE_6) direction = 0; 
memcpy(host->ecb[ecbno].cdb, cmd, SCpnt->cmd_len); SCpnt->host_scribble = dma_alloc_coherent (&host->edev->dev, sizeof (struct aha1740_sg), &sg_dma, GFP_ATOMIC); if(SCpnt->host_scribble == NULL) { printk(KERN_WARNING "aha1740: out of memory in queuecommand!\n"); return 1; } sgptr = (struct aha1740_sg *) SCpnt->host_scribble; sgptr->sg_dma_addr = sg_dma; nseg = scsi_dma_map(SCpnt); BUG_ON(nseg < 0); if (nseg) { struct scatterlist *sg; struct aha1740_chain * cptr; int i; DEB(unsigned char * ptr); host->ecb[ecbno].sg = 1; /* SCSI Initiator Command * w/scatter-gather*/ cptr = sgptr->sg_chain; scsi_for_each_sg(SCpnt, sg, nseg, i) { cptr[i].datalen = sg_dma_len (sg); cptr[i].dataptr = sg_dma_address (sg); } host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain); host->ecb[ecbno].dataptr = sg_dma; #ifdef DEBUG printk("cptr %x: ",cptr); ptr = (unsigned char *) cptr; for(i=0;i<24;i++) printk("%02x ", ptr[i]); #endif } else { host->ecb[ecbno].datalen = 0; host->ecb[ecbno].dataptr = 0; } host->ecb[ecbno].lun = SCpnt->device->lun; host->ecb[ecbno].ses = 1; /* Suppress underrun errors */ host->ecb[ecbno].dir = direction; host->ecb[ecbno].ars = 1; /* Yes, get the sense on an error */ host->ecb[ecbno].senselen = 12; host->ecb[ecbno].senseptr = ecb_cpu_to_dma (SCpnt->device->host, host->ecb[ecbno].sense); host->ecb[ecbno].statusptr = ecb_cpu_to_dma (SCpnt->device->host, host->ecb[ecbno].status); host->ecb[ecbno].done = done; host->ecb[ecbno].SCpnt = SCpnt; #ifdef DEBUG { int i; printk("aha1740_command: sending.. "); for (i = 0; i < sizeof(host->ecb[ecbno]) - 10; i++) printk("%02x ", ((unchar *)&host->ecb[ecbno])[i]); } printk("\n"); #endif if (done) { /* The Adaptec Spec says the card is so fast that the loops will only be executed once in the code below. Even if this was true with the fastest processors when the spec was written, it doesn't seem to be true with today's fast processors. We print a warning if the code is executed more often than LOOPCNT_WARN. 
If this happens, it should be investigated. If the count reaches LOOPCNT_MAX, we assume something is broken; since there is no way to return an error (the return value is ignored by the mid-level scsi layer) we have to panic (and maybe that's the best thing we can do then anyhow). */ #define LOOPCNT_WARN 10 /* excessive mbxout wait -> syslog-msg */ #define LOOPCNT_MAX 1000000 /* mbxout deadlock -> panic() after ~ 2 sec. */ int loopcnt; unsigned int base = SCpnt->device->host->io_port; DEB(printk("aha1740[%d] critical section\n",ecbno)); spin_lock_irqsave(SCpnt->device->host->host_lock, flags); for (loopcnt = 0; ; loopcnt++) { if (inb(G2STAT(base)) & G2STAT_MBXOUT) break; if (loopcnt == LOOPCNT_WARN) { printk("aha1740[%d]_mbxout wait!\n",ecbno); } if (loopcnt == LOOPCNT_MAX) panic("aha1740.c: mbxout busy!\n"); } outl (ecb_cpu_to_dma (SCpnt->device->host, host->ecb + ecbno), MBOXOUT0(base)); for (loopcnt = 0; ; loopcnt++) { if (! (inb(G2STAT(base)) & G2STAT_BUSY)) break; if (loopcnt == LOOPCNT_WARN) { printk("aha1740[%d]_attn wait!\n",ecbno); } if (loopcnt == LOOPCNT_MAX) panic("aha1740.c: attn wait failed!\n"); } outb(ATTN_START | (target & 7), ATTN(base)); /* Start it up */ spin_unlock_irqrestore(SCpnt->device->host->host_lock, flags); DEB(printk("aha1740[%d] request queued.\n",ecbno)); } else printk(KERN_ALERT "aha1740_queuecommand: done can't be NULL\n"); return 0; } static DEF_SCSI_QCMD(aha1740_queuecommand) /* Query the board for its irq_level and irq_type. Nothing else matters in enhanced mode on an EISA bus. 
*/
/*
 * Read board configuration from the EISA registers: IRQ number, IRQ
 * trigger type and the extended-translation setting, then enable board
 * interrupts.
 */
static void aha1740_getconfig(unsigned int base, unsigned int *irq_level,
			      unsigned int *irq_type,
			      unsigned int *translation)
{
	/* Map of the 3-bit INTDEF field to IRQ numbers; the zero entries
	 * are encodings this table treats as invalid. */
	static int intab[] = { 9, 10, 11, 12, 0, 14, 15, 0 };

	*irq_level = intab[inb(INTDEF(base)) & 0x7];
	/* Bit 3 of INTDEF: 1 = edge triggered, 0 = level (see probe printk) */
	*irq_type = (inb(INTDEF(base)) & 0x8) >> 3;
	*translation = inb(RESV1(base)) & 0x1;
	/* Set bit 4 of INTDEF to enable interrupts from the board */
	outb(inb(INTDEF(base)) | 0x10, INTDEF(base));
}

/*
 * Report a fake CHS geometry for the BIOS.  With extended translation
 * enabled and more than 1024 cylinders in the incoming geometry, use
 * 255 heads / 63 sectors; otherwise the classic 64 / 32 layout.
 * ip[0] = heads, ip[1] = sectors per track, ip[2] = cylinders.
 */
static int aha1740_biosparam(struct scsi_device *sdev,
			     struct block_device *dev,
			     sector_t capacity, int* ip)
{
	int size = capacity;
	int extended = HOSTDATA(sdev->host)->translation;

	DEB(printk("aha1740_biosparam\n"));
	if (extended && (ip[2] > 1024)) {
		ip[0] = 255;			/* heads */
		ip[1] = 63;			/* sectors per track */
		ip[2] = size / (255 * 63);	/* cylinders */
	} else {
		ip[0] = 64;
		ip[1] = 32;
		ip[2] = size >> 11;		/* capacity / (64 * 32) */
	}
	return 0;
}

static int aha1740_eh_abort_handler (struct scsi_cmnd *dummy)
{
/*
 * From Alan Cox :
 * The AHA1740 has firmware handled abort/reset handling. The "head in
 * sand" kernel code is correct for once 8)
 *
 * So we define a dummy handler just to keep the kernel SCSI code as
 * quiet as possible...
 */
	return SUCCESS;
}

/* SCSI host template shared by every 174x board this driver binds to */
static const struct scsi_host_template aha1740_template = {
	.module           = THIS_MODULE,
	.proc_name        = "aha1740",
	.show_info        = aha1740_show_info,
	.name             = "Adaptec 174x (EISA)",
	.queuecommand     = aha1740_queuecommand,
	.bios_param       = aha1740_biosparam,
	.can_queue        = AHA1740_ECBS,
	.this_id          = 7,
	.sg_tablesize     = AHA1740_SCATTER,
	.eh_abort_handler = aha1740_eh_abort_handler,
};

/*
 * Probe one EISA slot: claim the I/O region, verify the board, read its
 * configuration, allocate and populate a Scsi_Host, map the ECB array
 * for DMA, hook up the interrupt and register with the SCSI core.
 * Resources are unwound in reverse order through the err_* labels.
 */
static int aha1740_probe (struct device *dev)
{
	int slotbase, rc;
	unsigned int irq_level, irq_type, translation;
	struct Scsi_Host *shpnt;
	struct aha1740_hostdata *host;
	struct eisa_device *edev = to_eisa_device (dev);

	DEB(printk("aha1740_probe: \n"));

	slotbase = edev->base_addr + EISA_VENDOR_ID_OFFSET;
	if (!request_region(slotbase, SLOTSIZE, "aha1740")) /* See if in use */
		return -EBUSY;
	if (!aha1740_test_port(slotbase))
		goto err_release_region;
	aha1740_getconfig(slotbase,&irq_level,&irq_type,&translation);
	if ((inb(G2STAT(slotbase)) &
	     (G2STAT_MBXOUT|G2STAT_BUSY)) != G2STAT_MBXOUT) {
		/* If the card isn't ready, hard reset it */
		outb(G2CNTRL_HRST, G2CNTRL(slotbase));
		outb(0, G2CNTRL(slotbase));
	}
	printk(KERN_INFO "Configuring slot %d at IO:%x, IRQ %u (%s)\n",
	       edev->slot, slotbase, irq_level, irq_type ? "edge" : "level");
	printk(KERN_INFO "aha174x: Extended translation %sabled.\n",
	       translation ? "en" : "dis");

	shpnt = scsi_host_alloc(&aha1740_template,
				sizeof(struct aha1740_hostdata));
	if(shpnt == NULL)
		goto err_release_region;

	shpnt->base = 0;
	shpnt->io_port = slotbase;
	shpnt->n_io_port = SLOTSIZE;
	shpnt->irq = irq_level;
	shpnt->dma_channel = 0xff;	/* no ISA DMA channel on EISA */
	host = HOSTDATA(shpnt);
	host->edev = edev;
	host->translation = translation;
	host->ecb_dma_addr = dma_map_single (&edev->dev, host->ecb,
					     sizeof (host->ecb),
					     DMA_BIDIRECTIONAL);
	/* NOTE(review): this tests the handle against 0 rather than using
	 * dma_mapping_error(); confirm this is intentional for this bus. */
	if (!host->ecb_dma_addr) {
		printk (KERN_ERR "aha1740_probe: Couldn't map ECB, giving up\n");
		goto err_host_put;
	}

	DEB(printk("aha1740_probe: enable interrupt channel %d\n",irq_level));
	/* Edge-triggered IRQs cannot be shared */
	if (request_irq(irq_level,aha1740_intr_handle,irq_type ?
			0 : IRQF_SHARED, "aha1740",shpnt)) {
		printk(KERN_ERR "aha1740_probe: Unable to allocate IRQ %d.\n",
		       irq_level);
		goto err_unmap;
	}
	eisa_set_drvdata (edev, shpnt);

	rc = scsi_add_host (shpnt, dev);
	if (rc)
		goto err_irq;

	scsi_scan_host (shpnt);
	return 0;

 err_irq:
	free_irq(irq_level, shpnt);
 err_unmap:
	dma_unmap_single (&edev->dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
 err_host_put:
	scsi_host_put (shpnt);
 err_release_region:
	release_region(slotbase, SLOTSIZE);

	return -ENODEV;
}

/* Tear down one host: mirror of aha1740_probe, in reverse order. */
static int aha1740_remove (struct device *dev)
{
	struct Scsi_Host *shpnt = dev_get_drvdata(dev);
	struct aha1740_hostdata *host = HOSTDATA (shpnt);

	scsi_remove_host(shpnt);
	free_irq (shpnt->irq, shpnt);
	dma_unmap_single (dev, host->ecb_dma_addr,
			  sizeof (host->ecb), DMA_BIDIRECTIONAL);
	release_region (shpnt->io_port, SLOTSIZE);
	scsi_host_put (shpnt);
	return 0;
}

/* EISA board IDs this driver binds to */
static struct eisa_device_id aha1740_ids[] = {
	{ "ADP0000" },		/* 1740 */
	{ "ADP0001" },		/* 1740A */
	{ "ADP0002" },		/* 1742A */
	{ "ADP0400" },		/* 1744 */
	{ "" }			/* terminator */
};
MODULE_DEVICE_TABLE(eisa, aha1740_ids);

static struct eisa_driver aha1740_driver = {
	.id_table = aha1740_ids,
	.driver   = {
		.name   = "aha1740",
		.probe  = aha1740_probe,
		.remove = aha1740_remove,
	},
};

static __init int aha1740_init (void)
{
	return eisa_driver_register (&aha1740_driver);
}

static __exit void aha1740_exit (void)
{
	eisa_driver_unregister (&aha1740_driver);
}

module_init (aha1740_init);
module_exit (aha1740_exit);

MODULE_DESCRIPTION("Adaptec AHA1740 SCSI host adapter driver");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2022 MediaTek Corporation. All rights reserved.
 * Author: Allen-KH Cheng <[email protected]>
 */

#include <linux/firmware/mediatek/mtk-adsp-ipc.h>
#include <linux/kernel.h>
#include <linux/mailbox_client.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

/* Mailbox channel names, indexed by channel index (rx first, then tx) */
static const char * const adsp_mbox_ch_names[MTK_ADSP_MBOX_NUM] = { "rx", "tx" };

/*
 * mtk_adsp_ipc_send - send ipc cmd to MTK ADSP
 *
 * @ipc: ADSP IPC handle
 * @idx: index of the mailbox channel
 * @msg: IPC cmd (reply or request)
 *
 * Returns zero for success from mbox_send_message
 * negative value for error
 */
int mtk_adsp_ipc_send(struct mtk_adsp_ipc *ipc, unsigned int idx, uint32_t msg)
{
	struct mtk_adsp_chan *adsp_chan;
	int ret;

	/* Reject out-of-range channel indices before touching chans[] */
	if (idx >= MTK_ADSP_MBOX_NUM)
		return -EINVAL;

	adsp_chan = &ipc->chans[idx];
	/* cl->tx_block is false (see probe), so this does not sleep waiting
	 * for transmission to complete */
	ret = mbox_send_message(adsp_chan->ch, &msg);
	if (ret < 0)
		return ret;

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_adsp_ipc_send);

/*
 * mtk_adsp_ipc_recv - recv callback used by MTK ADSP mailbox
 *
 * @c: mbox client
 * @msg: message received
 *
 * Users of ADSP IPC will need to provide handle_reply and handle_request
 * callbacks.
 */
static void mtk_adsp_ipc_recv(struct mbox_client *c, void *msg)
{
	struct mtk_adsp_chan *chan = container_of(c, struct mtk_adsp_chan, cl);
	struct device *dev = c->dev;

	/* Dispatch on which channel fired; NOTE(review): ops and both
	 * callbacks are dereferenced unchecked — they must be installed
	 * before the mailbox channels become active. */
	switch (chan->idx) {
	case MTK_ADSP_MBOX_REPLY:
		chan->ipc->ops->handle_reply(chan->ipc);
		break;
	case MTK_ADSP_MBOX_REQUEST:
		chan->ipc->ops->handle_request(chan->ipc);
		break;
	default:
		dev_err(dev, "wrong mbox chan %d\n", chan->idx);
		break;
	}
}

/*
 * Allocate the IPC context and request one mailbox channel per entry in
 * adsp_mbox_ch_names.  On failure, channels already acquired are freed
 * before returning the error.
 */
static int mtk_adsp_ipc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mtk_adsp_ipc *adsp_ipc;
	struct mtk_adsp_chan *adsp_chan;
	struct mbox_client *cl;
	int ret;
	int i, j;

	/* Inherit the DT node from the parent device */
	device_set_of_node_from_dev(&pdev->dev, pdev->dev.parent);

	adsp_ipc = devm_kzalloc(dev, sizeof(*adsp_ipc), GFP_KERNEL);
	if (!adsp_ipc)
		return -ENOMEM;

	for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
		adsp_chan = &adsp_ipc->chans[i];
		cl = &adsp_chan->cl;
		/* Channels are requested on the parent, non-blocking send */
		cl->dev = dev->parent;
		cl->tx_block = false;
		cl->knows_txdone = false;
		cl->tx_prepare = NULL;
		cl->rx_callback = mtk_adsp_ipc_recv;

		adsp_chan->ipc = adsp_ipc;
		adsp_chan->idx = i;
		adsp_chan->ch = mbox_request_channel_byname(cl, adsp_mbox_ch_names[i]);
		if (IS_ERR(adsp_chan->ch)) {
			ret = dev_err_probe(dev, PTR_ERR(adsp_chan->ch),
					    "Failed to request mbox channel %s\n",
					    adsp_mbox_ch_names[i]);

			/* Roll back the channels acquired so far */
			for (j = 0; j < i; j++) {
				adsp_chan = &adsp_ipc->chans[j];
				mbox_free_channel(adsp_chan->ch);
			}

			return ret;
		}
	}

	adsp_ipc->dev = dev;
	dev_set_drvdata(dev, adsp_ipc);
	dev_dbg(dev, "MTK ADSP IPC initialized\n");

	return 0;
}

/* Free every mailbox channel acquired in probe */
static void mtk_adsp_ipc_remove(struct platform_device *pdev)
{
	struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev);
	struct mtk_adsp_chan *adsp_chan;
	int i;

	for (i = 0; i < MTK_ADSP_MBOX_NUM; i++) {
		adsp_chan = &adsp_ipc->chans[i];
		mbox_free_channel(adsp_chan->ch);
	}
}

static struct platform_driver mtk_adsp_ipc_driver = {
	.driver = {
		.name = "mtk-adsp-ipc",
	},
	.probe = mtk_adsp_ipc_probe,
	.remove = mtk_adsp_ipc_remove,
};
builtin_platform_driver(mtk_adsp_ipc_driver);

MODULE_AUTHOR("Allen-KH Cheng <[email protected]>");
MODULE_DESCRIPTION("MTK ADSP IPC Driver");
MODULE_LICENSE("GPL");
/* * Copyright 2012-15 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: AMD * */ #ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ #define __DAL_AMDGPU_DM_MST_TYPES_H__ #define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 #define SYNAPTICS_RC_COMMAND 0x4B2 #define SYNAPTICS_RC_RESULT 0x4B3 #define SYNAPTICS_RC_LENGTH 0x4B8 #define SYNAPTICS_RC_OFFSET 0x4BC #define SYNAPTICS_RC_DATA 0x4C0 #define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C /** * Panamera MST Hub detection * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case * Check from beginning of branch device vendor specific field (050Ch) */ #define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0) #define BRANCH_HW_REVISION_PANAMERA_A2 0x10 #define SYNAPTICS_CASCADED_HUB_ID 0x5A #define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 
1 : 0) #define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 #define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 enum mst_msg_ready_type { NONE_MSG_RDY_EVENT = 0, DOWN_REP_MSG_RDY_EVENT = 1, UP_REQ_MSG_RDY_EVENT = 2, DOWN_OR_UP_MSG_RDY_EVENT = 3 }; struct amdgpu_display_manager; struct amdgpu_dm_connector; int dm_mst_get_pbn_divider(struct dc_link *link); void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, struct amdgpu_dm_connector *aconnector, int link_index); void dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); void dm_handle_mst_sideband_msg_ready_event( struct drm_dp_mst_topology_mgr *mgr, enum mst_msg_ready_type msg_rdy_type); struct dsc_mst_fairness_vars { int pbn; bool dsc_enabled; int bpp_x16; struct amdgpu_dm_connector *aconnector; }; int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars); bool needs_dsc_aux_workaround(struct dc_link *link); int pre_validate_dsc(struct drm_atomic_state *state, struct dm_atomic_state **dm_state_ptr, struct dsc_mst_fairness_vars *vars); enum dc_status dm_dp_mst_is_port_support_mode( struct amdgpu_dm_connector *aconnector, struct dc_stream_state *stream); #endif
// SPDX-License-Identifier: GPL-2.0
/*
 * Hardkernel Odroid XU3-Lite board device tree source
 *
 * Copyright (c) 2015 Krzysztof Kozlowski
 * Copyright (c) 2014 Collabora Ltd.
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 */

/dts-v1/;
#include "exynos5422-odroidxu3-common.dtsi"
#include "exynos5422-odroidxu3-audio.dtsi"
#include "exynos54xx-odroidxu-leds.dtsi"

/ {
	model = "Hardkernel Odroid XU3 Lite";
	compatible = "hardkernel,odroid-xu3-lite", "samsung,exynos5800",
		     "samsung,exynos5";

	aliases {
		/* Resolves to the USB-attached ethernet node defined below */
		ethernet = &ethernet;
	};
};

/* NOTE(review): both cluster PMUs are disabled on this board — presumably
 * a board-specific limitation; confirm before re-enabling. */
&arm_a7_pmu {
	status = "disabled";
};

&arm_a15_pmu {
	status = "disabled";
};

&chipid {
	/* ASV bin of this SoC sample — TODO confirm against chipid binding */
	samsung,asv-bin = <2>;
};

/*
 * Odroid XU3-Lite board uses SoC revision with lower maximum frequencies
 * than Odroid XU3/XU4 boards: 1.8 GHz for A15 cores & 1.3 GHz for A7 cores.
 * Therefore we need to update OPPs tables and thermal maps accordingly.
 */
&cluster_a15_opp_table {
	/delete-node/opp-2000000000;
	/delete-node/opp-1900000000;
};

&cluster_a7_opp_table {
	/delete-node/opp-1400000000;
};

/* Cooling maps re-stated with limits matching the reduced OPP tables:
 * A7 cores (cpu0-3) max cooling state 7, A15 cores (cpu4-7) max state 12. */
&cpu0_cooling_map4 {
	cooling-device = <&cpu0 3 7>, <&cpu1 3 7>,
			 <&cpu2 3 7>, <&cpu3 3 7>,
			 <&cpu4 3 12>, <&cpu5 3 12>,
			 <&cpu6 3 12>, <&cpu7 3 12>;
};

&cpu1_cooling_map4 {
	cooling-device = <&cpu0 3 7>, <&cpu1 3 7>,
			 <&cpu2 3 7>, <&cpu3 3 7>,
			 <&cpu4 3 12>, <&cpu5 3 12>,
			 <&cpu6 3 12>, <&cpu7 3 12>;
};

&cpu2_cooling_map4 {
	cooling-device = <&cpu0 3 7>, <&cpu1 3 7>,
			 <&cpu2 3 7>, <&cpu3 3 7>,
			 <&cpu4 3 12>, <&cpu5 3 12>,
			 <&cpu6 3 12>, <&cpu7 3 12>;
};

&cpu3_cooling_map4 {
	cooling-device = <&cpu0 3 7>, <&cpu1 3 7>,
			 <&cpu2 3 7>, <&cpu3 3 7>,
			 <&cpu4 3 12>, <&cpu5 3 12>,
			 <&cpu6 3 12>, <&cpu7 3 12>;
};

&pwm {
	/*
	 * PWM 0 -- fan
	 * PWM 1 -- Green LED
	 * PWM 2 -- Blue LED
	 * PWM 3 -- on MIPI connector for backlight
	 */
	pinctrl-0 = <&pwm0_out &pwm1_out &pwm2_out &pwm3_out>;
	pinctrl-names = "default";
	status = "okay";
};

&usbdrd_dwc3_1 {
	dr_mode = "peripheral";
};

/* On-board USB hub with integrated ethernet (LAN9514-class device) */
&usbhost2 {
	#address-cells = <1>;
	#size-cells = <0>;

	hub@1 {
		compatible = "usb424,9514";
		reg = <1>;
		#address-cells = <1>;
		#size-cells = <0>;

		ethernet: ethernet@1 {
			compatible = "usb424,ec00";
			reg = <1>;
			local-mac-address = [00 00 00 00 00 00]; /* Filled in by a bootloader */
		};
	};
};
/* SPDX-License-Identifier: GPL-2.0 * * Copyright 2018-2020 HabanaLabs, Ltd. * All Rights Reserved. * */ /************************************ ** This is an auto-generated file ** ** DO NOT EDIT BELOW ** ************************************/ #ifndef __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_ #define __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_ struct gaudi_async_events_ids_map { int fc_id; int cpu_id; int valid; char name[64]; }; static struct gaudi_async_events_ids_map gaudi_irq_map_table[] = { { .fc_id = 0, .cpu_id = 0, .valid = 0, .name = "" }, { .fc_id = 1, .cpu_id = 1, .valid = 0, .name = "" }, { .fc_id = 2, .cpu_id = 2, .valid = 0, .name = "" }, { .fc_id = 3, .cpu_id = 3, .valid = 0, .name = "" }, { .fc_id = 4, .cpu_id = 4, .valid = 0, .name = "" }, { .fc_id = 5, .cpu_id = 5, .valid = 0, .name = "" }, { .fc_id = 6, .cpu_id = 6, .valid = 0, .name = "" }, { .fc_id = 7, .cpu_id = 7, .valid = 0, .name = "" }, { .fc_id = 8, .cpu_id = 8, .valid = 0, .name = "" }, { .fc_id = 9, .cpu_id = 9, .valid = 0, .name = "" }, { .fc_id = 10, .cpu_id = 10, .valid = 0, .name = "" }, { .fc_id = 11, .cpu_id = 11, .valid = 0, .name = "" }, { .fc_id = 12, .cpu_id = 12, .valid = 0, .name = "" }, { .fc_id = 13, .cpu_id = 13, .valid = 0, .name = "" }, { .fc_id = 14, .cpu_id = 14, .valid = 0, .name = "" }, { .fc_id = 15, .cpu_id = 15, .valid = 0, .name = "" }, { .fc_id = 16, .cpu_id = 16, .valid = 0, .name = "" }, { .fc_id = 17, .cpu_id = 17, .valid = 0, .name = "" }, { .fc_id = 18, .cpu_id = 18, .valid = 0, .name = "" }, { .fc_id = 19, .cpu_id = 19, .valid = 0, .name = "" }, { .fc_id = 20, .cpu_id = 20, .valid = 0, .name = "" }, { .fc_id = 21, .cpu_id = 21, .valid = 0, .name = "" }, { .fc_id = 22, .cpu_id = 22, .valid = 0, .name = "" }, { .fc_id = 23, .cpu_id = 23, .valid = 0, .name = "" }, { .fc_id = 24, .cpu_id = 24, .valid = 0, .name = "" }, { .fc_id = 25, .cpu_id = 25, .valid = 0, .name = "" }, { .fc_id = 26, .cpu_id = 26, .valid = 0, .name = "" }, { .fc_id = 27, .cpu_id = 27, .valid = 0, .name = 
"" }, { .fc_id = 28, .cpu_id = 28, .valid = 0, .name = "" }, { .fc_id = 29, .cpu_id = 29, .valid = 0, .name = "" }, { .fc_id = 30, .cpu_id = 30, .valid = 0, .name = "" }, { .fc_id = 31, .cpu_id = 31, .valid = 0, .name = "" }, { .fc_id = 32, .cpu_id = 32, .valid = 1, .name = "PCIE_CORE_SERR" }, { .fc_id = 33, .cpu_id = 33, .valid = 1, .name = "PCIE_CORE_DERR" }, { .fc_id = 34, .cpu_id = 34, .valid = 1, .name = "PCIE_IF_SERR" }, { .fc_id = 35, .cpu_id = 35, .valid = 1, .name = "PCIE_IF_DERR" }, { .fc_id = 36, .cpu_id = 36, .valid = 1, .name = "PCIE_PHY_SERR" }, { .fc_id = 37, .cpu_id = 37, .valid = 1, .name = "PCIE_PHY_DERR" }, { .fc_id = 38, .cpu_id = 38, .valid = 1, .name = "TPC0_SERR" }, { .fc_id = 39, .cpu_id = 38, .valid = 1, .name = "TPC1_SERR" }, { .fc_id = 40, .cpu_id = 38, .valid = 1, .name = "TPC2_SERR" }, { .fc_id = 41, .cpu_id = 38, .valid = 1, .name = "TPC3_SERR" }, { .fc_id = 42, .cpu_id = 38, .valid = 1, .name = "TPC4_SERR" }, { .fc_id = 43, .cpu_id = 38, .valid = 1, .name = "TPC5_SERR" }, { .fc_id = 44, .cpu_id = 38, .valid = 1, .name = "TPC6_SERR" }, { .fc_id = 45, .cpu_id = 38, .valid = 1, .name = "TPC7_SERR" }, { .fc_id = 46, .cpu_id = 39, .valid = 1, .name = "TPC0_DERR" }, { .fc_id = 47, .cpu_id = 39, .valid = 1, .name = "TPC1_DERR" }, { .fc_id = 48, .cpu_id = 39, .valid = 1, .name = "TPC2_DERR" }, { .fc_id = 49, .cpu_id = 39, .valid = 1, .name = "TPC3_DERR" }, { .fc_id = 50, .cpu_id = 39, .valid = 1, .name = "TPC4_DERR" }, { .fc_id = 51, .cpu_id = 39, .valid = 1, .name = "TPC5_DERR" }, { .fc_id = 52, .cpu_id = 39, .valid = 1, .name = "TPC6_DERR" }, { .fc_id = 53, .cpu_id = 39, .valid = 1, .name = "TPC7_DERR" }, { .fc_id = 54, .cpu_id = 40, .valid = 1, .name = "MME0_ACC_SERR" }, { .fc_id = 55, .cpu_id = 41, .valid = 1, .name = "MME0_ACC_DERR" }, { .fc_id = 56, .cpu_id = 42, .valid = 1, .name = "MME0_SBAB_SERR" }, { .fc_id = 57, .cpu_id = 43, .valid = 1, .name = "MME0_SBAB_DERR" }, { .fc_id = 58, .cpu_id = 44, .valid = 1, .name = "MME1_ACC_SERR" }, 
{ .fc_id = 59, .cpu_id = 45, .valid = 1, .name = "MME1_ACC_DERR" }, { .fc_id = 60, .cpu_id = 46, .valid = 1, .name = "MME1_SBAB_SERR" }, { .fc_id = 61, .cpu_id = 47, .valid = 1, .name = "MME1_SBAB_DERR" }, { .fc_id = 62, .cpu_id = 48, .valid = 1, .name = "MME2_ACC_SERR" }, { .fc_id = 63, .cpu_id = 49, .valid = 1, .name = "MME2_ACC_DERR" }, { .fc_id = 64, .cpu_id = 50, .valid = 1, .name = "MME2_SBAB_SERR" }, { .fc_id = 65, .cpu_id = 51, .valid = 1, .name = "MME2_SBAB_DERR" }, { .fc_id = 66, .cpu_id = 52, .valid = 1, .name = "MME3_ACC_SERR" }, { .fc_id = 67, .cpu_id = 53, .valid = 1, .name = "MME3_ACC_DERR" }, { .fc_id = 68, .cpu_id = 54, .valid = 1, .name = "MME3_SBAB_SERR" }, { .fc_id = 69, .cpu_id = 55, .valid = 1, .name = "MME3_SBAB_DERR" }, { .fc_id = 70, .cpu_id = 56, .valid = 1, .name = "DMA0_SERR_ECC" }, { .fc_id = 71, .cpu_id = 56, .valid = 1, .name = "DMA1_SERR_ECC" }, { .fc_id = 72, .cpu_id = 56, .valid = 1, .name = "DMA2_SERR_ECC" }, { .fc_id = 73, .cpu_id = 56, .valid = 1, .name = "DMA3_SERR_ECC" }, { .fc_id = 74, .cpu_id = 56, .valid = 1, .name = "DMA4_SERR_ECC" }, { .fc_id = 75, .cpu_id = 56, .valid = 1, .name = "DMA5_SERR_ECC" }, { .fc_id = 76, .cpu_id = 56, .valid = 1, .name = "DMA6_SERR_ECC" }, { .fc_id = 77, .cpu_id = 56, .valid = 1, .name = "DMA7_SERR_ECC" }, { .fc_id = 78, .cpu_id = 57, .valid = 1, .name = "DMA0_DERR_ECC" }, { .fc_id = 79, .cpu_id = 57, .valid = 1, .name = "DMA1_DERR_ECC" }, { .fc_id = 80, .cpu_id = 57, .valid = 1, .name = "DMA2_DERR_ECC" }, { .fc_id = 81, .cpu_id = 57, .valid = 1, .name = "DMA3_DERR_ECC" }, { .fc_id = 82, .cpu_id = 57, .valid = 1, .name = "DMA4_DERR_ECC" }, { .fc_id = 83, .cpu_id = 57, .valid = 1, .name = "DMA5_DERR_ECC" }, { .fc_id = 84, .cpu_id = 57, .valid = 1, .name = "DMA6_DERR_ECC" }, { .fc_id = 85, .cpu_id = 57, .valid = 1, .name = "DMA7_DERR_ECC" }, { .fc_id = 86, .cpu_id = 58, .valid = 1, .name = "CPU_IF_ECC_SERR" }, { .fc_id = 87, .cpu_id = 59, .valid = 1, .name = "CPU_IF_ECC_DERR" }, { .fc_id = 88, 
.cpu_id = 60, .valid = 1, .name = "PSOC_MEM_SERR" }, { .fc_id = 89, .cpu_id = 61, .valid = 1, .name = "PSOC_CORESIGHT_SERR" }, { .fc_id = 90, .cpu_id = 62, .valid = 1, .name = "PSOC_MEM_DERR" }, { .fc_id = 91, .cpu_id = 63, .valid = 1, .name = "PSOC_CORESIGHT_DERR" }, { .fc_id = 92, .cpu_id = 64, .valid = 1, .name = "SRAM0_SERR" }, { .fc_id = 93, .cpu_id = 64, .valid = 1, .name = "SRAM1_SERR" }, { .fc_id = 94, .cpu_id = 64, .valid = 1, .name = "SRAM2_SERR" }, { .fc_id = 95, .cpu_id = 64, .valid = 1, .name = "SRAM3_SERR" }, { .fc_id = 96, .cpu_id = 64, .valid = 1, .name = "SRAM7_SERR" }, { .fc_id = 97, .cpu_id = 64, .valid = 1, .name = "SRAM6_SERR" }, { .fc_id = 98, .cpu_id = 64, .valid = 1, .name = "SRAM5_SERR" }, { .fc_id = 99, .cpu_id = 64, .valid = 1, .name = "SRAM4_SERR" }, { .fc_id = 100, .cpu_id = 64, .valid = 1, .name = "SRAM8_SERR" }, { .fc_id = 101, .cpu_id = 64, .valid = 1, .name = "SRAM9_SERR" }, { .fc_id = 102, .cpu_id = 64, .valid = 1, .name = "SRAM10_SERR" }, { .fc_id = 103, .cpu_id = 64, .valid = 1, .name = "SRAM11_SERR" }, { .fc_id = 104, .cpu_id = 64, .valid = 1, .name = "SRAM15_SERR" }, { .fc_id = 105, .cpu_id = 64, .valid = 1, .name = "SRAM14_SERR" }, { .fc_id = 106, .cpu_id = 64, .valid = 1, .name = "SRAM13_SERR" }, { .fc_id = 107, .cpu_id = 64, .valid = 1, .name = "SRAM12_SERR" }, { .fc_id = 108, .cpu_id = 64, .valid = 1, .name = "SRAM16_SERR" }, { .fc_id = 109, .cpu_id = 64, .valid = 1, .name = "SRAM17_SERR" }, { .fc_id = 110, .cpu_id = 64, .valid = 1, .name = "SRAM18_SERR" }, { .fc_id = 111, .cpu_id = 64, .valid = 1, .name = "SRAM19_SERR" }, { .fc_id = 112, .cpu_id = 64, .valid = 1, .name = "SRAM23_SERR" }, { .fc_id = 113, .cpu_id = 64, .valid = 1, .name = "SRAM22_SERR" }, { .fc_id = 114, .cpu_id = 64, .valid = 1, .name = "SRAM21_SERR" }, { .fc_id = 115, .cpu_id = 64, .valid = 1, .name = "SRAM20_SERR" }, { .fc_id = 116, .cpu_id = 64, .valid = 1, .name = "SRAM24_SERR" }, { .fc_id = 117, .cpu_id = 64, .valid = 1, .name = "SRAM25_SERR" }, { 
.fc_id = 118, .cpu_id = 64, .valid = 1, .name = "SRAM26_SERR" }, { .fc_id = 119, .cpu_id = 64, .valid = 1, .name = "SRAM27_SERR" }, { .fc_id = 120, .cpu_id = 64, .valid = 1, .name = "SRAM31_SERR" }, { .fc_id = 121, .cpu_id = 64, .valid = 1, .name = "SRAM30_SERR" }, { .fc_id = 122, .cpu_id = 64, .valid = 1, .name = "SRAM29_SERR" }, { .fc_id = 123, .cpu_id = 64, .valid = 1, .name = "SRAM28_SERR" }, { .fc_id = 124, .cpu_id = 65, .valid = 1, .name = "SRAM0_DERR" }, { .fc_id = 125, .cpu_id = 65, .valid = 1, .name = "SRAM1_DERR" }, { .fc_id = 126, .cpu_id = 65, .valid = 1, .name = "SRAM2_DERR" }, { .fc_id = 127, .cpu_id = 65, .valid = 1, .name = "SRAM3_DERR" }, { .fc_id = 128, .cpu_id = 65, .valid = 1, .name = "SRAM7_DERR" }, { .fc_id = 129, .cpu_id = 65, .valid = 1, .name = "SRAM6_DERR" }, { .fc_id = 130, .cpu_id = 65, .valid = 1, .name = "SRAM5_DERR" }, { .fc_id = 131, .cpu_id = 65, .valid = 1, .name = "SRAM4_DERR" }, { .fc_id = 132, .cpu_id = 65, .valid = 1, .name = "SRAM8_DERR" }, { .fc_id = 133, .cpu_id = 65, .valid = 1, .name = "SRAM9_DERR" }, { .fc_id = 134, .cpu_id = 65, .valid = 1, .name = "SRAM10_DERR" }, { .fc_id = 135, .cpu_id = 65, .valid = 1, .name = "SRAM11_DERR" }, { .fc_id = 136, .cpu_id = 65, .valid = 1, .name = "SRAM15_DERR" }, { .fc_id = 137, .cpu_id = 65, .valid = 1, .name = "SRAM14_DERR" }, { .fc_id = 138, .cpu_id = 65, .valid = 1, .name = "SRAM13_DERR" }, { .fc_id = 139, .cpu_id = 65, .valid = 1, .name = "SRAM12_DERR" }, { .fc_id = 140, .cpu_id = 65, .valid = 1, .name = "SRAM16_DERR" }, { .fc_id = 141, .cpu_id = 65, .valid = 1, .name = "SRAM17_DERR" }, { .fc_id = 142, .cpu_id = 65, .valid = 1, .name = "SRAM18_DERR" }, { .fc_id = 143, .cpu_id = 65, .valid = 1, .name = "SRAM19_DERR" }, { .fc_id = 144, .cpu_id = 65, .valid = 1, .name = "SRAM23_DERR" }, { .fc_id = 145, .cpu_id = 65, .valid = 1, .name = "SRAM22_DERR" }, { .fc_id = 146, .cpu_id = 65, .valid = 1, .name = "SRAM21_DERR" }, { .fc_id = 147, .cpu_id = 65, .valid = 1, .name = "SRAM20_DERR" }, { 
.fc_id = 148, .cpu_id = 65, .valid = 1, .name = "SRAM24_DERR" }, { .fc_id = 149, .cpu_id = 65, .valid = 1, .name = "SRAM25_DERR" }, { .fc_id = 150, .cpu_id = 65, .valid = 1, .name = "SRAM26_DERR" }, { .fc_id = 151, .cpu_id = 65, .valid = 1, .name = "SRAM27_DERR" }, { .fc_id = 152, .cpu_id = 65, .valid = 1, .name = "SRAM31_DERR" }, { .fc_id = 153, .cpu_id = 65, .valid = 1, .name = "SRAM30_DERR" }, { .fc_id = 154, .cpu_id = 65, .valid = 1, .name = "SRAM29_DERR" }, { .fc_id = 155, .cpu_id = 65, .valid = 1, .name = "SRAM28_DERR" }, { .fc_id = 156, .cpu_id = 66, .valid = 1, .name = "NIC0_SERR" }, { .fc_id = 157, .cpu_id = 66, .valid = 1, .name = "NIC1_SERR" }, { .fc_id = 158, .cpu_id = 66, .valid = 1, .name = "NIC2_SERR" }, { .fc_id = 159, .cpu_id = 66, .valid = 1, .name = "NIC3_SERR" }, { .fc_id = 160, .cpu_id = 66, .valid = 1, .name = "NIC4_SERR" }, { .fc_id = 161, .cpu_id = 66, .valid = 0, .name = "" }, { .fc_id = 162, .cpu_id = 66, .valid = 0, .name = "" }, { .fc_id = 163, .cpu_id = 66, .valid = 0, .name = "" }, { .fc_id = 164, .cpu_id = 66, .valid = 0, .name = "" }, { .fc_id = 165, .cpu_id = 66, .valid = 0, .name = "" }, { .fc_id = 166, .cpu_id = 67, .valid = 1, .name = "NIC0_DERR" }, { .fc_id = 167, .cpu_id = 67, .valid = 1, .name = "NIC1_DERR" }, { .fc_id = 168, .cpu_id = 67, .valid = 1, .name = "NIC2_DERR" }, { .fc_id = 169, .cpu_id = 67, .valid = 1, .name = "NIC3_DERR" }, { .fc_id = 170, .cpu_id = 67, .valid = 1, .name = "NIC4_DERR" }, { .fc_id = 171, .cpu_id = 67, .valid = 0, .name = "" }, { .fc_id = 172, .cpu_id = 67, .valid = 0, .name = "" }, { .fc_id = 173, .cpu_id = 67, .valid = 0, .name = "" }, { .fc_id = 174, .cpu_id = 67, .valid = 0, .name = "" }, { .fc_id = 175, .cpu_id = 67, .valid = 0, .name = "" }, { .fc_id = 176, .cpu_id = 68, .valid = 1, .name = "DMA_IF0_SERR" }, { .fc_id = 177, .cpu_id = 68, .valid = 1, .name = "DMA_IF1_SERR" }, { .fc_id = 178, .cpu_id = 68, .valid = 1, .name = "DMA_IF2_SERR" }, { .fc_id = 179, .cpu_id = 68, .valid = 1, .name = 
"DMA_IF3_SERR" }, { .fc_id = 180, .cpu_id = 69, .valid = 1, .name = "DMA_IF0_DERR" }, { .fc_id = 181, .cpu_id = 69, .valid = 1, .name = "DMA_IF1_DERR" }, { .fc_id = 182, .cpu_id = 69, .valid = 1, .name = "DMA_IF2_DERR" }, { .fc_id = 183, .cpu_id = 69, .valid = 1, .name = "DMA_IF3_DERR" }, { .fc_id = 184, .cpu_id = 70, .valid = 1, .name = "GIC500" }, { .fc_id = 185, .cpu_id = 71, .valid = 1, .name = "HBM_0_SERR" }, { .fc_id = 186, .cpu_id = 71, .valid = 1, .name = "HBM_1_SERR" }, { .fc_id = 187, .cpu_id = 71, .valid = 1, .name = "HBM_2_SERR" }, { .fc_id = 188, .cpu_id = 71, .valid = 1, .name = "HBM_3_SERR" }, { .fc_id = 189, .cpu_id = 72, .valid = 1, .name = "HBM_0_DERR" }, { .fc_id = 190, .cpu_id = 72, .valid = 1, .name = "HBM_1_DERR" }, { .fc_id = 191, .cpu_id = 72, .valid = 1, .name = "HBM_2_DERR" }, { .fc_id = 192, .cpu_id = 72, .valid = 1, .name = "HBM_3_DERR" }, { .fc_id = 193, .cpu_id = 73, .valid = 1, .name = "MMU_SERR" }, { .fc_id = 194, .cpu_id = 74, .valid = 1, .name = "MMU_DERR" }, { .fc_id = 195, .cpu_id = 75, .valid = 0, .name = "" }, { .fc_id = 196, .cpu_id = 76, .valid = 0, .name = "" }, { .fc_id = 197, .cpu_id = 77, .valid = 0, .name = "" }, { .fc_id = 198, .cpu_id = 78, .valid = 0, .name = "" }, { .fc_id = 199, .cpu_id = 79, .valid = 0, .name = "" }, { .fc_id = 200, .cpu_id = 80, .valid = 1, .name = "PCIE_DEC" }, { .fc_id = 201, .cpu_id = 81, .valid = 1, .name = "TPC0_DEC" }, { .fc_id = 202, .cpu_id = 82, .valid = 0, .name = "" }, { .fc_id = 203, .cpu_id = 83, .valid = 1, .name = "TPC1_DEC" }, { .fc_id = 204, .cpu_id = 84, .valid = 0, .name = "" }, { .fc_id = 205, .cpu_id = 85, .valid = 1, .name = "TPC2_DEC" }, { .fc_id = 206, .cpu_id = 86, .valid = 0, .name = "" }, { .fc_id = 207, .cpu_id = 87, .valid = 1, .name = "TPC3_DEC" }, { .fc_id = 208, .cpu_id = 88, .valid = 0, .name = "" }, { .fc_id = 209, .cpu_id = 89, .valid = 1, .name = "TPC4_DEC" }, { .fc_id = 210, .cpu_id = 90, .valid = 0, .name = "" }, { .fc_id = 211, .cpu_id = 91, .valid = 1, .name 
= "TPC5_DEC" }, { .fc_id = 212, .cpu_id = 92, .valid = 0, .name = "" }, { .fc_id = 213, .cpu_id = 93, .valid = 1, .name = "TPC6_DEC" }, { .fc_id = 214, .cpu_id = 94, .valid = 0, .name = "" }, { .fc_id = 215, .cpu_id = 95, .valid = 1, .name = "TPC7_DEC" }, { .fc_id = 216, .cpu_id = 96, .valid = 0, .name = "" }, { .fc_id = 217, .cpu_id = 97, .valid = 1, .name = "AXI_ECC" }, { .fc_id = 218, .cpu_id = 98, .valid = 1, .name = "L2_RAM_ECC" }, { .fc_id = 219, .cpu_id = 99, .valid = 1, .name = "MME0_WBC_RSP" }, { .fc_id = 220, .cpu_id = 100, .valid = 1, .name = "MME0_SBAB0_RSP" }, { .fc_id = 221, .cpu_id = 101, .valid = 0, .name = "" }, { .fc_id = 222, .cpu_id = 102, .valid = 0, .name = "" }, { .fc_id = 223, .cpu_id = 103, .valid = 0, .name = "" }, { .fc_id = 224, .cpu_id = 104, .valid = 1, .name = "MME1_WBC_RSP" }, { .fc_id = 225, .cpu_id = 105, .valid = 1, .name = "MME1_SBAB0_RSP" }, { .fc_id = 226, .cpu_id = 106, .valid = 0, .name = "" }, { .fc_id = 227, .cpu_id = 107, .valid = 0, .name = "" }, { .fc_id = 228, .cpu_id = 108, .valid = 0, .name = "" }, { .fc_id = 229, .cpu_id = 109, .valid = 1, .name = "MME2_WBC_RSP" }, { .fc_id = 230, .cpu_id = 110, .valid = 1, .name = "MME2_SBAB0_RSP" }, { .fc_id = 231, .cpu_id = 111, .valid = 0, .name = "" }, { .fc_id = 232, .cpu_id = 112, .valid = 0, .name = "" }, { .fc_id = 233, .cpu_id = 113, .valid = 0, .name = "" }, { .fc_id = 234, .cpu_id = 114, .valid = 1, .name = "MME3_WBC_RSP" }, { .fc_id = 235, .cpu_id = 115, .valid = 1, .name = "MME3_SBAB0_RSP" }, { .fc_id = 236, .cpu_id = 116, .valid = 0, .name = "" }, { .fc_id = 237, .cpu_id = 117, .valid = 0, .name = "" }, { .fc_id = 238, .cpu_id = 118, .valid = 0, .name = "" }, { .fc_id = 239, .cpu_id = 119, .valid = 1, .name = "PLL0" }, { .fc_id = 240, .cpu_id = 119, .valid = 1, .name = "PLL1" }, { .fc_id = 241, .cpu_id = 119, .valid = 1, .name = "PLL2" }, { .fc_id = 242, .cpu_id = 119, .valid = 1, .name = "PLL3" }, { .fc_id = 243, .cpu_id = 119, .valid = 1, .name = "PLL4" }, { .fc_id = 
244, .cpu_id = 119, .valid = 1, .name = "PLL5" }, { .fc_id = 245, .cpu_id = 119, .valid = 1, .name = "PLL6" }, { .fc_id = 246, .cpu_id = 119, .valid = 1, .name = "PLL7" }, { .fc_id = 247, .cpu_id = 119, .valid = 1, .name = "PLL8" }, { .fc_id = 248, .cpu_id = 119, .valid = 1, .name = "PLL9" }, { .fc_id = 249, .cpu_id = 119, .valid = 1, .name = "PLL10" }, { .fc_id = 250, .cpu_id = 119, .valid = 1, .name = "PLL11" }, { .fc_id = 251, .cpu_id = 119, .valid = 1, .name = "PLL12" }, { .fc_id = 252, .cpu_id = 119, .valid = 1, .name = "PLL13" }, { .fc_id = 253, .cpu_id = 119, .valid = 1, .name = "PLL14" }, { .fc_id = 254, .cpu_id = 119, .valid = 1, .name = "PLL15" }, { .fc_id = 255, .cpu_id = 119, .valid = 1, .name = "PLL16" }, { .fc_id = 256, .cpu_id = 119, .valid = 1, .name = "PLL17" }, { .fc_id = 257, .cpu_id = 120, .valid = 1, .name = "CPU_AXI_SPLITTER" }, { .fc_id = 258, .cpu_id = 121, .valid = 0, .name = "" }, { .fc_id = 259, .cpu_id = 122, .valid = 0, .name = "" }, { .fc_id = 260, .cpu_id = 123, .valid = 0, .name = "" }, { .fc_id = 261, .cpu_id = 124, .valid = 0, .name = "" }, { .fc_id = 262, .cpu_id = 125, .valid = 1, .name = "PSOC_AXI_DEC" }, { .fc_id = 263, .cpu_id = 126, .valid = 1, .name = "PSOC_PRSTN_FALL" }, { .fc_id = 264, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_0" }, { .fc_id = 265, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_1" }, { .fc_id = 266, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_2" }, { .fc_id = 267, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_3" }, { .fc_id = 268, .cpu_id = 127, .valid = 1, .name = "NIC_SEI_4" }, { .fc_id = 269, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 270, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 271, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 272, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 273, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 274, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 275, .cpu_id = 128, .valid = 0, .name = "" }, { .fc_id = 276, .cpu_id = 128, .valid 
= 0, .name = "" }, { .fc_id = 277, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_0" }, { .fc_id = 278, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_1" }, { .fc_id = 279, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_2" }, { .fc_id = 280, .cpu_id = 129, .valid = 1, .name = "DMA_IF_SEI_3" }, { .fc_id = 281, .cpu_id = 130, .valid = 0, .name = "" }, { .fc_id = 282, .cpu_id = 131, .valid = 0, .name = "" }, { .fc_id = 283, .cpu_id = 132, .valid = 0, .name = "" }, { .fc_id = 284, .cpu_id = 133, .valid = 0, .name = "" }, { .fc_id = 285, .cpu_id = 134, .valid = 0, .name = "" }, { .fc_id = 286, .cpu_id = 135, .valid = 0, .name = "" }, { .fc_id = 287, .cpu_id = 136, .valid = 0, .name = "" }, { .fc_id = 288, .cpu_id = 137, .valid = 0, .name = "" }, { .fc_id = 289, .cpu_id = 138, .valid = 0, .name = "" }, { .fc_id = 290, .cpu_id = 139, .valid = 1, .name = "PCIE_FLR" }, { .fc_id = 291, .cpu_id = 140, .valid = 0, .name = "" }, { .fc_id = 292, .cpu_id = 141, .valid = 0, .name = "" }, { .fc_id = 293, .cpu_id = 142, .valid = 0, .name = "" }, { .fc_id = 294, .cpu_id = 143, .valid = 0, .name = "" }, { .fc_id = 295, .cpu_id = 144, .valid = 0, .name = "" }, { .fc_id = 296, .cpu_id = 145, .valid = 0, .name = "" }, { .fc_id = 297, .cpu_id = 146, .valid = 0, .name = "" }, { .fc_id = 298, .cpu_id = 147, .valid = 0, .name = "" }, { .fc_id = 299, .cpu_id = 148, .valid = 0, .name = "" }, { .fc_id = 300, .cpu_id = 149, .valid = 1, .name = "TPC0_BMON_SPMU" }, { .fc_id = 301, .cpu_id = 150, .valid = 1, .name = "TPC0_KRN_ERR" }, { .fc_id = 302, .cpu_id = 151, .valid = 0, .name = "" }, { .fc_id = 303, .cpu_id = 152, .valid = 0, .name = "" }, { .fc_id = 304, .cpu_id = 153, .valid = 0, .name = "" }, { .fc_id = 305, .cpu_id = 154, .valid = 0, .name = "" }, { .fc_id = 306, .cpu_id = 155, .valid = 1, .name = "TPC1_BMON_SPMU" }, { .fc_id = 307, .cpu_id = 156, .valid = 1, .name = "TPC1_KRN_ERR" }, { .fc_id = 308, .cpu_id = 157, .valid = 0, .name = "" }, { .fc_id = 309, .cpu_id = 158, .valid = 0, .name 
= "" }, { .fc_id = 310, .cpu_id = 159, .valid = 0, .name = "" }, { .fc_id = 311, .cpu_id = 160, .valid = 0, .name = "" }, { .fc_id = 312, .cpu_id = 161, .valid = 1, .name = "TPC2_BMON_SPMU" }, { .fc_id = 313, .cpu_id = 162, .valid = 1, .name = "TPC2_KRN_ERR" }, { .fc_id = 314, .cpu_id = 163, .valid = 0, .name = "" }, { .fc_id = 315, .cpu_id = 164, .valid = 0, .name = "" }, { .fc_id = 316, .cpu_id = 165, .valid = 0, .name = "" }, { .fc_id = 317, .cpu_id = 166, .valid = 0, .name = "" }, { .fc_id = 318, .cpu_id = 167, .valid = 1, .name = "TPC3_BMON_SPMU" }, { .fc_id = 319, .cpu_id = 168, .valid = 1, .name = "TPC3_KRN_ERR" }, { .fc_id = 320, .cpu_id = 169, .valid = 0, .name = "" }, { .fc_id = 321, .cpu_id = 170, .valid = 0, .name = "" }, { .fc_id = 322, .cpu_id = 171, .valid = 0, .name = "" }, { .fc_id = 323, .cpu_id = 172, .valid = 0, .name = "" }, { .fc_id = 324, .cpu_id = 173, .valid = 1, .name = "TPC4_BMON_SPMU" }, { .fc_id = 325, .cpu_id = 174, .valid = 1, .name = "TPC4_KRN_ERR" }, { .fc_id = 326, .cpu_id = 175, .valid = 0, .name = "" }, { .fc_id = 327, .cpu_id = 176, .valid = 0, .name = "" }, { .fc_id = 328, .cpu_id = 177, .valid = 0, .name = "" }, { .fc_id = 329, .cpu_id = 178, .valid = 0, .name = "" }, { .fc_id = 330, .cpu_id = 179, .valid = 1, .name = "TPC5_BMON_SPMU" }, { .fc_id = 331, .cpu_id = 180, .valid = 1, .name = "TPC5_KRN_ERR" }, { .fc_id = 332, .cpu_id = 181, .valid = 0, .name = "" }, { .fc_id = 333, .cpu_id = 182, .valid = 0, .name = "" }, { .fc_id = 334, .cpu_id = 183, .valid = 0, .name = "" }, { .fc_id = 335, .cpu_id = 184, .valid = 0, .name = "" }, { .fc_id = 336, .cpu_id = 185, .valid = 1, .name = "TPC6_BMON_SPMU" }, { .fc_id = 337, .cpu_id = 186, .valid = 1, .name = "TPC6_KRN_ERR" }, { .fc_id = 338, .cpu_id = 187, .valid = 0, .name = "" }, { .fc_id = 339, .cpu_id = 188, .valid = 0, .name = "" }, { .fc_id = 340, .cpu_id = 189, .valid = 0, .name = "" }, { .fc_id = 341, .cpu_id = 190, .valid = 0, .name = "" }, { .fc_id = 342, .cpu_id = 191, .valid 
= 1, .name = "TPC7_BMON_SPMU" }, { .fc_id = 343, .cpu_id = 192, .valid = 1, .name = "TPC7_KRN_ERR" }, { .fc_id = 344, .cpu_id = 193, .valid = 0, .name = "" }, { .fc_id = 345, .cpu_id = 194, .valid = 0, .name = "" }, { .fc_id = 346, .cpu_id = 195, .valid = 0, .name = "" }, { .fc_id = 347, .cpu_id = 196, .valid = 0, .name = "" }, { .fc_id = 348, .cpu_id = 197, .valid = 0, .name = "" }, { .fc_id = 349, .cpu_id = 198, .valid = 0, .name = "" }, { .fc_id = 350, .cpu_id = 199, .valid = 0, .name = "" }, { .fc_id = 351, .cpu_id = 200, .valid = 0, .name = "" }, { .fc_id = 352, .cpu_id = 201, .valid = 0, .name = "" }, { .fc_id = 353, .cpu_id = 202, .valid = 0, .name = "" }, { .fc_id = 354, .cpu_id = 203, .valid = 0, .name = "" }, { .fc_id = 355, .cpu_id = 204, .valid = 0, .name = "" }, { .fc_id = 356, .cpu_id = 205, .valid = 0, .name = "" }, { .fc_id = 357, .cpu_id = 206, .valid = 0, .name = "" }, { .fc_id = 358, .cpu_id = 207, .valid = 0, .name = "" }, { .fc_id = 359, .cpu_id = 208, .valid = 0, .name = "" }, { .fc_id = 360, .cpu_id = 209, .valid = 0, .name = "" }, { .fc_id = 361, .cpu_id = 210, .valid = 0, .name = "" }, { .fc_id = 362, .cpu_id = 211, .valid = 0, .name = "" }, { .fc_id = 363, .cpu_id = 212, .valid = 0, .name = "" }, { .fc_id = 364, .cpu_id = 213, .valid = 0, .name = "" }, { .fc_id = 365, .cpu_id = 214, .valid = 0, .name = "" }, { .fc_id = 366, .cpu_id = 215, .valid = 0, .name = "" }, { .fc_id = 367, .cpu_id = 216, .valid = 0, .name = "" }, { .fc_id = 368, .cpu_id = 217, .valid = 0, .name = "" }, { .fc_id = 369, .cpu_id = 218, .valid = 0, .name = "" }, { .fc_id = 370, .cpu_id = 219, .valid = 0, .name = "" }, { .fc_id = 371, .cpu_id = 220, .valid = 0, .name = "" }, { .fc_id = 372, .cpu_id = 221, .valid = 0, .name = "" }, { .fc_id = 373, .cpu_id = 222, .valid = 0, .name = "" }, { .fc_id = 374, .cpu_id = 223, .valid = 0, .name = "" }, { .fc_id = 375, .cpu_id = 224, .valid = 0, .name = "" }, { .fc_id = 376, .cpu_id = 225, .valid = 0, .name = "" }, { .fc_id = 377, 
.cpu_id = 226, .valid = 0, .name = "" }, { .fc_id = 378, .cpu_id = 227, .valid = 0, .name = "" }, { .fc_id = 379, .cpu_id = 228, .valid = 0, .name = "" }, { .fc_id = 380, .cpu_id = 229, .valid = 1, .name = "MMU_PAGE_FAULT" }, { .fc_id = 381, .cpu_id = 230, .valid = 1, .name = "MMU_WR_PERM" }, { .fc_id = 382, .cpu_id = 231, .valid = 0, .name = "" }, { .fc_id = 383, .cpu_id = 232, .valid = 1, .name = "DMA_BM_CH0" }, { .fc_id = 384, .cpu_id = 233, .valid = 1, .name = "DMA_BM_CH1" }, { .fc_id = 385, .cpu_id = 234, .valid = 1, .name = "DMA_BM_CH2" }, { .fc_id = 386, .cpu_id = 235, .valid = 1, .name = "DMA_BM_CH3" }, { .fc_id = 387, .cpu_id = 236, .valid = 1, .name = "DMA_BM_CH4" }, { .fc_id = 388, .cpu_id = 237, .valid = 1, .name = "DMA_BM_CH5" }, { .fc_id = 389, .cpu_id = 238, .valid = 1, .name = "DMA_BM_CH6" }, { .fc_id = 390, .cpu_id = 239, .valid = 1, .name = "DMA_BM_CH7" }, { .fc_id = 391, .cpu_id = 240, .valid = 0, .name = "" }, { .fc_id = 392, .cpu_id = 241, .valid = 0, .name = "" }, { .fc_id = 393, .cpu_id = 242, .valid = 0, .name = "" }, { .fc_id = 394, .cpu_id = 243, .valid = 0, .name = "" }, { .fc_id = 395, .cpu_id = 244, .valid = 1, .name = "HBM0_SPI_0" }, { .fc_id = 396, .cpu_id = 245, .valid = 1, .name = "HBM0_SPI_1" }, { .fc_id = 397, .cpu_id = 246, .valid = 0, .name = "" }, { .fc_id = 398, .cpu_id = 247, .valid = 0, .name = "" }, { .fc_id = 399, .cpu_id = 248, .valid = 1, .name = "HBM1_SPI_0" }, { .fc_id = 400, .cpu_id = 249, .valid = 1, .name = "HBM1_SPI_1" }, { .fc_id = 401, .cpu_id = 250, .valid = 0, .name = "" }, { .fc_id = 402, .cpu_id = 251, .valid = 0, .name = "" }, { .fc_id = 403, .cpu_id = 252, .valid = 1, .name = "HBM2_SPI_0" }, { .fc_id = 404, .cpu_id = 253, .valid = 1, .name = "HBM2_SPI_1" }, { .fc_id = 405, .cpu_id = 254, .valid = 0, .name = "" }, { .fc_id = 406, .cpu_id = 255, .valid = 0, .name = "" }, { .fc_id = 407, .cpu_id = 256, .valid = 1, .name = "HBM3_SPI_0" }, { .fc_id = 408, .cpu_id = 257, .valid = 1, .name = "HBM3_SPI_1" }, { 
.fc_id = 409, .cpu_id = 258, .valid = 0, .name = "" }, { .fc_id = 410, .cpu_id = 259, .valid = 0, .name = "" }, { .fc_id = 411, .cpu_id = 260, .valid = 0, .name = "" }, { .fc_id = 412, .cpu_id = 261, .valid = 0, .name = "" }, { .fc_id = 413, .cpu_id = 262, .valid = 0, .name = "" }, { .fc_id = 414, .cpu_id = 263, .valid = 0, .name = "" }, { .fc_id = 415, .cpu_id = 264, .valid = 0, .name = "" }, { .fc_id = 416, .cpu_id = 265, .valid = 0, .name = "" }, { .fc_id = 417, .cpu_id = 266, .valid = 0, .name = "" }, { .fc_id = 418, .cpu_id = 267, .valid = 0, .name = "" }, { .fc_id = 419, .cpu_id = 268, .valid = 0, .name = "" }, { .fc_id = 420, .cpu_id = 269, .valid = 0, .name = "" }, { .fc_id = 421, .cpu_id = 270, .valid = 1, .name = "PSOC_GPIO_U16_0" }, { .fc_id = 422, .cpu_id = 271, .valid = 0, .name = "" }, { .fc_id = 423, .cpu_id = 272, .valid = 0, .name = "" }, { .fc_id = 424, .cpu_id = 273, .valid = 0, .name = "" }, { .fc_id = 425, .cpu_id = 274, .valid = 0, .name = "" }, { .fc_id = 426, .cpu_id = 275, .valid = 0, .name = "" }, { .fc_id = 427, .cpu_id = 276, .valid = 0, .name = "" }, { .fc_id = 428, .cpu_id = 277, .valid = 0, .name = "" }, { .fc_id = 429, .cpu_id = 278, .valid = 0, .name = "" }, { .fc_id = 430, .cpu_id = 279, .valid = 0, .name = "" }, { .fc_id = 431, .cpu_id = 280, .valid = 0, .name = "" }, { .fc_id = 432, .cpu_id = 281, .valid = 0, .name = "" }, { .fc_id = 433, .cpu_id = 282, .valid = 0, .name = "" }, { .fc_id = 434, .cpu_id = 283, .valid = 0, .name = "" }, { .fc_id = 435, .cpu_id = 284, .valid = 0, .name = "" }, { .fc_id = 436, .cpu_id = 285, .valid = 0, .name = "" }, { .fc_id = 437, .cpu_id = 286, .valid = 0, .name = "" }, { .fc_id = 438, .cpu_id = 287, .valid = 0, .name = "" }, { .fc_id = 439, .cpu_id = 288, .valid = 0, .name = "" }, { .fc_id = 440, .cpu_id = 289, .valid = 0, .name = "" }, { .fc_id = 441, .cpu_id = 290, .valid = 0, .name = "" }, { .fc_id = 442, .cpu_id = 291, .valid = 0, .name = "" }, { .fc_id = 443, .cpu_id = 292, .valid = 0, .name 
= "" }, { .fc_id = 444, .cpu_id = 293, .valid = 0, .name = "" }, { .fc_id = 445, .cpu_id = 294, .valid = 0, .name = "" }, { .fc_id = 446, .cpu_id = 295, .valid = 0, .name = "" }, { .fc_id = 447, .cpu_id = 296, .valid = 0, .name = "" }, { .fc_id = 448, .cpu_id = 297, .valid = 0, .name = "" }, { .fc_id = 449, .cpu_id = 298, .valid = 0, .name = "" }, { .fc_id = 450, .cpu_id = 299, .valid = 0, .name = "" }, { .fc_id = 451, .cpu_id = 300, .valid = 0, .name = "" }, { .fc_id = 452, .cpu_id = 301, .valid = 0, .name = "" }, { .fc_id = 453, .cpu_id = 302, .valid = 0, .name = "" }, { .fc_id = 454, .cpu_id = 303, .valid = 0, .name = "" }, { .fc_id = 455, .cpu_id = 304, .valid = 0, .name = "" }, { .fc_id = 456, .cpu_id = 305, .valid = 0, .name = "" }, { .fc_id = 457, .cpu_id = 306, .valid = 0, .name = "" }, { .fc_id = 458, .cpu_id = 307, .valid = 0, .name = "" }, { .fc_id = 459, .cpu_id = 308, .valid = 0, .name = "" }, { .fc_id = 460, .cpu_id = 309, .valid = 0, .name = "" }, { .fc_id = 461, .cpu_id = 310, .valid = 0, .name = "" }, { .fc_id = 462, .cpu_id = 311, .valid = 0, .name = "" }, { .fc_id = 463, .cpu_id = 312, .valid = 0, .name = "" }, { .fc_id = 464, .cpu_id = 313, .valid = 0, .name = "" }, { .fc_id = 465, .cpu_id = 314, .valid = 0, .name = "" }, { .fc_id = 466, .cpu_id = 315, .valid = 0, .name = "" }, { .fc_id = 467, .cpu_id = 316, .valid = 0, .name = "" }, { .fc_id = 468, .cpu_id = 317, .valid = 0, .name = "" }, { .fc_id = 469, .cpu_id = 318, .valid = 0, .name = "" }, { .fc_id = 470, .cpu_id = 319, .valid = 0, .name = "" }, { .fc_id = 471, .cpu_id = 320, .valid = 0, .name = "" }, { .fc_id = 472, .cpu_id = 321, .valid = 0, .name = "" }, { .fc_id = 473, .cpu_id = 322, .valid = 0, .name = "" }, { .fc_id = 474, .cpu_id = 323, .valid = 0, .name = "" }, { .fc_id = 475, .cpu_id = 324, .valid = 0, .name = "" }, { .fc_id = 476, .cpu_id = 325, .valid = 0, .name = "" }, { .fc_id = 477, .cpu_id = 326, .valid = 0, .name = "" }, { .fc_id = 478, .cpu_id = 327, .valid = 0, .name = "" 
}, { .fc_id = 479, .cpu_id = 328, .valid = 0, .name = "" }, { .fc_id = 480, .cpu_id = 329, .valid = 0, .name = "" }, { .fc_id = 481, .cpu_id = 330, .valid = 0, .name = "" }, { .fc_id = 482, .cpu_id = 331, .valid = 0, .name = "" }, { .fc_id = 483, .cpu_id = 332, .valid = 1, .name = "NIC0_CS_DBG_DERR" }, { .fc_id = 484, .cpu_id = 333, .valid = 0, .name = "" }, { .fc_id = 485, .cpu_id = 334, .valid = 0, .name = "" }, { .fc_id = 486, .cpu_id = 335, .valid = 0, .name = "" }, { .fc_id = 487, .cpu_id = 336, .valid = 1, .name = "NIC1_CS_DBG_DERR" }, { .fc_id = 488, .cpu_id = 337, .valid = 0, .name = "" }, { .fc_id = 489, .cpu_id = 338, .valid = 0, .name = "" }, { .fc_id = 490, .cpu_id = 339, .valid = 0, .name = "" }, { .fc_id = 491, .cpu_id = 340, .valid = 1, .name = "NIC2_CS_DBG_DERR" }, { .fc_id = 492, .cpu_id = 341, .valid = 0, .name = "" }, { .fc_id = 493, .cpu_id = 342, .valid = 0, .name = "" }, { .fc_id = 494, .cpu_id = 343, .valid = 0, .name = "" }, { .fc_id = 495, .cpu_id = 344, .valid = 1, .name = "NIC3_CS_DBG_DERR" }, { .fc_id = 496, .cpu_id = 345, .valid = 0, .name = "" }, { .fc_id = 497, .cpu_id = 346, .valid = 0, .name = "" }, { .fc_id = 498, .cpu_id = 347, .valid = 0, .name = "" }, { .fc_id = 499, .cpu_id = 348, .valid = 1, .name = "NIC4_CS_DBG_DERR" }, { .fc_id = 500, .cpu_id = 349, .valid = 0, .name = "" }, { .fc_id = 501, .cpu_id = 350, .valid = 0, .name = "" }, { .fc_id = 502, .cpu_id = 351, .valid = 0, .name = "" }, { .fc_id = 503, .cpu_id = 352, .valid = 0, .name = "" }, { .fc_id = 504, .cpu_id = 353, .valid = 0, .name = "" }, { .fc_id = 505, .cpu_id = 354, .valid = 0, .name = "" }, { .fc_id = 506, .cpu_id = 355, .valid = 0, .name = "" }, { .fc_id = 507, .cpu_id = 356, .valid = 0, .name = "" }, { .fc_id = 508, .cpu_id = 357, .valid = 0, .name = "" }, { .fc_id = 509, .cpu_id = 358, .valid = 0, .name = "" }, { .fc_id = 510, .cpu_id = 359, .valid = 0, .name = "" }, { .fc_id = 511, .cpu_id = 360, .valid = 0, .name = "" }, { .fc_id = 512, .cpu_id = 361, 
.valid = 0, .name = "" }, { .fc_id = 513, .cpu_id = 362, .valid = 0, .name = "" }, { .fc_id = 514, .cpu_id = 363, .valid = 0, .name = "" }, { .fc_id = 515, .cpu_id = 364, .valid = 0, .name = "" }, { .fc_id = 516, .cpu_id = 365, .valid = 0, .name = "" }, { .fc_id = 517, .cpu_id = 366, .valid = 0, .name = "" }, { .fc_id = 518, .cpu_id = 367, .valid = 0, .name = "" }, { .fc_id = 519, .cpu_id = 368, .valid = 0, .name = "" }, { .fc_id = 520, .cpu_id = 369, .valid = 0, .name = "" }, { .fc_id = 521, .cpu_id = 370, .valid = 0, .name = "" }, { .fc_id = 522, .cpu_id = 371, .valid = 0, .name = "" }, { .fc_id = 523, .cpu_id = 372, .valid = 0, .name = "" }, { .fc_id = 524, .cpu_id = 373, .valid = 0, .name = "" }, { .fc_id = 525, .cpu_id = 374, .valid = 0, .name = "" }, { .fc_id = 526, .cpu_id = 375, .valid = 0, .name = "" }, { .fc_id = 527, .cpu_id = 376, .valid = 0, .name = "" }, { .fc_id = 528, .cpu_id = 377, .valid = 0, .name = "" }, { .fc_id = 529, .cpu_id = 378, .valid = 0, .name = "" }, { .fc_id = 530, .cpu_id = 379, .valid = 0, .name = "" }, { .fc_id = 531, .cpu_id = 380, .valid = 0, .name = "" }, { .fc_id = 532, .cpu_id = 381, .valid = 0, .name = "" }, { .fc_id = 533, .cpu_id = 382, .valid = 0, .name = "" }, { .fc_id = 534, .cpu_id = 383, .valid = 0, .name = "" }, { .fc_id = 535, .cpu_id = 384, .valid = 0, .name = "" }, { .fc_id = 536, .cpu_id = 385, .valid = 0, .name = "" }, { .fc_id = 537, .cpu_id = 386, .valid = 0, .name = "" }, { .fc_id = 538, .cpu_id = 387, .valid = 0, .name = "" }, { .fc_id = 539, .cpu_id = 388, .valid = 0, .name = "" }, { .fc_id = 540, .cpu_id = 389, .valid = 0, .name = "" }, { .fc_id = 541, .cpu_id = 390, .valid = 0, .name = "" }, { .fc_id = 542, .cpu_id = 391, .valid = 0, .name = "" }, { .fc_id = 543, .cpu_id = 392, .valid = 0, .name = "" }, { .fc_id = 544, .cpu_id = 393, .valid = 0, .name = "" }, { .fc_id = 545, .cpu_id = 394, .valid = 0, .name = "" }, { .fc_id = 546, .cpu_id = 395, .valid = 0, .name = "" }, { .fc_id = 547, .cpu_id = 396, 
.valid = 0, .name = "" }, { .fc_id = 548, .cpu_id = 397, .valid = 1, .name = "RAZWI_OR_ADC" }, { .fc_id = 549, .cpu_id = 398, .valid = 0, .name = "" }, { .fc_id = 550, .cpu_id = 399, .valid = 0, .name = "" }, { .fc_id = 551, .cpu_id = 400, .valid = 0, .name = "" }, { .fc_id = 552, .cpu_id = 401, .valid = 0, .name = "" }, { .fc_id = 553, .cpu_id = 402, .valid = 0, .name = "" }, { .fc_id = 554, .cpu_id = 403, .valid = 0, .name = "" }, { .fc_id = 555, .cpu_id = 404, .valid = 0, .name = "" }, { .fc_id = 556, .cpu_id = 405, .valid = 0, .name = "" }, { .fc_id = 557, .cpu_id = 406, .valid = 0, .name = "" }, { .fc_id = 558, .cpu_id = 407, .valid = 0, .name = "" }, { .fc_id = 559, .cpu_id = 408, .valid = 0, .name = "" }, { .fc_id = 560, .cpu_id = 409, .valid = 0, .name = "" }, { .fc_id = 561, .cpu_id = 410, .valid = 0, .name = "" }, { .fc_id = 562, .cpu_id = 411, .valid = 0, .name = "" }, { .fc_id = 563, .cpu_id = 412, .valid = 0, .name = "" }, { .fc_id = 564, .cpu_id = 413, .valid = 0, .name = "" }, { .fc_id = 565, .cpu_id = 414, .valid = 0, .name = "" }, { .fc_id = 566, .cpu_id = 415, .valid = 0, .name = "" }, { .fc_id = 567, .cpu_id = 416, .valid = 0, .name = "" }, { .fc_id = 568, .cpu_id = 417, .valid = 0, .name = "" }, { .fc_id = 569, .cpu_id = 418, .valid = 0, .name = "" }, { .fc_id = 570, .cpu_id = 419, .valid = 0, .name = "" }, { .fc_id = 571, .cpu_id = 420, .valid = 0, .name = "" }, { .fc_id = 572, .cpu_id = 421, .valid = 1, .name = "TPC0_QM" }, { .fc_id = 573, .cpu_id = 422, .valid = 1, .name = "TPC1_QM" }, { .fc_id = 574, .cpu_id = 423, .valid = 1, .name = "TPC2_QM" }, { .fc_id = 575, .cpu_id = 424, .valid = 1, .name = "TPC3_QM" }, { .fc_id = 576, .cpu_id = 425, .valid = 1, .name = "TPC4_QM" }, { .fc_id = 577, .cpu_id = 426, .valid = 1, .name = "TPC5_QM" }, { .fc_id = 578, .cpu_id = 427, .valid = 1, .name = "TPC6_QM" }, { .fc_id = 579, .cpu_id = 428, .valid = 1, .name = "TPC7_QM" }, { .fc_id = 580, .cpu_id = 429, .valid = 0, .name = "" }, { .fc_id = 581, .cpu_id 
= 430, .valid = 1, .name = "MME0_QM" }, { .fc_id = 582, .cpu_id = 431, .valid = 1, .name = "MME2_QM" }, { .fc_id = 583, .cpu_id = 432, .valid = 1, .name = "DMA0_QM" }, { .fc_id = 584, .cpu_id = 433, .valid = 1, .name = "DMA1_QM" }, { .fc_id = 585, .cpu_id = 434, .valid = 1, .name = "DMA2_QM" }, { .fc_id = 586, .cpu_id = 435, .valid = 1, .name = "DMA3_QM" }, { .fc_id = 587, .cpu_id = 436, .valid = 1, .name = "DMA4_QM" }, { .fc_id = 588, .cpu_id = 437, .valid = 1, .name = "DMA5_QM" }, { .fc_id = 589, .cpu_id = 438, .valid = 1, .name = "DMA6_QM" }, { .fc_id = 590, .cpu_id = 439, .valid = 1, .name = "DMA7_QM" }, { .fc_id = 591, .cpu_id = 440, .valid = 0, .name = "" }, { .fc_id = 592, .cpu_id = 441, .valid = 0, .name = "" }, { .fc_id = 593, .cpu_id = 442, .valid = 0, .name = "" }, { .fc_id = 594, .cpu_id = 443, .valid = 1, .name = "NIC0_QM0" }, { .fc_id = 595, .cpu_id = 444, .valid = 1, .name = "NIC0_QM1" }, { .fc_id = 596, .cpu_id = 445, .valid = 1, .name = "NIC1_QM0" }, { .fc_id = 597, .cpu_id = 446, .valid = 1, .name = "NIC1_QM1" }, { .fc_id = 598, .cpu_id = 447, .valid = 1, .name = "NIC2_QM0" }, { .fc_id = 599, .cpu_id = 448, .valid = 1, .name = "NIC2_QM1" }, { .fc_id = 600, .cpu_id = 449, .valid = 1, .name = "NIC3_QM0" }, { .fc_id = 601, .cpu_id = 450, .valid = 1, .name = "NIC3_QM1" }, { .fc_id = 602, .cpu_id = 451, .valid = 1, .name = "NIC4_QM0" }, { .fc_id = 603, .cpu_id = 452, .valid = 1, .name = "NIC4_QM1" }, { .fc_id = 604, .cpu_id = 453, .valid = 1, .name = "DMA0_CORE" }, { .fc_id = 605, .cpu_id = 454, .valid = 1, .name = "DMA1_CORE" }, { .fc_id = 606, .cpu_id = 455, .valid = 1, .name = "DMA2_CORE" }, { .fc_id = 607, .cpu_id = 456, .valid = 1, .name = "DMA3_CORE" }, { .fc_id = 608, .cpu_id = 457, .valid = 1, .name = "DMA4_CORE" }, { .fc_id = 609, .cpu_id = 458, .valid = 1, .name = "DMA5_CORE" }, { .fc_id = 610, .cpu_id = 459, .valid = 1, .name = "DMA6_CORE" }, { .fc_id = 611, .cpu_id = 460, .valid = 1, .name = "DMA7_CORE" }, { .fc_id = 612, .cpu_id = 461, 
.valid = 1, .name = "NIC0_QP0" }, { .fc_id = 613, .cpu_id = 462, .valid = 1, .name = "NIC0_QP1" }, { .fc_id = 614, .cpu_id = 463, .valid = 1, .name = "NIC1_QP0" }, { .fc_id = 615, .cpu_id = 464, .valid = 1, .name = "NIC1_QP1" }, { .fc_id = 616, .cpu_id = 465, .valid = 1, .name = "NIC2_QP0" }, { .fc_id = 617, .cpu_id = 466, .valid = 1, .name = "NIC2_QP1" }, { .fc_id = 618, .cpu_id = 467, .valid = 1, .name = "NIC3_QP0" }, { .fc_id = 619, .cpu_id = 468, .valid = 1, .name = "NIC3_QP1" }, { .fc_id = 620, .cpu_id = 469, .valid = 1, .name = "NIC4_QP0" }, { .fc_id = 621, .cpu_id = 470, .valid = 1, .name = "NIC4_QP1" }, { .fc_id = 622, .cpu_id = 471, .valid = 0, .name = "" }, { .fc_id = 623, .cpu_id = 472, .valid = 0, .name = "" }, { .fc_id = 624, .cpu_id = 473, .valid = 0, .name = "" }, { .fc_id = 625, .cpu_id = 474, .valid = 0, .name = "" }, { .fc_id = 626, .cpu_id = 475, .valid = 0, .name = "" }, { .fc_id = 627, .cpu_id = 476, .valid = 0, .name = "" }, { .fc_id = 628, .cpu_id = 477, .valid = 0, .name = "" }, { .fc_id = 629, .cpu_id = 478, .valid = 0, .name = "" }, { .fc_id = 630, .cpu_id = 479, .valid = 0, .name = "" }, { .fc_id = 631, .cpu_id = 480, .valid = 0, .name = "" }, { .fc_id = 632, .cpu_id = 481, .valid = 0, .name = "" }, { .fc_id = 633, .cpu_id = 482, .valid = 0, .name = "" }, { .fc_id = 634, .cpu_id = 483, .valid = 0, .name = "" }, { .fc_id = 635, .cpu_id = 484, .valid = 1, .name = "PI_UPDATE" }, { .fc_id = 636, .cpu_id = 485, .valid = 1, .name = "HALT_MACHINE" }, { .fc_id = 637, .cpu_id = 486, .valid = 1, .name = "INTS_REGISTER" }, { .fc_id = 638, .cpu_id = 487, .valid = 1, .name = "SOFT_RESET" }, { .fc_id = 639, .cpu_id = 488, .valid = 0, .name = "" }, { .fc_id = 640, .cpu_id = 489, .valid = 0, .name = "" }, { .fc_id = 641, .cpu_id = 490, .valid = 0, .name = "" }, { .fc_id = 642, .cpu_id = 491, .valid = 0, .name = "" }, { .fc_id = 643, .cpu_id = 492, .valid = 0, .name = "" }, { .fc_id = 644, .cpu_id = 493, .valid = 0, .name = "" }, { .fc_id = 645, .cpu_id = 
494, .valid = 1, .name = "FW_ALIVE_S" }, { .fc_id = 646, .cpu_id = 495, .valid = 1, .name = "DEV_RESET_REQ" }, { .fc_id = 647, .cpu_id = 496, .valid = 1, .name = "PKT_QUEUE_OUT_SYNC" }, { .fc_id = 648, .cpu_id = 497, .valid = 1, .name = "STATUS_NIC0_ENG0" }, { .fc_id = 649, .cpu_id = 498, .valid = 1, .name = "STATUS_NIC0_ENG1" }, { .fc_id = 650, .cpu_id = 499, .valid = 1, .name = "STATUS_NIC1_ENG0" }, { .fc_id = 651, .cpu_id = 500, .valid = 1, .name = "STATUS_NIC1_ENG1" }, { .fc_id = 652, .cpu_id = 501, .valid = 1, .name = "STATUS_NIC2_ENG0" }, { .fc_id = 653, .cpu_id = 502, .valid = 1, .name = "STATUS_NIC2_ENG1" }, { .fc_id = 654, .cpu_id = 503, .valid = 1, .name = "STATUS_NIC3_ENG0" }, { .fc_id = 655, .cpu_id = 504, .valid = 1, .name = "STATUS_NIC3_ENG1" }, { .fc_id = 656, .cpu_id = 505, .valid = 1, .name = "STATUS_NIC4_ENG0" }, { .fc_id = 657, .cpu_id = 506, .valid = 1, .name = "STATUS_NIC4_ENG1" }, { .fc_id = 658, .cpu_id = 507, .valid = 1, .name = "FIX_POWER_ENV_S" }, { .fc_id = 659, .cpu_id = 508, .valid = 1, .name = "FIX_POWER_ENV_E" }, { .fc_id = 660, .cpu_id = 509, .valid = 1, .name = "FIX_THERMAL_ENV_S" }, { .fc_id = 661, .cpu_id = 510, .valid = 1, .name = "FIX_THERMAL_ENV_E" }, { .fc_id = 662, .cpu_id = 511, .valid = 1, .name = "RAZWI_OR_ADC_SW" }, }; #endif /* __GAUDI_ASYNC_IDS_MAP_EVENTS_EXT_H_ */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011 Texas Instruments Incorporated - https://www.ti.com/
 */
/dts-v1/;

#include "omap34xx.dtsi"
#include "omap3-evm-common.dtsi"
#include "omap3-evm-processor-common.dtsi"

/ {
	model = "TI OMAP35XX EVM (TMDSEVM3530)";
	compatible = "ti,omap3-evm", "ti,omap3430", "ti,omap3";
};

/* CORE2 pad mux: ETK pads repurposed for the HSUSB2 ULPI port and its
 * control GPIOs (reset on gpio_21, VBUS enable on gpio_22). */
&omap3_pmx_core2 {
	pinctrl-names = "default";
	pinctrl-0 = <&hsusb2_2_pins>;

	ehci_phy_pins: ehci-phy-pins {
		pinctrl-single,pins = <
			/* EHCI PHY reset GPIO etk_d7.gpio_21 */
			OMAP3430_CORE2_IOPAD(0x25ea, PIN_OUTPUT | MUX_MODE4)

			/* EHCI VBUS etk_d8.gpio_22 */
			OMAP3430_CORE2_IOPAD(0x25ec, PIN_OUTPUT | MUX_MODE4)
		>;
	};

	/* Used by OHCI and EHCI. OHCI won't work without external phy */
	hsusb2_2_pins: hsusb2-2-pins {
		pinctrl-single,pins = <
			/* etk_d10.hsusb2_clk */
			OMAP3430_CORE2_IOPAD(0x25f0, PIN_OUTPUT | MUX_MODE3)

			/* etk_d11.hsusb2_stp */
			OMAP3430_CORE2_IOPAD(0x25f2, PIN_OUTPUT | MUX_MODE3)

			/* etk_d12.hsusb2_dir */
			OMAP3430_CORE2_IOPAD(0x25f4, PIN_INPUT_PULLDOWN | MUX_MODE3)

			/* etk_d13.hsusb2_nxt */
			OMAP3430_CORE2_IOPAD(0x25f6, PIN_INPUT_PULLDOWN | MUX_MODE3)

			/* etk_d14.hsusb2_data0 */
			OMAP3430_CORE2_IOPAD(0x25f8, PIN_INPUT_PULLDOWN | MUX_MODE3)

			/* etk_d15.hsusb2_data1 */
			OMAP3430_CORE2_IOPAD(0x25fa, PIN_INPUT_PULLDOWN | MUX_MODE3)
		>;
	};
};

/* 16-bit NAND flash on GPMC chip-select 0, using 8-bit BCH ECC.
 * The gpmc,*-ns properties are bus timings in nanoseconds and are
 * board/part specific — do not tune without the flash datasheet. */
&gpmc {
	nand@0,0 {
		compatible = "ti,omap2-nand";
		reg = <0 0 4>;	/* CS0, offset 0, IO size 4 */
		interrupt-parent = <&gpmc>;
		interrupts = <0 IRQ_TYPE_NONE>,	/* fifoevent */
			     <1 IRQ_TYPE_NONE>;	/* termcount */
		nand-bus-width = <16>;
		gpmc,device-width = <2>;	/* 2 bytes = 16-bit device */
		ti,nand-ecc-opt = "bch8";
		gpmc,sync-clk-ps = <0>;
		gpmc,cs-on-ns = <0>;
		gpmc,cs-rd-off-ns = <44>;
		gpmc,cs-wr-off-ns = <44>;
		gpmc,adv-on-ns = <6>;
		gpmc,adv-rd-off-ns = <34>;
		gpmc,adv-wr-off-ns = <44>;
		gpmc,we-off-ns = <40>;
		gpmc,oe-off-ns = <54>;
		gpmc,access-ns = <64>;
		gpmc,rd-cycle-ns = <82>;
		gpmc,wr-cycle-ns = <82>;
		gpmc,wr-access-ns = <40>;
		gpmc,wr-data-mux-bus-ns = <0>;
		#address-cells = <1>;
		#size-cells = <1>;
	};
};
/*
 * Copyright (c) 1993-2014, NVIDIA CORPORATION. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#ifndef _cl907c_h_
#define _cl907c_h_

/*
 * Method and field definitions for NVIDIA display class 0x907C.
 *
 * NOTE: values of the form "M:L" are bit-field positions (MSB:LSB) in
 * NVIDIA's "drf" notation; they are consumed by token-pasting accessor
 * macros and are not valid as ordinary C expressions.  Method offsets
 * and enumerant values are parenthesized hex constants.
 */

// class methods

/* Presentation (flip) control */
#define NV907C_SET_PRESENT_CONTROL                                  (0x00000084)
#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE                       9:8
#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_NON_TEARING           (0x00000000)
#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_IMMEDIATE             (0x00000001)
#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_ON_LINE               (0x00000002)
#define NV907C_SET_PRESENT_CONTROL_BEGIN_MODE_AT_FRAME              (0x00000003)
#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE                   2:2
#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_DISABLE           (0x00000000)
#define NV907C_SET_PRESENT_CONTROL_TIMESTAMP_MODE_ENABLE            (0x00000001)
#define NV907C_SET_PRESENT_CONTROL_MIN_PRESENT_INTERVAL             7:4
#define NV907C_SET_PRESENT_CONTROL_BEGIN_LINE                       30:16
#define NV907C_SET_PRESENT_CONTROL_ON_LINE_MARGIN                   15:10

/* Isochronous (scanout) context DMAs; (b) selects the slot */
#define NV907C_SET_CONTEXT_DMAS_ISO(b)                              (0x000000C0 + (b)*0x00000004)
#define NV907C_SET_CONTEXT_DMAS_ISO_HANDLE                          31:0

/* Input (base) LUT */
#define NV907C_SET_BASE_LUT_LO                                      (0x000000E0)
#define NV907C_SET_BASE_LUT_LO_ENABLE                               31:30
#define NV907C_SET_BASE_LUT_LO_ENABLE_DISABLE                       (0x00000000)
#define NV907C_SET_BASE_LUT_LO_ENABLE_USE_CORE_LUT                  (0x00000001)
#define NV907C_SET_BASE_LUT_LO_ENABLE_ENABLE                        (0x00000002)
#define NV907C_SET_BASE_LUT_LO_MODE                                 27:24
#define NV907C_SET_BASE_LUT_LO_MODE_LORES                           (0x00000000)
#define NV907C_SET_BASE_LUT_LO_MODE_HIRES                           (0x00000001)
#define NV907C_SET_BASE_LUT_LO_MODE_INDEX_1025_UNITY_RANGE          (0x00000003)
#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE    (0x00000004)
#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE   (0x00000005)
#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE    (0x00000006)
#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE     (0x00000007)
#define NV907C_SET_BASE_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE    (0x00000008)
#define NV907C_SET_BASE_LUT_HI                                      (0x000000E4)
#define NV907C_SET_BASE_LUT_HI_ORIGIN                               31:0

/* Output LUT (same enable/mode encoding as the base LUT) */
#define NV907C_SET_OUTPUT_LUT_LO                                    (0x000000E8)
#define NV907C_SET_OUTPUT_LUT_LO_ENABLE                             31:30
#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_DISABLE                     (0x00000000)
#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_USE_CORE_LUT                (0x00000001)
#define NV907C_SET_OUTPUT_LUT_LO_ENABLE_ENABLE                      (0x00000002)
#define NV907C_SET_OUTPUT_LUT_LO_MODE                               27:24
#define NV907C_SET_OUTPUT_LUT_LO_MODE_LORES                         (0x00000000)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_HIRES                         (0x00000001)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_INDEX_1025_UNITY_RANGE        (0x00000003)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_UNITY_RANGE  (0x00000004)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XRBIAS_RANGE (0x00000005)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_1025_XVYCC_RANGE  (0x00000006)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_UNITY_RANGE   (0x00000007)
#define NV907C_SET_OUTPUT_LUT_LO_MODE_INTERPOLATE_257_LEGACY_RANGE  (0x00000008)
#define NV907C_SET_CONTEXT_DMA_LUT                                  (0x000000FC)
#define NV907C_SET_CONTEXT_DMA_LUT_HANDLE                           31:0

/* 3x3 color-space-conversion matrix plus per-channel constants.
 * Only RED2RED carries the OWNER (core vs. base channel) field. */
#define NV907C_SET_CSC_RED2RED                                      (0x00000140)
#define NV907C_SET_CSC_RED2RED_OWNER                                31:31
#define NV907C_SET_CSC_RED2RED_OWNER_CORE                           (0x00000000)
#define NV907C_SET_CSC_RED2RED_OWNER_BASE                           (0x00000001)
#define NV907C_SET_CSC_RED2RED_COEFF                                18:0
#define NV907C_SET_CSC_GRN2RED                                      (0x00000144)
#define NV907C_SET_CSC_GRN2RED_COEFF                                18:0
#define NV907C_SET_CSC_BLU2RED                                      (0x00000148)
#define NV907C_SET_CSC_BLU2RED_COEFF                                18:0
#define NV907C_SET_CSC_CONSTANT2RED                                 (0x0000014C)
#define NV907C_SET_CSC_CONSTANT2RED_COEFF                           18:0
#define NV907C_SET_CSC_RED2GRN                                      (0x00000150)
#define NV907C_SET_CSC_RED2GRN_COEFF                                18:0
#define NV907C_SET_CSC_GRN2GRN                                      (0x00000154)
#define NV907C_SET_CSC_GRN2GRN_COEFF                                18:0
#define NV907C_SET_CSC_BLU2GRN                                      (0x00000158)
#define NV907C_SET_CSC_BLU2GRN_COEFF                                18:0
#define NV907C_SET_CSC_CONSTANT2GRN                                 (0x0000015C)
#define NV907C_SET_CSC_CONSTANT2GRN_COEFF                           18:0
#define NV907C_SET_CSC_RED2BLU                                      (0x00000160)
#define NV907C_SET_CSC_RED2BLU_COEFF                                18:0
#define NV907C_SET_CSC_GRN2BLU                                      (0x00000164)
#define NV907C_SET_CSC_GRN2BLU_COEFF                                18:0
#define NV907C_SET_CSC_BLU2BLU                                      (0x00000168)
#define NV907C_SET_CSC_BLU2BLU_COEFF                                18:0
#define NV907C_SET_CSC_CONSTANT2BLU                                 (0x0000016C)
#define NV907C_SET_CSC_CONSTANT2BLU_COEFF                           18:0

/* Per-surface state; (a) selects the surface, (b) the offset slot */
#define NV907C_SURFACE_SET_OFFSET(a,b)                              (0x00000400 + (a)*0x00000020 + (b)*0x00000004)
#define NV907C_SURFACE_SET_OFFSET_ORIGIN                            31:0
#define NV907C_SURFACE_SET_SIZE(a)                                  (0x00000408 + (a)*0x00000020)
#define NV907C_SURFACE_SET_SIZE_WIDTH                               15:0
#define NV907C_SURFACE_SET_SIZE_HEIGHT                              31:16
#define NV907C_SURFACE_SET_STORAGE(a)                               (0x0000040C + (a)*0x00000020)
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT                     3:0
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_ONE_GOB             (0x00000000)
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_TWO_GOBS            (0x00000001)
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_FOUR_GOBS           (0x00000002)
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_EIGHT_GOBS          (0x00000003)
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_SIXTEEN_GOBS        (0x00000004)
#define NV907C_SURFACE_SET_STORAGE_BLOCK_HEIGHT_THIRTYTWO_GOBS      (0x00000005)
#define NV907C_SURFACE_SET_STORAGE_PITCH                            20:8
#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT                    24:24
#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_BLOCKLINEAR        (0x00000000)
#define NV907C_SURFACE_SET_STORAGE_MEMORY_LAYOUT_PITCH              (0x00000001)
#define NV907C_SURFACE_SET_PARAMS(a)                                (0x00000410 + (a)*0x00000020)
#define NV907C_SURFACE_SET_PARAMS_FORMAT                            15:8
#define NV907C_SURFACE_SET_PARAMS_FORMAT_I8                         (0x0000001E)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_VOID16                     (0x0000001F)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_VOID32                     (0x0000002E)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_RF16_GF16_BF16_AF16        (0x000000CA)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_A8R8G8B8                   (0x000000CF)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_A2B10G10R10                (0x000000D1)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_X2BL10GL10RL10_XRBIAS      (0x00000022)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_A8B8G8R8                   (0x000000D5)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_R5G6B5                     (0x000000E8)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_A1R5G5B5                   (0x000000E9)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16            (0x000000C6)
#define NV907C_SURFACE_SET_PARAMS_FORMAT_R16_G16_B16_A16_NVBIAS     (0x00000023)
#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE                      1:0
#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X1_AA                (0x00000000)
#define NV907C_SURFACE_SET_PARAMS_SUPER_SAMPLE_X4_AA                (0x00000002)
#define NV907C_SURFACE_SET_PARAMS_GAMMA                             2:2
#define NV907C_SURFACE_SET_PARAMS_GAMMA_LINEAR                      (0x00000000)
#define NV907C_SURFACE_SET_PARAMS_GAMMA_SRGB                        (0x00000001)
#define NV907C_SURFACE_SET_PARAMS_LAYOUT                            5:4
#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FRM                        (0x00000000)
#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FLD1                       (0x00000001)
#define NV907C_SURFACE_SET_PARAMS_LAYOUT_FLD2                       (0x00000002)
#endif // _cl907c_h
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

/************************************
 ** This is an auto-generated file **
 **       DO NOT EDIT BELOW        **
 ************************************/

#ifndef ASIC_REG_DCORE0_TPC0_EML_STM_REGS_H_
#define ASIC_REG_DCORE0_TPC0_EML_STM_REGS_H_

/*
 *****************************************
 *   DCORE0_TPC0_EML_STM
 *   (Prototype: STM)
 *****************************************
 *
 * Register offsets for the TPC0 embedded-logic STM trace block.
 * NOTE(review): the names (STMSPER, STMHEER, STMPIDRn, STMCIDRn, ...)
 * match the ARM CoreSight STM programmers' model — confirm against the
 * STM-500 TRM before relying on individual register semantics.
 */

#define mmDCORE0_TPC0_EML_STM_STMDMASTARTR 0x3C04
#define mmDCORE0_TPC0_EML_STM_STMDMASTOPR 0x3C08
#define mmDCORE0_TPC0_EML_STM_STMDMASTATR 0x3C0C
#define mmDCORE0_TPC0_EML_STM_STMDMACTLR 0x3C10
#define mmDCORE0_TPC0_EML_STM_STMDMAIDR 0x3CFC
#define mmDCORE0_TPC0_EML_STM_STMHEER 0x3D00
#define mmDCORE0_TPC0_EML_STM_STMHETER 0x3D20
#define mmDCORE0_TPC0_EML_STM_STMHEBSR 0x3D60
#define mmDCORE0_TPC0_EML_STM_STMHEMCR 0x3D64
#define mmDCORE0_TPC0_EML_STM_STMHEEXTMUXR 0x3D68
#define mmDCORE0_TPC0_EML_STM_STMHEMASTR 0x3DF4
#define mmDCORE0_TPC0_EML_STM_STMHEFEAT1R 0x3DF8
#define mmDCORE0_TPC0_EML_STM_STMHEIDR 0x3DFC
#define mmDCORE0_TPC0_EML_STM_STMSPER 0x3E00
#define mmDCORE0_TPC0_EML_STM_STMSPTER 0x3E20
#define mmDCORE0_TPC0_EML_STM_STMSPSCR 0x3E60
#define mmDCORE0_TPC0_EML_STM_STMSPMSCR 0x3E64
#define mmDCORE0_TPC0_EML_STM_STMSPOVERRIDER 0x3E68
#define mmDCORE0_TPC0_EML_STM_STMSPMOVERRIDER 0x3E6C
#define mmDCORE0_TPC0_EML_STM_STMSPTRIGCSR 0x3E70
#define mmDCORE0_TPC0_EML_STM_STMTCSR 0x3E80
#define mmDCORE0_TPC0_EML_STM_STMTSSTIMR 0x3E84
#define mmDCORE0_TPC0_EML_STM_STMTSFREQR 0x3E8C
#define mmDCORE0_TPC0_EML_STM_STMSYNCR 0x3E90
#define mmDCORE0_TPC0_EML_STM_STMAUXCR 0x3E94
#define mmDCORE0_TPC0_EML_STM_STMFEAT1R 0x3EA0
#define mmDCORE0_TPC0_EML_STM_STMFEAT2R 0x3EA4
#define mmDCORE0_TPC0_EML_STM_STMFEAT3R 0x3EA8
#define mmDCORE0_TPC0_EML_STM_STMITTRIGGER 0x3EE8
#define mmDCORE0_TPC0_EML_STM_STMITATBDATA0 0x3EEC
#define mmDCORE0_TPC0_EML_STM_STMITATBCTR2 0x3EF0
#define mmDCORE0_TPC0_EML_STM_STMITATBID 0x3EF4
#define mmDCORE0_TPC0_EML_STM_STMITATBCTR0 0x3EF8
#define mmDCORE0_TPC0_EML_STM_STMITCTRL 0x3F00
#define mmDCORE0_TPC0_EML_STM_STMCLAIMSET 0x3FA0
#define mmDCORE0_TPC0_EML_STM_STMCLAIMCLR 0x3FA4
#define mmDCORE0_TPC0_EML_STM_STMLAR 0x3FB0
#define mmDCORE0_TPC0_EML_STM_STMLSR 0x3FB4
#define mmDCORE0_TPC0_EML_STM_STMAUTHSTATUS 0x3FB8
#define mmDCORE0_TPC0_EML_STM_STMDEVARCH 0x3FBC
#define mmDCORE0_TPC0_EML_STM_STMDEVID 0x3FC8
#define mmDCORE0_TPC0_EML_STM_STMDEVTYPE 0x3FCC
#define mmDCORE0_TPC0_EML_STM_STMPIDR4 0x3FD0
#define mmDCORE0_TPC0_EML_STM_STMPIDR5 0x3FD4
#define mmDCORE0_TPC0_EML_STM_STMPIDR6 0x3FD8
#define mmDCORE0_TPC0_EML_STM_STMPIDR7 0x3FDC
#define mmDCORE0_TPC0_EML_STM_STMPIDR0 0x3FE0
#define mmDCORE0_TPC0_EML_STM_STMPIDR1 0x3FE4
#define mmDCORE0_TPC0_EML_STM_STMPIDR2 0x3FE8
#define mmDCORE0_TPC0_EML_STM_STMPIDR3 0x3FEC
#define mmDCORE0_TPC0_EML_STM_STMCIDR0 0x3FF0
#define mmDCORE0_TPC0_EML_STM_STMCIDR1 0x3FF4
#define mmDCORE0_TPC0_EML_STM_STMCIDR2 0x3FF8
#define mmDCORE0_TPC0_EML_STM_STMCIDR3 0x3FFC

#endif /* ASIC_REG_DCORE0_TPC0_EML_STM_REGS_H_ */
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _MTK_VCODEC_FW_PRIV_H_
#define _MTK_VCODEC_FW_PRIV_H_

#include "mtk_vcodec_fw.h"

struct mtk_vcodec_dec_dev;
struct mtk_vcodec_enc_dev;

/*
 * Firmware handle shared by the two possible firmware back-ends (VPU and
 * SCP).  Callers interact with it only through @ops; the remaining members
 * are back-end state.
 */
struct mtk_vcodec_fw {
	enum mtk_vcodec_fw_type type;		/* which back-end this handle was created for */
	const struct mtk_vcodec_fw_ops *ops;	/* back-end vtable (see below) */
	struct platform_device *pdev;		/* presumably VPU back-end state — confirm in mtk_vcodec_fw_vpu.c */
	struct mtk_scp *scp;			/* presumably SCP back-end state — confirm in mtk_vcodec_fw_scp.c */
	enum mtk_vcodec_fw_use fw_use;		/* decoder vs encoder usage of the firmware */
};

/*
 * Per-back-end operations.  Each function takes the handle created by one of
 * the *_init() functions declared below.
 */
struct mtk_vcodec_fw_ops {
	/* Load/boot the firmware image; returns 0 or a negative errno. */
	int (*load_firmware)(struct mtk_vcodec_fw *fw);
	/* Query decoder capability bits reported by the firmware. */
	unsigned int (*get_vdec_capa)(struct mtk_vcodec_fw *fw);
	/* Query encoder capability bits reported by the firmware. */
	unsigned int (*get_venc_capa)(struct mtk_vcodec_fw *fw);
	/* Translate a firmware-side memory address into a kernel pointer. */
	void *(*map_dm_addr)(struct mtk_vcodec_fw *fw, u32 dtcm_dmem_addr);
	/* Register @handler for IPI message @id; @priv is passed back to it. */
	int (*ipi_register)(struct mtk_vcodec_fw *fw, int id,
			    mtk_vcodec_ipi_handler handler, const char *name,
			    void *priv);
	/* Send an IPI message; @wait semantics are back-end defined. */
	int (*ipi_send)(struct mtk_vcodec_fw *fw, int id, void *buf,
			unsigned int len, unsigned int wait);
	/* Release all resources owned by the handle. */
	void (*release)(struct mtk_vcodec_fw *fw);
};

/*
 * Back-end constructors.  When a back-end is compiled out, the inline stub
 * returns ERR_PTR(-ENODEV) so callers uniformly IS_ERR()-check the result.
 */
#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_VPU)
struct mtk_vcodec_fw *mtk_vcodec_fw_vpu_init(void *priv,
					     enum mtk_vcodec_fw_use fw_use);
#else
static inline struct mtk_vcodec_fw *
mtk_vcodec_fw_vpu_init(void *priv, enum mtk_vcodec_fw_use fw_use)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_VPU */

#if IS_ENABLED(CONFIG_VIDEO_MEDIATEK_VCODEC_SCP)
struct mtk_vcodec_fw *mtk_vcodec_fw_scp_init(void *priv,
					     enum mtk_vcodec_fw_use fw_use);
#else
static inline struct mtk_vcodec_fw *
mtk_vcodec_fw_scp_init(void *priv, enum mtk_vcodec_fw_use fw_use)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_VIDEO_MEDIATEK_VCODEC_SCP */

#endif /* _MTK_VCODEC_FW_PRIV_H_ */
/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
/* Copyright(c) 2015-17 Intel Corporation. */

#ifndef __SDW_REGISTERS_H
#define __SDW_REGISTERS_H

/*
 * SDW registers as defined by MIPI 1.2 Spec
 */
/* Fields of a paged register address: 15-bit in-page address + two page bytes. */
#define SDW_REGADDR			GENMASK(14, 0)
#define SDW_SCP_ADDRPAGE2_MASK		GENMASK(22, 15)
#define SDW_SCP_ADDRPAGE1_MASK		GENMASK(30, 23)

#define SDW_REG_NO_PAGE			0x00008000
#define SDW_REG_OPTIONAL_PAGE		0x00010000
#define SDW_REG_MAX			0x48000000

/* Stride between per-port (DPn) register banks. */
#define SDW_DPN_SIZE			0x100
/* Bank-1 copies of banked registers live at bank-0 address + this offset. */
#define SDW_BANK1_OFFSET		0x10

/*
 * DP0 Interrupt register & bits
 *
 * Spec treats Status (RO) and Clear (WC) as separate but they are same
 * address, so treat as same register with WC.
 */

/* both INT and STATUS register are same */
#define SDW_DP0_INT			0x0
#define SDW_DP0_INTMASK			0x1
#define SDW_DP0_PORTCTRL		0x2
#define SDW_DP0_BLOCKCTRL1		0x3
#define SDW_DP0_PREPARESTATUS		0x4
#define SDW_DP0_PREPARECTRL		0x5

#define SDW_DP0_INT_TEST_FAIL		BIT(0)
#define SDW_DP0_INT_PORT_READY		BIT(1)
#define SDW_DP0_INT_BRA_FAILURE		BIT(2)
#define SDW_DP0_SDCA_CASCADE		BIT(3)
/* BIT(4) not allocated in SoundWire specification 1.2 */
#define SDW_DP0_INT_IMPDEF1		BIT(5)
#define SDW_DP0_INT_IMPDEF2		BIT(6)
#define SDW_DP0_INT_IMPDEF3		BIT(7)
#define SDW_DP0_INTERRUPTS		(SDW_DP0_INT_TEST_FAIL | \
					 SDW_DP0_INT_PORT_READY | \
					 SDW_DP0_INT_BRA_FAILURE | \
					 SDW_DP0_INT_IMPDEF1 | \
					 SDW_DP0_INT_IMPDEF2 | \
					 SDW_DP0_INT_IMPDEF3)

#define SDW_DP0_PORTCTRL_DATAMODE	GENMASK(3, 2)
#define SDW_DP0_PORTCTRL_NXTINVBANK	BIT(4)
#define SDW_DP0_PORTCTRL_BPT_PAYLD	GENMASK(7, 6)

#define SDW_DP0_CHANNELEN		0x20
#define SDW_DP0_SAMPLECTRL1		0x22
#define SDW_DP0_SAMPLECTRL2		0x23
#define SDW_DP0_OFFSETCTRL1		0x24
#define SDW_DP0_OFFSETCTRL2		0x25
#define SDW_DP0_HCTRL			0x26
#define SDW_DP0_LANECTRL		0x28

/* Both INT and STATUS register are same */
#define SDW_SCP_INT1			0x40
#define SDW_SCP_INTMASK1		0x41

#define SDW_SCP_INT1_PARITY		BIT(0)
#define SDW_SCP_INT1_BUS_CLASH		BIT(1)
#define SDW_SCP_INT1_IMPL_DEF		BIT(2)
#define SDW_SCP_INT1_SCP2_CASCADE	BIT(7)
#define SDW_SCP_INT1_PORT0_3		GENMASK(6, 3)

#define SDW_SCP_INTSTAT2		0x42
#define SDW_SCP_INTSTAT2_SCP3_CASCADE	BIT(7)
#define SDW_SCP_INTSTAT2_PORT4_10	GENMASK(6, 0)

#define SDW_SCP_INTSTAT3		0x43
#define SDW_SCP_INTSTAT3_PORT11_14	GENMASK(3, 0)

/* Number of interrupt status registers */
#define SDW_NUM_INT_STAT_REGISTERS	3

/* Number of interrupt clear registers */
#define SDW_NUM_INT_CLEAR_REGISTERS	1

/* NOTE: CTRL (write) and STAT (read) share address 0x44. */
#define SDW_SCP_CTRL			0x44
#define SDW_SCP_CTRL_CLK_STP_NOW	BIT(1)
#define SDW_SCP_CTRL_FORCE_RESET	BIT(7)

#define SDW_SCP_STAT			0x44
#define SDW_SCP_STAT_CLK_STP_NF		BIT(0)
#define SDW_SCP_STAT_HPHY_NOK		BIT(5)
#define SDW_SCP_STAT_CURR_BANK		BIT(6)

#define SDW_SCP_SYSTEMCTRL		0x45
#define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP		BIT(0)
#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE		BIT(2)
#define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN		BIT(3)
#define SDW_SCP_SYSTEMCTRL_HIGH_PHY		BIT(4)

#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0	0
#define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1	BIT(2)

#define SDW_SCP_DEVNUMBER		0x46
#define SDW_SCP_HIGH_PHY_CHECK		0x47
#define SDW_SCP_ADDRPAGE1		0x48
#define SDW_SCP_ADDRPAGE2		0x49
#define SDW_SCP_KEEPEREN		0x4A
#define SDW_SCP_BANKDELAY		0x4B
#define SDW_SCP_COMMIT			0x4C

#define SDW_SCP_BUS_CLOCK_BASE		0x4D
#define SDW_SCP_BASE_CLOCK_FREQ		GENMASK(2, 0)
#define SDW_SCP_BASE_CLOCK_UNKNOWN	0x0
#define SDW_SCP_BASE_CLOCK_19200000_HZ	0x1
#define SDW_SCP_BASE_CLOCK_24000000_HZ	0x2
#define SDW_SCP_BASE_CLOCK_24576000_HZ	0x3
#define SDW_SCP_BASE_CLOCK_22579200_HZ	0x4
#define SDW_SCP_BASE_CLOCK_32000000_HZ	0x5
#define SDW_SCP_BASE_CLOCK_RESERVED	0x6
#define SDW_SCP_BASE_CLOCK_IMP_DEF	0x7

/* 0x4E is not allocated in SoundWire specification 1.2 */
#define SDW_SCP_TESTMODE		0x4F

#define SDW_SCP_DEVID_0			0x50
#define SDW_SCP_DEVID_1			0x51
#define SDW_SCP_DEVID_2			0x52
#define SDW_SCP_DEVID_3			0x53
#define SDW_SCP_DEVID_4			0x54
#define SDW_SCP_DEVID_5			0x55

/* Both INT and STATUS register are same */
#define SDW_SCP_SDCA_INT1		0x58
#define SDW_SCP_SDCA_INT_SDCA_0		BIT(0)
#define SDW_SCP_SDCA_INT_SDCA_1		BIT(1)
#define SDW_SCP_SDCA_INT_SDCA_2		BIT(2)
#define SDW_SCP_SDCA_INT_SDCA_3		BIT(3)
#define SDW_SCP_SDCA_INT_SDCA_4		BIT(4)
#define SDW_SCP_SDCA_INT_SDCA_5		BIT(5)
#define SDW_SCP_SDCA_INT_SDCA_6		BIT(6)
#define SDW_SCP_SDCA_INT_SDCA_7		BIT(7)

#define SDW_SCP_SDCA_INT2		0x59
#define SDW_SCP_SDCA_INT_SDCA_8		BIT(0)
#define SDW_SCP_SDCA_INT_SDCA_9		BIT(1)
#define SDW_SCP_SDCA_INT_SDCA_10	BIT(2)
#define SDW_SCP_SDCA_INT_SDCA_11	BIT(3)
#define SDW_SCP_SDCA_INT_SDCA_12	BIT(4)
#define SDW_SCP_SDCA_INT_SDCA_13	BIT(5)
#define SDW_SCP_SDCA_INT_SDCA_14	BIT(6)
#define SDW_SCP_SDCA_INT_SDCA_15	BIT(7)

#define SDW_SCP_SDCA_INT3		0x5A
#define SDW_SCP_SDCA_INT_SDCA_16	BIT(0)
#define SDW_SCP_SDCA_INT_SDCA_17	BIT(1)
#define SDW_SCP_SDCA_INT_SDCA_18	BIT(2)
#define SDW_SCP_SDCA_INT_SDCA_19	BIT(3)
#define SDW_SCP_SDCA_INT_SDCA_20	BIT(4)
#define SDW_SCP_SDCA_INT_SDCA_21	BIT(5)
#define SDW_SCP_SDCA_INT_SDCA_22	BIT(6)
#define SDW_SCP_SDCA_INT_SDCA_23	BIT(7)

#define SDW_SCP_SDCA_INT4		0x5B
#define SDW_SCP_SDCA_INT_SDCA_24	BIT(0)
#define SDW_SCP_SDCA_INT_SDCA_25	BIT(1)
#define SDW_SCP_SDCA_INT_SDCA_26	BIT(2)
#define SDW_SCP_SDCA_INT_SDCA_27	BIT(3)
#define SDW_SCP_SDCA_INT_SDCA_28	BIT(4)
#define SDW_SCP_SDCA_INT_SDCA_29	BIT(5)
#define SDW_SCP_SDCA_INT_SDCA_30	BIT(6)
/* BIT(7) not allocated in SoundWire 1.2 specification */

#define SDW_SCP_SDCA_INTMASK1		0x5C
#define SDW_SCP_SDCA_INTMASK_SDCA_0	BIT(0)
#define SDW_SCP_SDCA_INTMASK_SDCA_1	BIT(1)
#define SDW_SCP_SDCA_INTMASK_SDCA_2	BIT(2)
#define SDW_SCP_SDCA_INTMASK_SDCA_3	BIT(3)
#define SDW_SCP_SDCA_INTMASK_SDCA_4	BIT(4)
#define SDW_SCP_SDCA_INTMASK_SDCA_5	BIT(5)
#define SDW_SCP_SDCA_INTMASK_SDCA_6	BIT(6)
#define SDW_SCP_SDCA_INTMASK_SDCA_7	BIT(7)

#define SDW_SCP_SDCA_INTMASK2		0x5D
#define SDW_SCP_SDCA_INTMASK_SDCA_8	BIT(0)
#define SDW_SCP_SDCA_INTMASK_SDCA_9	BIT(1)
#define SDW_SCP_SDCA_INTMASK_SDCA_10	BIT(2)
#define SDW_SCP_SDCA_INTMASK_SDCA_11	BIT(3)
#define SDW_SCP_SDCA_INTMASK_SDCA_12	BIT(4)
#define SDW_SCP_SDCA_INTMASK_SDCA_13	BIT(5)
#define SDW_SCP_SDCA_INTMASK_SDCA_14	BIT(6)
#define SDW_SCP_SDCA_INTMASK_SDCA_15	BIT(7)

#define SDW_SCP_SDCA_INTMASK3		0x5E
#define SDW_SCP_SDCA_INTMASK_SDCA_16	BIT(0)
#define SDW_SCP_SDCA_INTMASK_SDCA_17	BIT(1)
#define SDW_SCP_SDCA_INTMASK_SDCA_18	BIT(2)
#define SDW_SCP_SDCA_INTMASK_SDCA_19	BIT(3)
#define SDW_SCP_SDCA_INTMASK_SDCA_20	BIT(4)
#define SDW_SCP_SDCA_INTMASK_SDCA_21	BIT(5)
#define SDW_SCP_SDCA_INTMASK_SDCA_22	BIT(6)
#define SDW_SCP_SDCA_INTMASK_SDCA_23	BIT(7)

#define SDW_SCP_SDCA_INTMASK4		0x5F
#define SDW_SCP_SDCA_INTMASK_SDCA_24	BIT(0)
#define SDW_SCP_SDCA_INTMASK_SDCA_25	BIT(1)
#define SDW_SCP_SDCA_INTMASK_SDCA_26	BIT(2)
#define SDW_SCP_SDCA_INTMASK_SDCA_27	BIT(3)
#define SDW_SCP_SDCA_INTMASK_SDCA_28	BIT(4)
#define SDW_SCP_SDCA_INTMASK_SDCA_29	BIT(5)
#define SDW_SCP_SDCA_INTMASK_SDCA_30	BIT(6)
/* BIT(7) not allocated in SoundWire 1.2 specification */

/* Banked Registers */
#define SDW_SCP_FRAMECTRL_B0		0x60
#define SDW_SCP_FRAMECTRL_B1		(0x60 + SDW_BANK1_OFFSET)
#define SDW_SCP_NEXTFRAME_B0		0x61
#define SDW_SCP_NEXTFRAME_B1		(0x61 + SDW_BANK1_OFFSET)

#define SDW_SCP_BUSCLOCK_SCALE_B0	0x62
#define SDW_SCP_BUSCLOCK_SCALE_B1	(0x62 + SDW_BANK1_OFFSET)
#define SDW_SCP_CLOCK_SCALE		GENMASK(3, 0)

/* PHY registers - CTRL and STAT are the same address */
#define SDW_SCP_PHY_OUT_CTRL_0		0x80
#define SDW_SCP_PHY_OUT_CTRL_1		0x81
#define SDW_SCP_PHY_OUT_CTRL_2		0x82
#define SDW_SCP_PHY_OUT_CTRL_3		0x83
#define SDW_SCP_PHY_OUT_CTRL_4		0x84
#define SDW_SCP_PHY_OUT_CTRL_5		0x85
#define SDW_SCP_PHY_OUT_CTRL_6		0x86
#define SDW_SCP_PHY_OUT_CTRL_7		0x87

#define SDW_SCP_CAP_LOAD_CTRL		GENMASK(2, 0)
#define SDW_SCP_DRIVE_STRENGTH_CTRL	GENMASK(5, 3)
#define SDW_SCP_SLEW_TIME_CTRL		GENMASK(7, 6)

/* Both INT and STATUS register is same */
#define SDW_DPN_INT(n)			(0x0 + SDW_DPN_SIZE * (n))
#define SDW_DPN_INTMASK(n)		(0x1 + SDW_DPN_SIZE * (n))
#define SDW_DPN_PORTCTRL(n)		(0x2 + SDW_DPN_SIZE * (n))
#define SDW_DPN_BLOCKCTRL1(n)		(0x3 + SDW_DPN_SIZE * (n))
#define SDW_DPN_PREPARESTATUS(n)	(0x4 + SDW_DPN_SIZE * (n))
#define SDW_DPN_PREPARECTRL(n)		(0x5 + SDW_DPN_SIZE * (n))

#define SDW_DPN_INT_TEST_FAIL		BIT(0)
#define SDW_DPN_INT_PORT_READY		BIT(1)
#define SDW_DPN_INT_IMPDEF1		BIT(5)
#define SDW_DPN_INT_IMPDEF2		BIT(6)
#define SDW_DPN_INT_IMPDEF3		BIT(7)
#define SDW_DPN_INTERRUPTS		(SDW_DPN_INT_TEST_FAIL | \
					 SDW_DPN_INT_PORT_READY | \
					 SDW_DPN_INT_IMPDEF1 | \
					 SDW_DPN_INT_IMPDEF2 | \
					 SDW_DPN_INT_IMPDEF3)

#define SDW_DPN_PORTCTRL_FLOWMODE	GENMASK(1, 0)
#define SDW_DPN_PORTCTRL_DATAMODE	GENMASK(3, 2)
#define SDW_DPN_PORTCTRL_NXTINVBANK	BIT(4)

#define SDW_DPN_BLOCKCTRL1_WDLEN	GENMASK(5, 0)

#define SDW_DPN_PREPARECTRL_CH_PREP	GENMASK(7, 0)

/* Per-port banked registers: _B1 variant = _B0 address + SDW_BANK1_OFFSET. */
#define SDW_DPN_CHANNELEN_B0(n)		(0x20 + SDW_DPN_SIZE * (n))
#define SDW_DPN_CHANNELEN_B1(n)		(0x30 + SDW_DPN_SIZE * (n))

#define SDW_DPN_BLOCKCTRL2_B0(n)	(0x21 + SDW_DPN_SIZE * (n))
#define SDW_DPN_BLOCKCTRL2_B1(n)	(0x31 + SDW_DPN_SIZE * (n))

#define SDW_DPN_SAMPLECTRL1_B0(n)	(0x22 + SDW_DPN_SIZE * (n))
#define SDW_DPN_SAMPLECTRL1_B1(n)	(0x32 + SDW_DPN_SIZE * (n))

#define SDW_DPN_SAMPLECTRL2_B0(n)	(0x23 + SDW_DPN_SIZE * (n))
#define SDW_DPN_SAMPLECTRL2_B1(n)	(0x33 + SDW_DPN_SIZE * (n))

#define SDW_DPN_OFFSETCTRL1_B0(n)	(0x24 + SDW_DPN_SIZE * (n))
#define SDW_DPN_OFFSETCTRL1_B1(n)	(0x34 + SDW_DPN_SIZE * (n))

#define SDW_DPN_OFFSETCTRL2_B0(n)	(0x25 + SDW_DPN_SIZE * (n))
#define SDW_DPN_OFFSETCTRL2_B1(n)	(0x35 + SDW_DPN_SIZE * (n))

#define SDW_DPN_HCTRL_B0(n)		(0x26 + SDW_DPN_SIZE * (n))
#define SDW_DPN_HCTRL_B1(n)		(0x36 + SDW_DPN_SIZE * (n))

#define SDW_DPN_BLOCKCTRL3_B0(n)	(0x27 + SDW_DPN_SIZE * (n))
#define SDW_DPN_BLOCKCTRL3_B1(n)	(0x37 + SDW_DPN_SIZE * (n))

#define SDW_DPN_LANECTRL_B0(n)		(0x28 + SDW_DPN_SIZE * (n))
#define SDW_DPN_LANECTRL_B1(n)		(0x38 + SDW_DPN_SIZE * (n))

#define SDW_DPN_SAMPLECTRL_LOW		GENMASK(7, 0)
#define SDW_DPN_SAMPLECTRL_HIGH		GENMASK(15, 8)

#define SDW_DPN_HCTRL_HSTART		GENMASK(7, 4)
#define SDW_DPN_HCTRL_HSTOP		GENMASK(3, 0)

#define SDW_NUM_CASC_PORT_INTSTAT1	4
#define SDW_CASC_PORT_START_INTSTAT1	0
#define SDW_CASC_PORT_MASK_INTSTAT1	0x8
#define SDW_CASC_PORT_REG_OFFSET_INTSTAT1	0x0

#define SDW_NUM_CASC_PORT_INTSTAT2	7
#define SDW_CASC_PORT_START_INTSTAT2	4
#define SDW_CASC_PORT_MASK_INTSTAT2	1
#define SDW_CASC_PORT_REG_OFFSET_INTSTAT2	1

#define SDW_NUM_CASC_PORT_INTSTAT3	4
#define SDW_CASC_PORT_START_INTSTAT3	11
#define SDW_CASC_PORT_MASK_INTSTAT3	1
#define SDW_CASC_PORT_REG_OFFSET_INTSTAT3	2

/*
 * v1.2 device - SDCA address mapping
 *
 * Spec definition
 *	Bits		Contents
 *	31		0 (required by addressing range)
 *	30:26		0b10000 (Control Prefix)
 *	25		0 (Reserved)
 *	24:22		Function Number [2:0]
 *	21		Entity[6]
 *	20:19		Control Selector[5:4]
 *	18		0 (Reserved)
 *	17:15		Control Number[5:3]
 *	14		Next
 *	13		MBQ
 *	12:7		Entity[5:0]
 *	6:3		Control Selector[3:0]
 *	2:0		Control Number[2:0]
 */
#define SDW_SDCA_CTL(fun, ent, ctl, ch)		(BIT(30) |			\
						 (((fun) & 0x7) << 22) |	\
						 (((ent) & 0x40) << 15) |	\
						 (((ent) & 0x3f) << 7) |	\
						 (((ctl) & 0x30) << 15) |	\
						 (((ctl) & 0x0f) << 3) |	\
						 (((ch) & 0x38) << 12) |	\
						 ((ch) & 0x07))

/* Set the MBQ (bit 13) / Next (bit 14) flags on an SDCA control address. */
#define SDW_SDCA_MBQ_CTL(reg)			((reg) | BIT(13))
#define SDW_SDCA_NEXT_CTL(reg)			((reg) | BIT(14))

#endif /* __SDW_REGISTERS_H */
/* * Copyright 2022 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "amdgpu.h" #include "amdgpu_lsdma.h" #define AMDGPU_LSDMA_MAX_SIZE 0x2000000ULL int amdgpu_lsdma_wait_for(struct amdgpu_device *adev, uint32_t reg_index, uint32_t reg_val, uint32_t mask) { uint32_t val; int i; for (i = 0; i < adev->usec_timeout; i++) { val = RREG32(reg_index); if ((val & mask) == reg_val) return 0; udelay(1); } return -ETIME; } int amdgpu_lsdma_copy_mem(struct amdgpu_device *adev, uint64_t src_addr, uint64_t dst_addr, uint64_t mem_size) { int ret; if (mem_size == 0) return -EINVAL; while (mem_size > 0) { uint64_t current_copy_size = min(mem_size, AMDGPU_LSDMA_MAX_SIZE); ret = adev->lsdma.funcs->copy_mem(adev, src_addr, dst_addr, current_copy_size); if (ret) return ret; src_addr += current_copy_size; dst_addr += current_copy_size; mem_size -= current_copy_size; } return 0; } int amdgpu_lsdma_fill_mem(struct amdgpu_device *adev, uint64_t dst_addr, uint32_t data, uint64_t mem_size) { int ret; if (mem_size == 0) return -EINVAL; while (mem_size > 0) { uint64_t current_fill_size = min(mem_size, AMDGPU_LSDMA_MAX_SIZE); ret = adev->lsdma.funcs->fill_mem(adev, dst_addr, data, current_fill_size); if (ret) return ret; dst_addr += current_fill_size; mem_size -= current_fill_size; } return 0; }
// SPDX-License-Identifier: GPL-2.0-only /* * MAX11410 SPI ADC driver * * Copyright 2022 Analog Devices Inc. */ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include <linux/spi/spi.h> #include <linux/unaligned.h> #include <linux/iio/buffer.h> #include <linux/iio/sysfs.h> #include <linux/iio/trigger.h> #include <linux/iio/trigger_consumer.h> #include <linux/iio/triggered_buffer.h> #define MAX11410_REG_CONV_START 0x01 #define MAX11410_CONV_TYPE_SINGLE 0x00 #define MAX11410_CONV_TYPE_CONTINUOUS 0x01 #define MAX11410_REG_CAL_START 0x03 #define MAX11410_CAL_START_SELF 0x00 #define MAX11410_CAL_START_PGA 0x01 #define MAX11410_REG_GPIO_CTRL(ch) ((ch) ? 0x05 : 0x04) #define MAX11410_GPIO_INTRB 0xC1 #define MAX11410_REG_FILTER 0x08 #define MAX11410_FILTER_RATE_MASK GENMASK(3, 0) #define MAX11410_FILTER_RATE_MAX 0x0F #define MAX11410_FILTER_LINEF_MASK GENMASK(5, 4) #define MAX11410_FILTER_50HZ BIT(5) #define MAX11410_FILTER_60HZ BIT(4) #define MAX11410_REG_CTRL 0x09 #define MAX11410_CTRL_REFSEL_MASK GENMASK(2, 0) #define MAX11410_CTRL_VREFN_BUF_BIT BIT(3) #define MAX11410_CTRL_VREFP_BUF_BIT BIT(4) #define MAX11410_CTRL_FORMAT_BIT BIT(5) #define MAX11410_CTRL_UNIPOLAR_BIT BIT(6) #define MAX11410_REG_MUX_CTRL0 0x0B #define MAX11410_REG_PGA 0x0E #define MAX11410_PGA_GAIN_MASK GENMASK(2, 0) #define MAX11410_PGA_SIG_PATH_MASK GENMASK(5, 4) #define MAX11410_PGA_SIG_PATH_BUFFERED 0x00 #define MAX11410_PGA_SIG_PATH_BYPASS 0x01 #define MAX11410_PGA_SIG_PATH_PGA 0x02 #define MAX11410_REG_DATA0 0x30 #define MAX11410_REG_STATUS 0x38 #define MAX11410_STATUS_CONV_READY_BIT BIT(0) #define MAX11410_STATUS_CAL_READY_BIT BIT(2) #define MAX11410_REFSEL_AVDD_AGND 0x03 #define MAX11410_REFSEL_MAX 0x06 #define MAX11410_SIG_PATH_MAX 0x02 #define MAX11410_CHANNEL_INDEX_MAX 0x0A #define 
MAX11410_AINP_AVDD 0x0A #define MAX11410_AINN_GND 0x0A #define MAX11410_CONVERSION_TIMEOUT_MS 2000 #define MAX11410_CALIB_TIMEOUT_MS 2000 #define MAX11410_SCALE_AVAIL_SIZE 8 enum max11410_filter { MAX11410_FILTER_FIR5060, MAX11410_FILTER_FIR50, MAX11410_FILTER_FIR60, MAX11410_FILTER_SINC4, }; static const u8 max11410_sampling_len[] = { [MAX11410_FILTER_FIR5060] = 5, [MAX11410_FILTER_FIR50] = 6, [MAX11410_FILTER_FIR60] = 6, [MAX11410_FILTER_SINC4] = 10, }; static const int max11410_sampling_rates[4][10][2] = { [MAX11410_FILTER_FIR5060] = { { 1, 100000 }, { 2, 100000 }, { 4, 200000 }, { 8, 400000 }, { 16, 800000 } }, [MAX11410_FILTER_FIR50] = { { 1, 300000 }, { 2, 700000 }, { 5, 300000 }, { 10, 700000 }, { 21, 300000 }, { 40 } }, [MAX11410_FILTER_FIR60] = { { 1, 300000 }, { 2, 700000 }, { 5, 300000 }, { 10, 700000 }, { 21, 300000 }, { 40 } }, [MAX11410_FILTER_SINC4] = { { 4 }, { 10 }, { 20 }, { 40 }, { 60 }, { 120 }, { 240 }, { 480 }, { 960 }, { 1920 } } }; struct max11410_channel_config { u32 settling_time_us; u32 *scale_avail; u8 refsel; u8 sig_path; u8 gain; bool bipolar; bool buffered_vrefp; bool buffered_vrefn; }; struct max11410_state { struct spi_device *spi_dev; struct iio_trigger *trig; struct completion completion; struct mutex lock; /* Prevent changing channel config during sampling */ struct regmap *regmap; struct regulator *avdd; struct regulator *vrefp[3]; struct regulator *vrefn[3]; struct max11410_channel_config *channels; int irq; struct { u32 data __aligned(IIO_DMA_MINALIGN); s64 ts __aligned(8); } scan; }; static const struct iio_chan_spec chanspec_template = { .type = IIO_VOLTAGE, .indexed = 1, .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_OFFSET), .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), .scan_type = { .sign = 's', .realbits = 24, .storagebits = 32, .endianness = IIO_LE, }, }; static unsigned int 
max11410_reg_size(unsigned int reg) { /* Registers from 0x00 to 0x10 are 1 byte, the rest are 3 bytes long. */ return reg <= 0x10 ? 1 : 3; } static int max11410_write_reg(struct max11410_state *st, unsigned int reg, unsigned int val) { /* This driver only needs to write 8-bit registers */ if (max11410_reg_size(reg) != 1) return -EINVAL; return regmap_write(st->regmap, reg, val); } static int max11410_read_reg(struct max11410_state *st, unsigned int reg, int *val) { int ret; if (max11410_reg_size(reg) == 3) { ret = regmap_bulk_read(st->regmap, reg, &st->scan.data, 3); if (ret) return ret; *val = get_unaligned_be24(&st->scan.data); return 0; } return regmap_read(st->regmap, reg, val); } static struct regulator *max11410_get_vrefp(struct max11410_state *st, u8 refsel) { refsel = refsel % 4; if (refsel == 3) return st->avdd; return st->vrefp[refsel]; } static struct regulator *max11410_get_vrefn(struct max11410_state *st, u8 refsel) { if (refsel > 2) return NULL; return st->vrefn[refsel]; } static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0x39, }; static ssize_t max11410_notch_en_show(struct device *dev, struct device_attribute *devattr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct max11410_state *state = iio_priv(indio_dev); struct iio_dev_attr *iio_attr = to_iio_dev_attr(devattr); unsigned int val; int ret; ret = max11410_read_reg(state, MAX11410_REG_FILTER, &val); if (ret) return ret; switch (iio_attr->address) { case 0: val = !FIELD_GET(MAX11410_FILTER_50HZ, val); break; case 1: val = !FIELD_GET(MAX11410_FILTER_60HZ, val); break; case 2: val = FIELD_GET(MAX11410_FILTER_LINEF_MASK, val) == 3; break; default: return -EINVAL; } return sysfs_emit(buf, "%d\n", val); } static ssize_t max11410_notch_en_store(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct iio_dev_attr *iio_attr = to_iio_dev_attr(devattr); struct iio_dev *indio_dev = dev_get_drvdata(dev); 
struct max11410_state *state = iio_priv(indio_dev); unsigned int filter_bits; bool enable; int ret; ret = kstrtobool(buf, &enable); if (ret) return ret; switch (iio_attr->address) { case 0: filter_bits = MAX11410_FILTER_50HZ; break; case 1: filter_bits = MAX11410_FILTER_60HZ; break; case 2: default: filter_bits = MAX11410_FILTER_50HZ | MAX11410_FILTER_60HZ; enable = !enable; break; } if (enable) ret = regmap_clear_bits(state->regmap, MAX11410_REG_FILTER, filter_bits); else ret = regmap_set_bits(state->regmap, MAX11410_REG_FILTER, filter_bits); if (ret) return ret; return count; } static ssize_t in_voltage_filter2_notch_center_show(struct device *dev, struct device_attribute *devattr, char *buf) { struct iio_dev *indio_dev = dev_get_drvdata(dev); struct max11410_state *state = iio_priv(indio_dev); int ret, reg, rate, filter; ret = regmap_read(state->regmap, MAX11410_REG_FILTER, &reg); if (ret) return ret; rate = FIELD_GET(MAX11410_FILTER_RATE_MASK, reg); rate = clamp_val(rate, 0, max11410_sampling_len[MAX11410_FILTER_SINC4] - 1); filter = max11410_sampling_rates[MAX11410_FILTER_SINC4][rate][0]; return sysfs_emit(buf, "%d\n", filter); } static IIO_CONST_ATTR(in_voltage_filter0_notch_center, "50"); static IIO_CONST_ATTR(in_voltage_filter1_notch_center, "60"); static IIO_DEVICE_ATTR_RO(in_voltage_filter2_notch_center, 2); static IIO_DEVICE_ATTR(in_voltage_filter0_notch_en, 0644, max11410_notch_en_show, max11410_notch_en_store, 0); static IIO_DEVICE_ATTR(in_voltage_filter1_notch_en, 0644, max11410_notch_en_show, max11410_notch_en_store, 1); static IIO_DEVICE_ATTR(in_voltage_filter2_notch_en, 0644, max11410_notch_en_show, max11410_notch_en_store, 2); static struct attribute *max11410_attributes[] = { &iio_const_attr_in_voltage_filter0_notch_center.dev_attr.attr, &iio_const_attr_in_voltage_filter1_notch_center.dev_attr.attr, &iio_dev_attr_in_voltage_filter2_notch_center.dev_attr.attr, &iio_dev_attr_in_voltage_filter0_notch_en.dev_attr.attr, 
&iio_dev_attr_in_voltage_filter1_notch_en.dev_attr.attr, &iio_dev_attr_in_voltage_filter2_notch_en.dev_attr.attr, NULL }; static const struct attribute_group max11410_attribute_group = { .attrs = max11410_attributes, }; static int max11410_set_input_mux(struct max11410_state *st, u8 ainp, u8 ainn) { if (ainp > MAX11410_CHANNEL_INDEX_MAX || ainn > MAX11410_CHANNEL_INDEX_MAX) return -EINVAL; return max11410_write_reg(st, MAX11410_REG_MUX_CTRL0, (ainp << 4) | ainn); } static int max11410_configure_channel(struct max11410_state *st, struct iio_chan_spec const *chan) { struct max11410_channel_config cfg = st->channels[chan->address]; unsigned int regval; int ret; if (chan->differential) ret = max11410_set_input_mux(st, chan->channel, chan->channel2); else ret = max11410_set_input_mux(st, chan->channel, MAX11410_AINN_GND); if (ret) return ret; regval = FIELD_PREP(MAX11410_CTRL_VREFP_BUF_BIT, cfg.buffered_vrefp) | FIELD_PREP(MAX11410_CTRL_VREFN_BUF_BIT, cfg.buffered_vrefn) | FIELD_PREP(MAX11410_CTRL_REFSEL_MASK, cfg.refsel) | FIELD_PREP(MAX11410_CTRL_UNIPOLAR_BIT, cfg.bipolar ? 0 : 1); ret = regmap_update_bits(st->regmap, MAX11410_REG_CTRL, MAX11410_CTRL_REFSEL_MASK | MAX11410_CTRL_VREFP_BUF_BIT | MAX11410_CTRL_VREFN_BUF_BIT | MAX11410_CTRL_UNIPOLAR_BIT, regval); if (ret) return ret; regval = FIELD_PREP(MAX11410_PGA_SIG_PATH_MASK, cfg.sig_path) | FIELD_PREP(MAX11410_PGA_GAIN_MASK, cfg.gain); ret = regmap_write(st->regmap, MAX11410_REG_PGA, regval); if (ret) return ret; if (cfg.settling_time_us) fsleep(cfg.settling_time_us); return 0; } static int max11410_sample(struct max11410_state *st, int *sample_raw, struct iio_chan_spec const *chan) { int val, ret; ret = max11410_configure_channel(st, chan); if (ret) return ret; if (st->irq > 0) reinit_completion(&st->completion); /* Start Conversion */ ret = max11410_write_reg(st, MAX11410_REG_CONV_START, MAX11410_CONV_TYPE_SINGLE); if (ret) return ret; if (st->irq > 0) { /* Wait for an interrupt. 
*/ ret = wait_for_completion_timeout(&st->completion, msecs_to_jiffies(MAX11410_CONVERSION_TIMEOUT_MS)); if (!ret) return -ETIMEDOUT; } else { int ret2; /* Wait for status register Conversion Ready flag */ ret = read_poll_timeout(max11410_read_reg, ret2, ret2 || (val & MAX11410_STATUS_CONV_READY_BIT), 5000, MAX11410_CONVERSION_TIMEOUT_MS * 1000, true, st, MAX11410_REG_STATUS, &val); if (ret) return ret; if (ret2) return ret2; } /* Read ADC Data */ return max11410_read_reg(st, MAX11410_REG_DATA0, sample_raw); } static int max11410_get_scale(struct max11410_state *state, struct max11410_channel_config cfg) { struct regulator *vrefp, *vrefn; int scale; vrefp = max11410_get_vrefp(state, cfg.refsel); scale = regulator_get_voltage(vrefp) / 1000; vrefn = max11410_get_vrefn(state, cfg.refsel); if (vrefn) scale -= regulator_get_voltage(vrefn) / 1000; if (cfg.bipolar) scale *= 2; return scale >> cfg.gain; } static int max11410_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long info) { struct max11410_state *state = iio_priv(indio_dev); struct max11410_channel_config cfg = state->channels[chan->address]; int ret, reg_val, filter, rate; switch (info) { case IIO_CHAN_INFO_SCALE: *val = max11410_get_scale(state, cfg); *val2 = chan->scan_type.realbits; return IIO_VAL_FRACTIONAL_LOG2; case IIO_CHAN_INFO_OFFSET: if (cfg.bipolar) *val = -BIT(chan->scan_type.realbits - 1); else *val = 0; return IIO_VAL_INT; case IIO_CHAN_INFO_RAW: ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; mutex_lock(&state->lock); ret = max11410_sample(state, &reg_val, chan); mutex_unlock(&state->lock); iio_device_release_direct_mode(indio_dev); if (ret) return ret; *val = reg_val; return IIO_VAL_INT; case IIO_CHAN_INFO_SAMP_FREQ: ret = regmap_read(state->regmap, MAX11410_REG_FILTER, &reg_val); if (ret) return ret; filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val); rate = reg_val & MAX11410_FILTER_RATE_MASK; if (rate >= 
max11410_sampling_len[filter]) rate = max11410_sampling_len[filter] - 1; *val = max11410_sampling_rates[filter][rate][0]; *val2 = max11410_sampling_rates[filter][rate][1]; return IIO_VAL_INT_PLUS_MICRO; } return -EINVAL; } static int max11410_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int val, int val2, long mask) { struct max11410_state *st = iio_priv(indio_dev); int i, ret, reg_val, filter, gain; u32 *scale_avail; switch (mask) { case IIO_CHAN_INFO_SCALE: scale_avail = st->channels[chan->address].scale_avail; if (!scale_avail) return -EOPNOTSUPP; /* Accept values in range 0.000001 <= scale < 1.000000 */ if (val != 0 || val2 == 0) return -EINVAL; ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; /* Convert from INT_PLUS_MICRO to FRACTIONAL_LOG2 */ val2 = val2 * DIV_ROUND_CLOSEST(BIT(24), 1000000); val2 = DIV_ROUND_CLOSEST(scale_avail[0], val2); gain = order_base_2(val2); st->channels[chan->address].gain = clamp_val(gain, 0, 7); iio_device_release_direct_mode(indio_dev); return 0; case IIO_CHAN_INFO_SAMP_FREQ: ret = iio_device_claim_direct_mode(indio_dev); if (ret) return ret; mutex_lock(&st->lock); ret = regmap_read(st->regmap, MAX11410_REG_FILTER, &reg_val); if (ret) goto out; filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val); for (i = 0; i < max11410_sampling_len[filter]; ++i) { if (val == max11410_sampling_rates[filter][i][0] && val2 == max11410_sampling_rates[filter][i][1]) break; } if (i == max11410_sampling_len[filter]) { ret = -EINVAL; goto out; } ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER, MAX11410_FILTER_RATE_MASK, i); out: mutex_unlock(&st->lock); iio_device_release_direct_mode(indio_dev); return ret; default: return -EINVAL; } } static int max11410_read_avail(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, const int **vals, int *type, int *length, long info) { struct max11410_state *st = iio_priv(indio_dev); struct max11410_channel_config cfg; int ret, reg_val, filter; switch 
/*
 * Tail of max11410_read_avail() (its opening is outside this chunk): reports
 * the per-channel lists a userspace client may choose from.
 */
(info) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		/* Available sample rates depend on the current filter type. */
		ret = regmap_read(st->regmap, MAX11410_REG_FILTER, &reg_val);
		if (ret)
			return ret;
		filter = FIELD_GET(MAX11410_FILTER_LINEF_MASK, reg_val);
		*vals = (const int *)max11410_sampling_rates[filter];
		/* Each rate is an (int, micro) pair, hence the * 2. */
		*length = max11410_sampling_len[filter] * 2;
		*type = IIO_VAL_INT_PLUS_MICRO;
		return IIO_AVAIL_LIST;
	case IIO_CHAN_INFO_SCALE:
		cfg = st->channels[chan->address];
		/* scale_avail is only allocated for PGA-mode channels. */
		if (!cfg.scale_avail)
			return -EINVAL;
		*vals = cfg.scale_avail;
		*length = MAX11410_SCALE_AVAIL_SIZE * 2;
		*type = IIO_VAL_FRACTIONAL_LOG2;
		return IIO_AVAIL_LIST;
	}
	return -EINVAL;
}

static const struct iio_info max11410_info = {
	.read_raw = max11410_read_raw,
	.write_raw = max11410_write_raw,
	.read_avail = max11410_read_avail,
	.attrs = &max11410_attribute_group,
};

/*
 * Triggered-buffer bottom half: read the latest conversion result and push
 * it (plus a timestamp) into the IIO buffer.
 */
static irqreturn_t max11410_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct max11410_state *st = iio_priv(indio_dev);
	int ret;

	ret = max11410_read_reg(st, MAX11410_REG_DATA0, &st->scan.data);
	if (ret) {
		dev_err(&indio_dev->dev, "cannot read data\n");
		goto out;
	}

	iio_push_to_buffers_with_timestamp(indio_dev, &st->scan,
					   iio_get_time_ns(indio_dev));
out:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

/*
 * Buffer enable: configure the single enabled scan channel (scan mask is
 * validated one-hot elsewhere) and kick off continuous conversions.
 */
static int max11410_buffer_postenable(struct iio_dev *indio_dev)
{
	struct max11410_state *st = iio_priv(indio_dev);
	int scan_ch, ret;

	/* One-hot mask: ffs() finds the single enabled channel index. */
	scan_ch = ffs(*indio_dev->active_scan_mask) - 1;

	ret = max11410_configure_channel(st, &indio_dev->channels[scan_ch]);
	if (ret)
		return ret;

	/* Start continuous conversion. */
	return max11410_write_reg(st, MAX11410_REG_CONV_START,
				  MAX11410_CONV_TYPE_CONTINUOUS);
}

static int max11410_buffer_predisable(struct iio_dev *indio_dev)
{
	struct max11410_state *st = iio_priv(indio_dev);

	/* Stop continuous conversion. */
	return max11410_write_reg(st, MAX11410_REG_CONV_START,
				  MAX11410_CONV_TYPE_SINGLE);
}

static const struct iio_buffer_setup_ops max11410_buffer_ops = {
	.postenable = &max11410_buffer_postenable,
	.predisable = &max11410_buffer_predisable,
	.validate_scan_mask = &iio_validate_scan_mask_onehot,
};

static const struct iio_trigger_ops max11410_trigger_ops = {
	.validate_device = iio_trigger_validate_own_device,
};

/*
 * Data-ready interrupt (GPIO INTRB): in buffered mode feed the trigger, in
 * direct mode wake whoever is waiting for a single conversion.
 */
static irqreturn_t max11410_interrupt(int irq, void *dev_id)
{
	struct iio_dev *indio_dev = dev_id;
	struct max11410_state *st = iio_priv(indio_dev);

	if (iio_buffer_enabled(indio_dev))
		iio_trigger_poll_nested(st->trig);
	else
		complete(&st->completion);

	return IRQ_HANDLED;
};

/*
 * Build the iio_chan_spec array and per-channel configuration from firmware
 * (DT) child nodes. One extra slot is reserved for the soft timestamp
 * channel. Returns 0 or a negative errno (with a probe error message).
 */
static int max11410_parse_channels(struct max11410_state *st,
				   struct iio_dev *indio_dev)
{
	struct iio_chan_spec chanspec = chanspec_template;
	struct device *dev = &st->spi_dev->dev;
	struct max11410_channel_config *cfg;
	struct iio_chan_spec *channels;
	u32 reference, sig_path;
	const char *node_name;
	u32 inputs[2], scale;
	unsigned int num_ch;
	int chan_idx = 0;
	int ret, i;

	num_ch = device_get_child_node_count(dev);
	if (num_ch == 0)
		return dev_err_probe(&indio_dev->dev, -ENODEV,
				     "FW has no channels defined\n");

	/* Reserve space for soft timestamp channel */
	num_ch++;
	channels = devm_kcalloc(dev, num_ch, sizeof(*channels), GFP_KERNEL);
	if (!channels)
		return -ENOMEM;

	st->channels = devm_kcalloc(dev, num_ch, sizeof(*st->channels),
				    GFP_KERNEL);
	if (!st->channels)
		return -ENOMEM;

	device_for_each_child_node_scoped(dev, child) {
		node_name = fwnode_get_name(child);
		/* "diff-channels" selects a differential pair; else "reg". */
		if (fwnode_property_present(child, "diff-channels")) {
			ret = fwnode_property_read_u32_array(child,
							     "diff-channels",
							     inputs,
							     ARRAY_SIZE(inputs));
			chanspec.differential = 1;
		} else {
			ret = fwnode_property_read_u32(child, "reg",
						       &inputs[0]);
			inputs[1] = 0;
			chanspec.differential = 0;
		}
		if (ret)
			return ret;

		if (inputs[0] > MAX11410_CHANNEL_INDEX_MAX ||
		    inputs[1] > MAX11410_CHANNEL_INDEX_MAX)
			return dev_err_probe(&indio_dev->dev, -EINVAL,
					     "Invalid channel index for %s, should be less than %d\n",
					     node_name,
					     MAX11410_CHANNEL_INDEX_MAX + 1);

		cfg = &st->channels[chan_idx];

		/* Optional properties fall back to the defaults set here. */
		reference = MAX11410_REFSEL_AVDD_AGND;
		fwnode_property_read_u32(child, "adi,reference", &reference);
		if (reference > MAX11410_REFSEL_MAX)
			return dev_err_probe(&indio_dev->dev, -EINVAL,
					     "Invalid adi,reference value for %s, should be less than %d.\n",
					     node_name, MAX11410_REFSEL_MAX + 1);

		if (!max11410_get_vrefp(st, reference) ||
		    (!max11410_get_vrefn(st, reference) && reference <= 2))
			return dev_err_probe(&indio_dev->dev, -EINVAL,
					     "Invalid VREF configuration for %s, either specify corresponding VREF regulators or change adi,reference property.\n",
					     node_name);

		sig_path = MAX11410_PGA_SIG_PATH_BUFFERED;
		fwnode_property_read_u32(child, "adi,input-mode", &sig_path);
		if (sig_path > MAX11410_SIG_PATH_MAX)
			return dev_err_probe(&indio_dev->dev, -EINVAL,
					     "Invalid adi,input-mode value for %s, should be less than %d.\n",
					     node_name, MAX11410_SIG_PATH_MAX + 1);

		fwnode_property_read_u32(child, "settling-time-us",
					 &cfg->settling_time_us);
		cfg->bipolar = fwnode_property_read_bool(child, "bipolar");
		cfg->buffered_vrefp = fwnode_property_read_bool(child, "adi,buffered-vrefp");
		cfg->buffered_vrefn = fwnode_property_read_bool(child, "adi,buffered-vrefn");
		cfg->refsel = reference;
		cfg->sig_path = sig_path;
		cfg->gain = 0;

		/* Enable scale_available property if input mode is PGA */
		if (sig_path == MAX11410_PGA_SIG_PATH_PGA) {
			__set_bit(IIO_CHAN_INFO_SCALE,
				  &chanspec.info_mask_separate_available);
			cfg->scale_avail = devm_kcalloc(dev, MAX11410_SCALE_AVAIL_SIZE * 2,
							sizeof(*cfg->scale_avail),
							GFP_KERNEL);
			if (!cfg->scale_avail)
				return -ENOMEM;

			/* Each halving of scale corresponds to one gain step. */
			scale = max11410_get_scale(st, *cfg);
			for (i = 0; i < MAX11410_SCALE_AVAIL_SIZE; i++) {
				cfg->scale_avail[2 * i] = scale >> i;
				cfg->scale_avail[2 * i + 1] = chanspec.scan_type.realbits;
			}
		} else {
			__clear_bit(IIO_CHAN_INFO_SCALE,
				    &chanspec.info_mask_separate_available);
		}

		chanspec.address = chan_idx;
		chanspec.scan_index = chan_idx;
		chanspec.channel = inputs[0];
		chanspec.channel2 = inputs[1];
		channels[chan_idx] = chanspec;
		chan_idx++;
	}

	channels[chan_idx] = (struct iio_chan_spec)IIO_CHAN_SOFT_TIMESTAMP(chan_idx);
	indio_dev->num_channels = chan_idx + 1;
	indio_dev->channels = channels;

	return 0;
}

/* devm action: disable a regulator enabled in max11410_init_vref(). */
static void max11410_disable_reg(void *reg)
{
	regulator_disable(reg);
}

/*
 * Acquire and enable an optional supply @id. On -ENODEV the supply is simply
 * absent: *vref is set to NULL and 0 is returned. On success a devm action
 * guarantees the regulator is disabled again on driver detach.
 */
static int max11410_init_vref(struct device *dev, struct regulator **vref,
			      const char *id)
{
	struct regulator *reg;
	int ret;

	reg = devm_regulator_get_optional(dev, id);
	if (PTR_ERR(reg) == -ENODEV) {
		*vref = NULL;
		return 0;
	} else if (IS_ERR(reg)) {
		return PTR_ERR(reg);
	}
	ret = regulator_enable(reg);
	if (ret)
		return dev_err_probe(dev, ret,
				     "Failed to enable regulator %s\n", id);

	*vref = reg;
	return devm_add_action_or_reset(dev, max11410_disable_reg, reg);
}

/*
 * Start calibration @cal_type and poll the status register until the
 * Calibration Ready flag is set (or the poll times out). ret2 carries any
 * read error from inside the poll loop.
 */
static int max11410_calibrate(struct max11410_state *st, u32 cal_type)
{
	int ret, ret2, val;

	ret = max11410_write_reg(st, MAX11410_REG_CAL_START, cal_type);
	if (ret)
		return ret;

	/* Wait for status register Calibration Ready flag */
	ret = read_poll_timeout(max11410_read_reg, ret2,
				ret2 || (val & MAX11410_STATUS_CAL_READY_BIT),
				50000, MAX11410_CALIB_TIMEOUT_MS * 1000, true,
				st, MAX11410_REG_STATUS, &val);
	if (ret)
		return ret;

	return ret2;
}

/*
 * Full power-up calibration: one self-calibration at the maximum filter
 * rate, then a PGA calibration at every non-unity gain setting, then restore
 * gain 0 / default filter rate / buffered signal path.
 */
static int max11410_self_calibrate(struct max11410_state *st)
{
	int ret, i;

	ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER,
				MAX11410_FILTER_RATE_MASK,
				FIELD_PREP(MAX11410_FILTER_RATE_MASK,
					   MAX11410_FILTER_RATE_MAX));
	if (ret)
		return ret;

	ret = max11410_calibrate(st, MAX11410_CAL_START_SELF);
	if (ret)
		return ret;

	ret = regmap_write_bits(st->regmap, MAX11410_REG_PGA,
				MAX11410_PGA_SIG_PATH_MASK,
				FIELD_PREP(MAX11410_PGA_SIG_PATH_MASK,
					   MAX11410_PGA_SIG_PATH_PGA));
	if (ret)
		return ret;

	/* PGA calibrations */
	for (i = 1; i < 8; ++i) {
		ret = regmap_write_bits(st->regmap, MAX11410_REG_PGA,
					MAX11410_PGA_GAIN_MASK, i);
		if (ret)
			return ret;

		ret = max11410_calibrate(st, MAX11410_CAL_START_PGA);
		if (ret)
			return ret;
	}

	/* Cleanup */
	ret = regmap_write_bits(st->regmap, MAX11410_REG_PGA,
				MAX11410_PGA_GAIN_MASK, 0);
	if (ret)
		return ret;

	ret = regmap_write_bits(st->regmap, MAX11410_REG_FILTER,
				MAX11410_FILTER_RATE_MASK, 0);
	if (ret)
		return ret;

	return regmap_write_bits(st->regmap, MAX11410_REG_PGA,
				 MAX11410_PGA_SIG_PATH_MASK,
				 FIELD_PREP(MAX11410_PGA_SIG_PATH_MASK,
					    MAX11410_PGA_SIG_PATH_BUFFERED));
}

/*
 * Probe: allocate the IIO device, set up regmap over SPI, enable the
 * optional reference supplies, parse channels from firmware, route the
 * interrupt (if a named "gpio0"/"gpio1" IRQ exists), register the triggered
 * buffer + trigger, self-calibrate and register the device. All resources
 * are devm-managed.
 */
static int max11410_probe(struct spi_device *spi)
{
	const char *vrefp_regs[] = { "vref0p", "vref1p", "vref2p" };
	const char *vrefn_regs[] = { "vref0n", "vref1n", "vref2n" };
	struct device *dev = &spi->dev;
	struct max11410_state *st;
	struct iio_dev *indio_dev;
	int ret, irqs[2];
	int i;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*st));
	if (!indio_dev)
		return -ENOMEM;

	st = iio_priv(indio_dev);
	st->spi_dev = spi;
	init_completion(&st->completion);
	mutex_init(&st->lock);

	indio_dev->name = "max11410";
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &max11410_info;

	st->regmap = devm_regmap_init_spi(spi, &regmap_config);
	if (IS_ERR(st->regmap))
		return dev_err_probe(dev, PTR_ERR(st->regmap),
				     "regmap initialization failed\n");

	ret = max11410_init_vref(dev, &st->avdd, "avdd");
	if (ret)
		return ret;

	for (i = 0; i < ARRAY_SIZE(vrefp_regs); i++) {
		ret = max11410_init_vref(dev, &st->vrefp[i], vrefp_regs[i]);
		if (ret)
			return ret;

		ret = max11410_init_vref(dev, &st->vrefn[i], vrefn_regs[i]);
		if (ret)
			return ret;
	}

	/*
	 * Regulators must be configured before parsing channels for
	 * validating "adi,reference" property of each channel.
	 */
	ret = max11410_parse_channels(st, indio_dev);
	if (ret)
		return ret;

	irqs[0] = fwnode_irq_get_byname(dev_fwnode(dev), "gpio0");
	irqs[1] = fwnode_irq_get_byname(dev_fwnode(dev), "gpio1");

	/* Route INTRB to whichever named GPIO interrupt is present. */
	if (irqs[0] > 0) {
		st->irq = irqs[0];
		ret = regmap_write(st->regmap, MAX11410_REG_GPIO_CTRL(0),
				   MAX11410_GPIO_INTRB);
	} else if (irqs[1] > 0) {
		st->irq = irqs[1];
		ret = regmap_write(st->regmap, MAX11410_REG_GPIO_CTRL(1),
				   MAX11410_GPIO_INTRB);
	} else if (spi->irq > 0) {
		/* An SPI IRQ with no named GPIO line cannot be routed. */
		return dev_err_probe(dev, -ENODEV,
				     "no interrupt name specified");
	}
	/* ret is 0 here if no IRQ was configured at all. */
	if (ret)
		return ret;

	ret = regmap_set_bits(st->regmap, MAX11410_REG_CTRL,
			      MAX11410_CTRL_FORMAT_BIT);
	if (ret)
		return ret;

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					      &max11410_trigger_handler,
					      &max11410_buffer_ops);
	if (ret)
		return ret;

	if (st->irq > 0) {
		st->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
						  indio_dev->name,
						  iio_device_id(indio_dev));
		if (!st->trig)
			return -ENOMEM;

		st->trig->ops = &max11410_trigger_ops;
		ret = devm_iio_trigger_register(dev, st->trig);
		if (ret)
			return ret;

		ret = devm_request_threaded_irq(dev, st->irq, NULL,
						&max11410_interrupt,
						IRQF_ONESHOT, "max11410",
						indio_dev);
		if (ret)
			return ret;
	}

	ret = max11410_self_calibrate(st);
	if (ret)
		return dev_err_probe(dev, ret,
				     "cannot perform device self calibration\n");

	return devm_iio_device_register(dev, indio_dev);
}

static const struct of_device_id max11410_spi_of_id[] = {
	{ .compatible = "adi,max11410" },
	{ }
};
MODULE_DEVICE_TABLE(of, max11410_spi_of_id);

static const struct spi_device_id max11410_id[] = {
	{ "max11410" },
	{ }
};
MODULE_DEVICE_TABLE(spi, max11410_id);

static struct spi_driver max11410_driver = {
	.driver = {
		.name = "max11410",
		.of_match_table = max11410_spi_of_id,
	},
	.probe = max11410_probe,
	.id_table = max11410_id,
};
module_spi_driver(max11410_driver);

MODULE_AUTHOR("David Jung <[email protected]>");
MODULE_AUTHOR("Ibrahim Tilki <[email protected]>");
MODULE_DESCRIPTION("Analog Devices MAX11410 ADC");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Battery and Power Management code for the Sharp SL-5x00
 *
 * Copyright (C) 2009 Thomas Kunze
 *
 * based on tosa_battery.c
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/power_supply.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/gpio/driver.h>
#include <linux/gpio/machine.h>
#include <linux/gpio/consumer.h>
#include <linux/mfd/ucb1x00.h>

#include <asm/mach/sharpsl_param.h>
#include <asm/mach-types.h>
#include <mach/collie.h>

static DEFINE_MUTEX(bat_lock); /* protects gpio pins */
static struct work_struct bat_work;
static struct ucb1x00 *ucb;

/*
 * Per-battery state and ADC/GPIO wiring. Two instances exist: the main
 * battery (collie_bat_main) and the backup battery (collie_bat_bu).
 */
struct collie_bat {
	int status;		/* POWER_SUPPLY_STATUS_* */
	struct power_supply *psy;
	int full_chrg;		/* voltage measured at "full", -1 = unknown */
	struct mutex work_lock; /* protects data */

	bool (*is_present)(struct collie_bat *bat);
	struct gpio_desc *gpio_full;		/* "battery full" input */
	struct gpio_desc *gpio_charge_on;	/* charger enable output */

	int technology;		/* POWER_SUPPLY_TECHNOLOGY_* */

	struct gpio_desc *gpio_bat;	/* enables the voltage ADC path */
	int adc_bat;			/* UCB1x00 ADC input for voltage */
	int adc_bat_divider;
	int bat_max;			/* design max voltage, uV */
	int bat_min;			/* design min voltage, uV */

	struct gpio_desc *gpio_temp;	/* enables the temperature ADC path */
	int adc_temp;			/* UCB1x00 ADC input for temp, -1 = none */
	int adc_temp_divider;
};

static struct collie_bat collie_bat_main;

/*
 * Sample the battery voltage: enable the ADC path GPIO, let it settle,
 * read the UCB1x00 ADC and scale by the per-battery divider. Returns 0
 * when the battery has no voltage ADC wired up.
 */
static unsigned long collie_read_bat(struct collie_bat *bat)
{
	unsigned long value = 0;

	if (!bat->gpio_bat || bat->adc_bat < 0)
		return 0;

	mutex_lock(&bat_lock);
	gpiod_set_value(bat->gpio_bat, 1);
	msleep(5);	/* settling time before sampling */
	ucb1x00_adc_enable(ucb);
	value = ucb1x00_adc_read(ucb, bat->adc_bat, UCB_SYNC);
	ucb1x00_adc_disable(ucb);
	gpiod_set_value(bat->gpio_bat, 0);
	mutex_unlock(&bat_lock);
	value = value * 1000000 / bat->adc_bat_divider;

	return value;
}

/*
 * Sample the battery temperature via the UCB1x00 ADC; same sequence as
 * collie_read_bat() but on the temperature path. Returns 0 when the
 * battery has no temperature sensor (adc_temp < 0).
 */
static unsigned long collie_read_temp(struct collie_bat *bat)
{
	unsigned long value = 0;

	if (!bat->gpio_temp || bat->adc_temp < 0)
		return 0;

	mutex_lock(&bat_lock);
	gpiod_set_value(bat->gpio_temp, 1);
	msleep(5);
	ucb1x00_adc_enable(ucb);
	value = ucb1x00_adc_read(ucb, bat->adc_temp, UCB_SYNC);
	ucb1x00_adc_disable(ucb);
	gpiod_set_value(bat->gpio_temp, 0);
	mutex_unlock(&bat_lock);

	value = value * 10000 / bat->adc_temp_divider;

	return value;
}

/* power_supply get_property callback shared by both batteries. */
static int collie_bat_get_property(struct power_supply *psy,
			    enum power_supply_property psp,
			    union power_supply_propval *val)
{
	int ret = 0;
	struct collie_bat *bat = power_supply_get_drvdata(psy);

	/* An absent battery answers only the PRESENT query. */
	if (bat->is_present && !bat->is_present(bat)
			&& psp != POWER_SUPPLY_PROP_PRESENT) {
		return -ENODEV;
	}

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		val->intval = bat->status;
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = bat->technology;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		val->intval = collie_read_bat(bat);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX:
		/* Prefer the measured "full" voltage when we have one. */
		if (bat->full_chrg == -1)
			val->intval = bat->bat_max;
		else
			val->intval = bat->full_chrg;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN:
		val->intval = bat->bat_max;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		val->intval = bat->bat_min;
		break;
	case POWER_SUPPLY_PROP_TEMP:
		val->intval = collie_read_temp(bat);
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		val->intval = bat->is_present ? bat->is_present(bat) : 1;
		break;
	default:
		ret = -EINVAL;
		break;
	}
	return ret;
}

/* External supply changed (e.g. AC plugged): refresh state from workqueue. */
static void collie_bat_external_power_changed(struct power_supply *psy)
{
	schedule_work(&bat_work);
}

/* "battery full" GPIO edge: refresh state from workqueue context. */
static irqreturn_t collie_bat_gpio_isr(int irq, void *data)
{
	pr_info("collie_bat_gpio irq\n");
	schedule_work(&bat_work);
	return IRQ_HANDLED;
}

/*
 * Recompute charge status: detect absence, start/stop the charger based on
 * external power and the "full" GPIO, latch the measured full-charge
 * voltage, and notify the power_supply core on any status change.
 */
static void collie_bat_update(struct collie_bat *bat)
{
	int old;
	struct power_supply *psy = bat->psy;

	mutex_lock(&bat->work_lock);

	old = bat->status;

	if (bat->is_present && !bat->is_present(bat)) {
		printk(KERN_NOTICE "%s not present\n", psy->desc->name);
		bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
		bat->full_chrg = -1;
	} else if (power_supply_am_i_supplied(psy)) {
		if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING) {
			/* Enable charger briefly so the full GPIO is valid. */
			gpiod_set_value(bat->gpio_charge_on, 1);
			mdelay(15);
		}

		if (gpiod_get_value(bat->gpio_full)) {
			/* Record the voltage at the moment charging ended. */
			if (old == POWER_SUPPLY_STATUS_CHARGING ||
					bat->full_chrg == -1)
				bat->full_chrg = collie_read_bat(bat);

			gpiod_set_value(bat->gpio_charge_on, 0);
			bat->status = POWER_SUPPLY_STATUS_FULL;
		} else {
			gpiod_set_value(bat->gpio_charge_on, 1);
			bat->status = POWER_SUPPLY_STATUS_CHARGING;
		}
	} else {
		gpiod_set_value(bat->gpio_charge_on, 0);
		bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
	}

	if (old != bat->status)
		power_supply_changed(psy);

	mutex_unlock(&bat->work_lock);
}

/* Workqueue entry point; only the main battery needs active updates. */
static void collie_bat_work(struct work_struct *work)
{
	collie_bat_update(&collie_bat_main);
}

static enum power_supply_property collie_bat_main_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_TEMP,
};

/* The backup battery has no temperature sensor (see adc_temp = -1 below). */
static enum power_supply_property collie_bat_bu_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_TECHNOLOGY,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_MAX,
	POWER_SUPPLY_PROP_PRESENT,
};

static const struct power_supply_desc collie_bat_main_desc = {
	.name		= "main-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= collie_bat_main_props,
	.num_properties	= ARRAY_SIZE(collie_bat_main_props),
	.get_property	= collie_bat_get_property,
	.external_power_changed = collie_bat_external_power_changed,
	.use_for_apm	= 1,
};

static struct collie_bat collie_bat_main = {
	.status = POWER_SUPPLY_STATUS_DISCHARGING,
	.full_chrg = -1,
	.psy = NULL,

	.gpio_full = NULL,
	.gpio_charge_on = NULL,

	.technology = POWER_SUPPLY_TECHNOLOGY_LIPO,

	.gpio_bat = NULL,
	.adc_bat = UCB_ADC_INP_AD1,
	.adc_bat_divider = 155,
	.bat_max = 4310000,
	.bat_min = 1551 * 1000000 / 414,

	.gpio_temp = NULL,
	.adc_temp = UCB_ADC_INP_AD0,
	.adc_temp_divider = 10000,
};

static const struct power_supply_desc collie_bat_bu_desc = {
	.name		= "backup-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= collie_bat_bu_props,
	.num_properties	= ARRAY_SIZE(collie_bat_bu_props),
	.get_property	= collie_bat_get_property,
	.external_power_changed = collie_bat_external_power_changed,
};

static struct collie_bat collie_bat_bu = {
	.status = POWER_SUPPLY_STATUS_UNKNOWN,
	.full_chrg = -1,
	.psy = NULL,

	.gpio_full = NULL,
	.gpio_charge_on = NULL,

	.technology = POWER_SUPPLY_TECHNOLOGY_LiMn,

	.gpio_bat = NULL,
	.adc_bat = UCB_ADC_INP_AD1,
	.adc_bat_divider = 155,
	.bat_max = 3000000,
	.bat_min = 1900000,

	.gpio_temp = NULL,
	.adc_temp = -1,
	.adc_temp_divider = -1,
};

/* Obtained but unused GPIO */
static struct gpio_desc *collie_mbat_low;

#ifdef CONFIG_PM
static int wakeup_enabled;

/*
 * Suspend: flush pending updates; arm the "battery full" IRQ as a wakeup
 * source only while charging, so a full battery can wake us to stop it.
 */
static int collie_bat_suspend(struct ucb1x00_dev *dev)
{
	/* flush all pending status updates */
	flush_work(&bat_work);

	if (device_may_wakeup(&dev->ucb->dev) &&
	    collie_bat_main.status == POWER_SUPPLY_STATUS_CHARGING)
		wakeup_enabled = !enable_irq_wake(gpiod_to_irq(collie_bat_main.gpio_full));
	else
		wakeup_enabled = 0;

	return 0;
}

static int collie_bat_resume(struct ucb1x00_dev *dev)
{
	if (wakeup_enabled)
		disable_irq_wake(gpiod_to_irq(collie_bat_main.gpio_full));

	/* things may have changed while we were away */
	schedule_work(&bat_work);
	return 0;
}
#else
#define collie_bat_suspend NULL
#define collie_bat_resume NULL
#endif

/*
 * Probe: acquire machine GPIOs and UCB1x00-chip GPIOs, register both
 * power supplies, hook the "main full" IRQ and kick a first status update.
 * Error unwinding mirrors collie_bat_remove() in reverse order.
 */
static int collie_bat_probe(struct ucb1x00_dev *dev)
{
	int ret;
	struct power_supply_config psy_main_cfg = {}, psy_bu_cfg = {};
	struct gpio_chip *gc = &dev->ucb->gpio;

	if (!machine_is_collie())
		return -ENODEV;

	ucb = dev->ucb;

	/* Obtain all the main battery GPIOs */
	collie_bat_main.gpio_full = gpiod_get(&dev->ucb->dev,
					      "main battery full",
					      GPIOD_IN);
	if (IS_ERR(collie_bat_main.gpio_full))
		return PTR_ERR(collie_bat_main.gpio_full);

	collie_mbat_low = gpiod_get(&dev->ucb->dev,
				    "main battery low",
				    GPIOD_IN);
	if (IS_ERR(collie_mbat_low)) {
		ret = PTR_ERR(collie_mbat_low);
		goto err_put_gpio_full;
	}

	collie_bat_main.gpio_charge_on = gpiod_get(&dev->ucb->dev,
						   "main charge on",
						   GPIOD_OUT_LOW);
	if (IS_ERR(collie_bat_main.gpio_charge_on)) {
		ret = PTR_ERR(collie_bat_main.gpio_charge_on);
		goto err_put_mbat_low;
	}

	/* COLLIE_GPIO_MBAT_ON = GPIO 7 on the UCB (TC35143) */
	collie_bat_main.gpio_bat = gpiochip_request_own_desc(gc,
						7,
						"main battery",
						GPIO_ACTIVE_HIGH,
						GPIOD_OUT_LOW);
	if (IS_ERR(collie_bat_main.gpio_bat)) {
		ret = PTR_ERR(collie_bat_main.gpio_bat);
		goto err_put_gpio_charge_on;
	}

	/* COLLIE_GPIO_TMP_ON = GPIO 9 on the UCB (TC35143) */
	collie_bat_main.gpio_temp = gpiochip_request_own_desc(gc,
						9,
						"main battery temp",
						GPIO_ACTIVE_HIGH,
						GPIOD_OUT_LOW);
	if (IS_ERR(collie_bat_main.gpio_temp)) {
		ret = PTR_ERR(collie_bat_main.gpio_temp);
		goto err_free_gpio_bat;
	}

	/*
	 * Obtain the backup battery COLLIE_GPIO_BBAT_ON which is
	 * GPIO 8 on the UCB (TC35143)
	 */
	collie_bat_bu.gpio_bat = gpiochip_request_own_desc(gc,
						8,
						"backup battery",
						GPIO_ACTIVE_HIGH,
						GPIOD_OUT_LOW);
	if (IS_ERR(collie_bat_bu.gpio_bat)) {
		ret = PTR_ERR(collie_bat_bu.gpio_bat);
		goto err_free_gpio_temp;
	}

	mutex_init(&collie_bat_main.work_lock);

	INIT_WORK(&bat_work, collie_bat_work);

	psy_main_cfg.drv_data = &collie_bat_main;
	collie_bat_main.psy = power_supply_register(&dev->ucb->dev,
						    &collie_bat_main_desc,
						    &psy_main_cfg);
	if (IS_ERR(collie_bat_main.psy)) {
		ret = PTR_ERR(collie_bat_main.psy);
		goto err_psy_reg_main;
	}

	psy_bu_cfg.drv_data = &collie_bat_bu;
	collie_bat_bu.psy = power_supply_register(&dev->ucb->dev,
						  &collie_bat_bu_desc,
						  &psy_bu_cfg);
	if (IS_ERR(collie_bat_bu.psy)) {
		ret = PTR_ERR(collie_bat_bu.psy);
		goto err_psy_reg_bu;
	}

	ret = request_irq(gpiod_to_irq(collie_bat_main.gpio_full),
			  collie_bat_gpio_isr,
			  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
			  "main full", &collie_bat_main);
	if (ret)
		goto err_irq;

	device_init_wakeup(&ucb->dev, 1);
	schedule_work(&bat_work);

	return 0;

err_irq:
	power_supply_unregister(collie_bat_bu.psy);
err_psy_reg_bu:
	power_supply_unregister(collie_bat_main.psy);
err_psy_reg_main:

	/* see comment in collie_bat_remove */
	cancel_work_sync(&bat_work);

	gpiochip_free_own_desc(collie_bat_bu.gpio_bat);
err_free_gpio_temp:
	gpiochip_free_own_desc(collie_bat_main.gpio_temp);
err_free_gpio_bat:
	gpiochip_free_own_desc(collie_bat_main.gpio_bat);
err_put_gpio_charge_on:
	gpiod_put(collie_bat_main.gpio_charge_on);
err_put_mbat_low:
	gpiod_put(collie_mbat_low);
err_put_gpio_full:
	gpiod_put(collie_bat_main.gpio_full);

	return ret;
}

static void collie_bat_remove(struct ucb1x00_dev *dev)
{
	free_irq(gpiod_to_irq(collie_bat_main.gpio_full), &collie_bat_main);
	power_supply_unregister(collie_bat_bu.psy);
	power_supply_unregister(collie_bat_main.psy);

	/* These are obtained from the machine */
	gpiod_put(collie_bat_main.gpio_full);
	gpiod_put(collie_mbat_low);
	gpiod_put(collie_bat_main.gpio_charge_on);

	/* These are directly from the UCB so let's free them */
	gpiochip_free_own_desc(collie_bat_main.gpio_bat);
	gpiochip_free_own_desc(collie_bat_main.gpio_temp);
	gpiochip_free_own_desc(collie_bat_bu.gpio_bat);
	/*
	 * Now cancel the bat_work. We won't get any more schedules,
	 * since all sources (isr and external_power_changed) are
	 * unregistered now.
	 */
	cancel_work_sync(&bat_work);
}

static struct ucb1x00_driver collie_bat_driver = {
	.add		= collie_bat_probe,
	.remove		= collie_bat_remove,
	.suspend	= collie_bat_suspend,
	.resume		= collie_bat_resume,
};

static int __init collie_bat_init(void)
{
	return ucb1x00_register_driver(&collie_bat_driver);
}

static void __exit collie_bat_exit(void)
{
	ucb1x00_unregister_driver(&collie_bat_driver);
}

module_init(collie_bat_init);
module_exit(collie_bat_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Thomas Kunze");
MODULE_DESCRIPTION("Collie battery driver");
/*
   sis190.c: Silicon Integrated Systems SiS190 ethernet driver

   Copyright (c) 2003 K.M. Liu <[email protected]>
   Copyright (c) 2003, 2004 Jeff Garzik <[email protected]>
   Copyright (c) 2003, 2004, 2005 Francois Romieu <[email protected]>

   Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191
   genuine driver.

   This software may be used and distributed according to the terms of
   the GNU General Public License (GPL), incorporated herein by reference.
   Drivers based on or derived from this code fall under the GPL and must
   retain the authorship, copyright and license notice.  This file is not
   a complete program and may only be used when the entire operating
   system is licensed under the GPL.

   See the file COPYING in this distribution for more information.

 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/mii.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <asm/irq.h>

#define PHY_MAX_ADDR		32
#define PHY_ID_ANY		0x1f
#define MII_REG_ANY		0x1f

#define DRV_VERSION		"1.4"
#define DRV_NAME		"sis190"
#define SIS190_DRIVER_NAME	DRV_NAME " Gigabit Ethernet driver " DRV_VERSION

#define sis190_rx_skb			netif_rx
#define sis190_rx_quota(count, quota)	count

#define NUM_TX_DESC		64	/* [8..1024] */
#define NUM_RX_DESC		64	/* [8..8192] */
#define TX_RING_BYTES		(NUM_TX_DESC * sizeof(struct TxDesc))
#define RX_RING_BYTES		(NUM_RX_DESC * sizeof(struct RxDesc))
#define RX_BUF_SIZE		1536
#define RX_BUF_MASK		0xfff8

#define SIS190_REGS_SIZE	0x80
#define SIS190_TX_TIMEOUT	(6*HZ)
#define SIS190_PHY_TIMEOUT	(10*HZ)
#define SIS190_MSG_DEFAULT	(NETIF_MSG_DRV | NETIF_MSG_PROBE | \
				 NETIF_MSG_LINK | NETIF_MSG_IFUP | \
				 NETIF_MSG_IFDOWN)

/* Enhanced PHY access register bit definitions */
#define EhnMIIread		0x0000
#define EhnMIIwrite		0x0020
#define EhnMIIdataShift		16
#define EhnMIIpmdShift		6	/* 7016 only */
#define EhnMIIregShift		11
#define EhnMIIreq		0x0010
#define EhnMIInotDone		0x0010

/* Write/read MMIO register */
#define SIS_W8(reg, val)	writeb ((val), ioaddr + (reg))
#define SIS_W16(reg, val)	writew ((val), ioaddr + (reg))
#define SIS_W32(reg, val)	writel ((val), ioaddr + (reg))
#define SIS_R8(reg)		readb (ioaddr + (reg))
#define SIS_R16(reg)		readw (ioaddr + (reg))
#define SIS_R32(reg)		readl (ioaddr + (reg))
#define SIS_PCI_COMMIT()	SIS_R32(IntrControl)

enum sis190_registers {
	TxControl		= 0x00,
	TxDescStartAddr		= 0x04,
	rsv0			= 0x08,	// reserved
	TxSts			= 0x0c,	// unused (Control/Status)
	RxControl		= 0x10,
	RxDescStartAddr		= 0x14,
	rsv1			= 0x18,	// reserved
	RxSts			= 0x1c,	// unused
	IntrStatus		= 0x20,
	IntrMask		= 0x24,
	IntrControl		= 0x28,
	IntrTimer		= 0x2c,	// unused (Interrupt Timer)
	PMControl		= 0x30,	// unused (Power Mgmt Control/Status)
	rsv2			= 0x34,	// reserved
	ROMControl		= 0x38,
	ROMInterface		= 0x3c,
	StationControl		= 0x40,
	GMIIControl		= 0x44,
	GIoCR			= 0x48, // unused (GMAC IO Compensation)
	GIoCtrl			= 0x4c, // unused (GMAC IO Control)
	TxMacControl		= 0x50,
	TxLimit			= 0x54, // unused (Tx MAC Timer/TryLimit)
	RGDelay			= 0x58, // unused (RGMII Tx Internal Delay)
	rsv3			= 0x5c, // reserved
	RxMacControl		= 0x60,
	RxMacAddr		= 0x62,
	RxHashTable		= 0x68,
	// Undocumented		= 0x6c,
	RxWolCtrl		= 0x70,
	RxWolData		= 0x74, // unused (Rx WOL Data Access)
	RxMPSControl		= 0x78,	// unused (Rx MPS Control)
	rsv4			= 0x7c, // reserved
};

enum sis190_register_content {
	/* IntrStatus */
	SoftInt			= 0x40000000,	// unused
	Timeup			= 0x20000000,	// unused
	PauseFrame		= 0x00080000,	// unused
	MagicPacket		= 0x00040000,	// unused
	WakeupFrame		= 0x00020000,	// unused
	LinkChange		= 0x00010000,
	RxQEmpty		= 0x00000080,
	RxQInt			= 0x00000040,
	TxQ1Empty		= 0x00000020,	// unused
	TxQ1Int			= 0x00000010,
	TxQ0Empty		= 0x00000008,	// unused
	TxQ0Int			= 0x00000004,
	RxHalt			= 0x00000002,
	TxHalt			= 0x00000001,

	/* {Rx/Tx}CmdBits */
	CmdReset		= 0x10,
	CmdRxEnb		= 0x08,		// unused
	CmdTxEnb		= 0x01,
	RxBufEmpty		= 0x01,		// unused

	/* Cfg9346Bits */
	Cfg9346_Lock		= 0x00,		// unused
	Cfg9346_Unlock		= 0xc0,		// unused

	/* RxMacControl */
	AcceptErr		= 0x20,		// unused
	AcceptRunt		= 0x10,		// unused
	AcceptBroadcast		= 0x0800,
	AcceptMulticast		= 0x0400,
	AcceptMyPhys		= 0x0200,
	AcceptAllPhys		= 0x0100,

	/* RxConfigBits */
	RxCfgFIFOShift		= 13,
	RxCfgDMAShift		= 8,		// 0x1a in RxControl ?

	/* TxConfigBits */
	TxInterFrameGapShift	= 24,
	TxDMAShift		= 8, /* DMA burst value (0-7) is shift this many bits */

	LinkStatus		= 0x02,		// unused
	FullDup			= 0x01,		// unused

	/* TBICSRBit */
	TBILinkOK		= 0x02000000,	// unused
};

struct TxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

struct RxDesc {
	__le32 PSize;
	__le32 status;
	__le32 addr;
	__le32 size;
};

enum _DescStatusBit {
	/* _Desc.status */
	OWNbit		= 0x80000000,	// RXOWN/TXOWN
	INTbit		= 0x40000000,	// RXINT/TXINT
	CRCbit		= 0x00020000,	// CRCOFF/CRCEN
	PADbit		= 0x00010000,	// PREADD/PADEN
	/* _Desc.size */
	RingEnd		= 0x80000000,
	/* TxDesc.status */
	LSEN		= 0x08000000,	// TSO ? -- FR
	IPCS		= 0x04000000,
	TCPCS		= 0x02000000,
	UDPCS		= 0x01000000,
	BSTEN		= 0x00800000,
	EXTEN		= 0x00400000,
	DEFEN		= 0x00200000,
	BKFEN		= 0x00100000,
	CRSEN		= 0x00080000,
	COLEN		= 0x00040000,
	THOL3		= 0x30000000,
	THOL2		= 0x20000000,
	THOL1		= 0x10000000,
	THOL0		= 0x00000000,

	WND		= 0x00080000,
	TABRT		= 0x00040000,
	FIFO		= 0x00020000,
	LINK		= 0x00010000,
	ColCountMask	= 0x0000ffff,
	/* RxDesc.status */
	IPON		= 0x20000000,
	TCPON		= 0x10000000,
	UDPON		= 0x08000000,
	Wakup		= 0x00400000,
	Magic		= 0x00200000,
	Pause		= 0x00100000,
	DEFbit		= 0x00200000,
	BCAST		= 0x000c0000,
	MCAST		= 0x00080000,
	UCAST		= 0x00040000,
	/* RxDesc.PSize */
	TAGON		= 0x80000000,
	RxDescCountMask	= 0x7f000000, // multi-desc pkt when > 1 ? -- FR
	ABORT		= 0x00800000,
	SHORT		= 0x00400000,
	LIMIT		= 0x00200000,
	MIIER		= 0x00100000,
	OVRUN		= 0x00080000,
	NIBON		= 0x00040000,
	COLON		= 0x00020000,
	CRCOK		= 0x00010000,
	RxSizeMask	= 0x0000ffff
	/*
	 * The asic could apparently do vlan, TSO, jumbo (sis191 only) and
	 * provide two (unused with Linux) Tx queues. No publicly
	 * available documentation alas.
	 */
};

enum sis190_eeprom_access_register_bits {
	EECS	= 0x00000001,	// unused
	EECLK	= 0x00000002,	// unused
	EEDO	= 0x00000008,	// unused
	EEDI	= 0x00000004,	// unused
	EEREQ	= 0x00000080,
	EEROP	= 0x00000200,
	EEWOP	= 0x00000100	// unused
};

/* EEPROM Addresses */
enum sis190_eeprom_address {
	EEPROMSignature	= 0x00,
	EEPROMCLK	= 0x01,	// unused
	EEPROMInfo	= 0x02,
	EEPROMMACAddr	= 0x03
};

enum sis190_feature {
	F_HAS_RGMII	= 1,
	F_PHY_88E1111	= 2,
	F_PHY_BCM5461	= 4
};

/* Driver-private state, reachable via netdev_priv(dev). */
struct sis190_private {
	void __iomem *mmio_addr;
	struct pci_dev *pci_dev;
	struct net_device *dev;
	spinlock_t lock;
	u32 rx_buf_sz;
	u32 cur_rx;
	u32 cur_tx;
	u32 dirty_rx;
	u32 dirty_tx;
	dma_addr_t rx_dma;
	dma_addr_t tx_dma;
	struct RxDesc *RxDescRing;
	struct TxDesc *TxDescRing;
	struct sk_buff *Rx_skbuff[NUM_RX_DESC];
	struct sk_buff *Tx_skbuff[NUM_TX_DESC];
	struct work_struct phy_task;
	struct timer_list timer;
	u32 msg_enable;
	struct mii_if_info mii_if;
	struct list_head first_phy;
	u32 features;
	u32 negotiated_lpa;
	enum {
		LNK_OFF,
		LNK_ON,
		LNK_AUTONEG,
	} link_status;
};

struct sis190_phy {
	struct list_head list;
	int phy_id;
	u16 id[2];
	u16 status;
	u8  type;
};

enum sis190_phy_type {
	UNKNOWN	= 0x00,
	HOME	= 0x01,
	LAN	= 0x02,
	MIX	= 0x03
};

/* Known PHYs, matched by the two MII ID registers. */
static struct mii_chip_info {
	const char *name;
	u16 id[2];
	unsigned int type;
	u32 feature;
} mii_chip_table[] = {
	{ "Atheros PHY",          { 0x004d, 0xd010 }, LAN, 0 },
	{ "Atheros PHY AR8012",   { 0x004d, 0xd020 }, LAN, 0 },
	{ "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 },
	{ "Broadcom PHY AC131",   { 0x0143, 0xbc70 }, LAN, 0 },
	{ "Agere PHY ET1101B",    { 0x0282, 0xf010 }, LAN, 0 },
	{ "Marvell PHY 88E1111",  { 0x0141, 0x0cc0 }, LAN, F_PHY_88E1111 },
	{ "Realtek PHY RTL8201",  { 0x0000, 0x8200 }, LAN, 0 },
	{ NULL, }
};

static const struct {
	const char *name;
} sis_chip_info[] = {
	{ "SiS 190 PCI Fast Ethernet adapter" },
	{ "SiS 191 PCI Gigabit Ethernet adapter" },
};

static const struct pci_device_id sis190_pci_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 },
	{ PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 },
	{ 0, },
};

MODULE_DEVICE_TABLE(pci, sis190_pci_tbl);

static int rx_copybreak = 200;

static struct {
	u32 msg_enable;
} debug = { -1 };

MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_AUTHOR("K.M. Liu <[email protected]>, Ueimor <[email protected]>");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");

static const u32 sis190_intr_mask =
	RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange;

/*
 * Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
 * The chips use a 64 element hash table based on the Ethernet CRC.
 */
static const int multicast_filter_limit = 32;

/*
 * Issue a GMII command and busy-wait (with msleep) until the chip clears
 * the EhnMIInotDone flag, ~100 ms max.
 */
static void __mdio_cmd(void __iomem *ioaddr, u32 ctl)
{
	unsigned int i;

	SIS_W32(GMIIControl, ctl);

	msleep(1);

	for (i = 0; i < 100; i++) {
		if (!(SIS_R32(GMIIControl) & EhnMIInotDone))
			break;
		msleep(1);
	}

	if (i > 99)
		pr_err("PHY command failed !\n");
}

static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) |
		(((u32) val) << EhnMIIdataShift));
}

static int mdio_read(void __iomem *ioaddr, int phy_id, int reg)
{
	__mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread |
		(((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift));

	return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift);
}

/* mii_if_info adapters around the raw MDIO helpers. */
static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val)
{
	struct sis190_private *tp = netdev_priv(dev);

	mdio_write(tp->mmio_addr, phy_id, reg, val);
}

static int __mdio_read(struct net_device *dev, int phy_id, int reg)
{
	struct sis190_private *tp = netdev_priv(dev);

	return mdio_read(tp->mmio_addr, phy_id, reg);
}

/* Read twice so latched status bits (e.g. link) reflect current state. */
static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg)
{
	mdio_read(ioaddr, phy_id, reg);
	return mdio_read(ioaddr, phy_id, reg);
}

/*
 * Read one 16-bit EEPROM word through the ROM interface; returns 0 when no
 * EEPROM is present, 0xffff on request timeout.
 */
static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg)
{
	u16 data = 0xffff;
	unsigned int i;

	if (!(SIS_R32(ROMControl) & 0x0002))
		return 0;

	SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10));

	for (i = 0; i < 200; i++) {
		if (!(SIS_R32(ROMInterface) & EEREQ)) {
			data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16;
			break;
		}
		msleep(1);
	}

	return data;
}

/* Mask all interrupts and acknowledge anything pending; post via read-back. */
static void sis190_irq_mask_and_ack(void __iomem *ioaddr)
{
	SIS_W32(IntrMask, 0x00);
	SIS_W32(IntrStatus, 0xffffffff);
	SIS_PCI_COMMIT();
}

static void sis190_asic_down(void __iomem *ioaddr)
{
	/* Stop the chip's Tx and Rx DMA processes. */

	SIS_W32(TxControl, 0x1a00);
	SIS_W32(RxControl, 0x1a00);

	sis190_irq_mask_and_ack(ioaddr);
}

static void sis190_mark_as_last_descriptor(struct RxDesc *desc)
{
	desc->size |= cpu_to_le32(RingEnd);
}

/*
 * Re-arm an Rx descriptor for the NIC. The wmb() orders the size/PSize
 * writes before handing ownership (OWNbit) to hardware.
 */
static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->size) & RingEnd;

	desc->PSize = 0x0;
	desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor);
	wmb();
	desc->status = cpu_to_le32(OWNbit | INTbit);
}

static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
				      u32 rx_buf_sz)
{
	desc->addr = cpu_to_le32(mapping);
	sis190_give_to_asic(desc, rx_buf_sz);
}

/* Poison a descriptor so the NIC will never DMA into it. */
static inline void sis190_make_unusable_by_asic(struct RxDesc *desc)
{
	desc->PSize = 0x0;
	desc->addr = cpu_to_le32(0xdeadbeef);
	desc->size &= cpu_to_le32(RingEnd);
	wmb();
	desc->status = 0x0;
}

/*
 * Allocate and DMA-map an Rx skb for @desc; on any failure the descriptor
 * is made unusable and NULL is returned.
 */
static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp,
					   struct RxDesc *desc)
{
	u32 rx_buf_sz = tp->rx_buf_sz;
	struct sk_buff *skb;
	dma_addr_t mapping;

	skb = netdev_alloc_skb(tp->dev, rx_buf_sz);
	if (unlikely(!skb))
		goto skb_alloc_failed;
	mapping = dma_map_single(&tp->pci_dev->dev, skb->data, tp->rx_buf_sz,
				 DMA_FROM_DEVICE);
	if (dma_mapping_error(&tp->pci_dev->dev, mapping))
		goto out;
	sis190_map_to_asic(desc, mapping, rx_buf_sz);

	return skb;

out:
	dev_kfree_skb_any(skb);
skb_alloc_failed:
	sis190_make_unusable_by_asic(desc);
	return NULL;
}

/* Refill empty Rx slots in [start, end); returns how many were filled. */
static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev,
			  u32 start, u32 end)
{
	u32 cur;

	for (cur = start; cur < end; cur++) {
		unsigned int i = cur % NUM_RX_DESC;

		if (tp->Rx_skbuff[i])
			continue;

		tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i);

		if (!tp->Rx_skbuff[i])
			break;
	}
	return cur - start;
}

/*
 * For small packets (< rx_copybreak) copy into a fresh skb so the original
 * DMA buffer can be recycled. Returns true if the copy was done.
 */
static bool sis190_try_rx_copy(struct sis190_private *tp,
			       struct sk_buff **sk_buff, int pkt_size,
			       dma_addr_t addr)
{
	struct sk_buff *skb;
	bool done = false;

	if (pkt_size >= rx_copybreak)
		goto out;

	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
	if (!skb)
		goto out;

	dma_sync_single_for_cpu(&tp->pci_dev->dev, addr, tp->rx_buf_sz,
				DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
	*sk_buff = skb;
	done = true;
out:
	return done;
}

/* Classify an Rx error from the descriptor status; updates counters. */
static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define ErrMask	(OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT)

	if ((status & CRCOK) && !(status & ErrMask))
		return 0;

	if (!(status & CRCOK))
		stats->rx_crc_errors++;
	else if (status & OVRUN)
		stats->rx_over_errors++;
	else if (status & (SHORT | LIMIT))
		stats->rx_length_errors++;
	else if (status & (MIIER | NIBON | COLON))
		stats->rx_frame_errors++;

	stats->rx_errors++;
	return -1;
}

/*
 * Drain completed Rx descriptors: validate, deliver (copying small frames),
 * then refill the ring. Returns the number of packets processed.
 */
static int sis190_rx_interrupt(struct net_device *dev,
			       struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 rx_left, cur_rx = tp->cur_rx;
	u32 delta, count;

	rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
	rx_left = sis190_rx_quota(rx_left, (u32) dev->quota);

	for (; rx_left > 0; rx_left--, cur_rx++) {
		unsigned int entry = cur_rx % NUM_RX_DESC;
		struct RxDesc *desc = tp->RxDescRing + entry;
		u32 status;

		if (le32_to_cpu(desc->status) & OWNbit)
			break;

		status = le32_to_cpu(desc->PSize);

		//netif_info(tp, intr, dev, "Rx PSize = %08x\n", status);

		if (sis190_rx_pkt_err(status, stats) < 0)
			sis190_give_to_asic(desc, tp->rx_buf_sz);
		else {
			struct sk_buff *skb = tp->Rx_skbuff[entry];
			dma_addr_t addr = le32_to_cpu(desc->addr);
			/* Strip the 4-byte FCS from the reported size. */
			int pkt_size = (status & RxSizeMask) - 4;
			struct pci_dev *pdev = tp->pci_dev;

			if (unlikely(pkt_size > tp->rx_buf_sz)) {
				netif_info(tp, intr, dev,
					   "(frag) status = %08x\n", status);
				stats->rx_dropped++;
				stats->rx_length_errors++;
				sis190_give_to_asic(desc, tp->rx_buf_sz);
				continue;
			}

			if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) {
				/* Copied: give the DMA buffer back. */
				dma_sync_single_for_device(&pdev->dev, addr,
							   tp->rx_buf_sz,
							   DMA_FROM_DEVICE);
				sis190_give_to_asic(desc, tp->rx_buf_sz);
			} else {
				/* Handing the mapped skb up the stack. */
				dma_unmap_single(&pdev->dev, addr,
						 tp->rx_buf_sz,
						 DMA_FROM_DEVICE);
				tp->Rx_skbuff[entry] = NULL;
				sis190_make_unusable_by_asic(desc);
			}

			skb_put(skb, pkt_size);
			skb->protocol = eth_type_trans(skb, dev);

			sis190_rx_skb(skb);

			stats->rx_packets++;
			stats->rx_bytes += pkt_size;
			if ((status & BCAST) == MCAST)
				stats->multicast++;
		}
	}
	count = cur_rx - tp->cur_rx;
	tp->cur_rx = cur_rx;

	delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx);
	if (!delta && count)
		netif_info(tp, intr, dev, "no Rx buffer allocated\n");
	tp->dirty_rx += delta;

	if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx)
		netif_emerg(tp, intr, dev, "Rx buffers exhausted\n");

	return count;
}

/*
 * Unmap a transmitted skb. Padded short frames were mapped at ETH_ZLEN,
 * so unmap with the same length.
 */
static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb,
				struct TxDesc *desc)
{
	unsigned int len;

	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;

	dma_unmap_single(&pdev->dev, le32_to_cpu(desc->addr), len,
			 DMA_TO_DEVICE);

	memset(desc, 0x00, sizeof(*desc));
}

/* Classify a Tx error from the descriptor status; updates counters. */
static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats)
{
#define TxErrMask	(WND | TABRT | FIFO | LINK)

	if (!unlikely(status & TxErrMask))
		return 0;

	if (status & WND)
		stats->tx_window_errors++;
	if (status & TABRT)
		stats->tx_aborted_errors++;
	if (status & FIFO)
		stats->tx_fifo_errors++;
	if (status & LINK)
		stats->tx_carrier_errors++;

	stats->tx_errors++;

	return -1;
}

/*
 * Reclaim completed Tx descriptors, free their skbs, and wake the queue
 * once it was stopped and slots are available again.
 */
static void sis190_tx_interrupt(struct net_device *dev,
				struct sis190_private *tp, void __iomem *ioaddr)
{
	struct net_device_stats *stats = &dev->stats;
	u32 pending, dirty_tx = tp->dirty_tx;
	/*
	 * It would not be needed if queueing was allowed to be enabled
	 * again too early (hint: think preempt and unclocked smp systems).
	 */
	unsigned int queue_stopped;

	smp_rmb();
	pending = tp->cur_tx - dirty_tx;
	queue_stopped = (pending == NUM_TX_DESC);

	for (; pending; pending--, dirty_tx++) {
		unsigned int entry = dirty_tx % NUM_TX_DESC;
		struct TxDesc *txd = tp->TxDescRing + entry;
		u32 status = le32_to_cpu(txd->status);
		struct sk_buff *skb;

		if (status & OWNbit)
			break;

		skb = tp->Tx_skbuff[entry];

		if (likely(sis190_tx_pkt_err(status, stats) == 0)) {
			stats->tx_packets++;
			stats->tx_bytes += skb->len;
			stats->collisions += ((status & ColCountMask) - 1);
		}

		sis190_unmap_tx_skb(tp->pci_dev, skb, txd);
		tp->Tx_skbuff[entry] = NULL;
		dev_consume_skb_irq(skb);
	}

	if (tp->dirty_tx != dirty_tx) {
		tp->dirty_tx = dirty_tx;
		smp_wmb();
		if (queue_stopped)
			netif_wake_queue(dev);
	}
}

/*
 * The interrupt handler does all of the Rx thread work and cleans up after
 * the Tx thread.
 */
static irqreturn_t sis190_irq(int irq, void *__dev)
{
	struct net_device *dev = __dev;
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned int handled = 0;
	u32 status;

	status = SIS_R32(IntrStatus);

	/* 0xffffffff: device gone (e.g. hot-unplug); 0: not our interrupt. */
	if ((status == 0xffffffff) || !status)
		goto out;

	handled = 1;

	if (unlikely(!netif_running(dev))) {
		sis190_asic_down(ioaddr);
		goto out;
	}

	SIS_W32(IntrStatus, status);

//	netif_info(tp, intr, dev, "status = %08x\n", status);

	if (status & LinkChange) {
		netif_info(tp, intr, dev, "link change\n");
		del_timer(&tp->timer);
		schedule_work(&tp->phy_task);
	}

	if (status & RxQInt)
		sis190_rx_interrupt(dev, tp, ioaddr);

	if (status & TxQ0Int)
		sis190_tx_interrupt(dev, tp, ioaddr);
out:
	return IRQ_RETVAL(handled);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void sis190_netpoll(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	const int irq = tp->pci_dev->irq;

	disable_irq(irq);
	sis190_irq(irq, dev);
	enable_irq(irq);
}
#endif

/* (continues past this chunk boundary) */
static void sis190_free_rx_skb(struct sis190_private *tp,
			       struct sk_buff **sk_buff, struct RxDesc *desc)
{
	struct pci_dev *pdev = tp->pci_dev;

	dma_unmap_single(&pdev->dev,
/* (continuation of sis190_free_rx_skb: unmap the Rx DMA buffer, free the
 * skb, clear the ring slot and park the descriptor so the chip cannot
 * reuse it) */
			 le32_to_cpu(desc->addr), tp->rx_buf_sz,
			 DMA_FROM_DEVICE);
	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;	/* mark the Rx_skbuff slot as empty */
	sis190_make_unusable_by_asic(desc);
}

/* Free every skb still attached to the Rx ring. */
static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

/* Reset all Tx/Rx producer and consumer indexes to their post-init state. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

/*
 * Initialize both descriptor rings: reset the indexes, clear the skb
 * bookkeeping arrays, fill the whole Rx ring with freshly allocated
 * buffers and mark the last Rx descriptor as end-of-ring.
 *
 * Returns 0 on success, -ENOMEM if the Rx ring could not be completely
 * filled (any partial fill is undone via sis190_rx_clear()).
 */
static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	/* RingEnd on the final descriptor makes the chip wrap around. */
	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

/*
 * Program the Rx filter from the interface flags and multicast list:
 * promiscuous mode accepts everything, too many multicast entries (or
 * IFF_ALLMULTI) falls back to accept-all-multicast, otherwise a 64-bit
 * hash filter is built from the multicast addresses.
 */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			  AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts.
*/ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; netdev_for_each_mc_addr(ha, dev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } } spin_lock_irqsave(&tp->lock, flags); SIS_W16(RxMacControl, rx_mode | 0x2); SIS_W32(RxHashTable, mc_filter[0]); SIS_W32(RxHashTable + 4, mc_filter[1]); spin_unlock_irqrestore(&tp->lock, flags); } static void sis190_soft_reset(void __iomem *ioaddr) { SIS_W32(IntrControl, 0x8000); SIS_PCI_COMMIT(); SIS_W32(IntrControl, 0x0); sis190_asic_down(ioaddr); } static void sis190_hw_start(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; sis190_soft_reset(ioaddr); SIS_W32(TxDescStartAddr, tp->tx_dma); SIS_W32(RxDescStartAddr, tp->rx_dma); SIS_W32(IntrStatus, 0xffffffff); SIS_W32(IntrMask, 0x0); SIS_W32(GMIIControl, 0x0); SIS_W32(TxMacControl, 0x60); SIS_W16(RxMacControl, 0x02); SIS_W32(RxHashTable, 0x0); SIS_W32(0x6c, 0x0); SIS_W32(RxWolCtrl, 0x0); SIS_W32(RxWolData, 0x0); SIS_PCI_COMMIT(); sis190_set_rx_mode(dev); /* Enable all known interrupts by setting the interrupt mask. */ SIS_W32(IntrMask, sis190_intr_mask); SIS_W32(TxControl, 0x1a00 | CmdTxEnb); SIS_W32(RxControl, 0x1a1d); netif_start_queue(dev); } static void sis190_phy_task(struct work_struct *work) { struct sis190_private *tp = container_of(work, struct sis190_private, phy_task); struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; u16 val; rtnl_lock(); if (!netif_running(dev)) goto out_unlock; val = mdio_read(ioaddr, phy_id, MII_BMCR); if (val & BMCR_RESET) { // FIXME: needlessly high ? 
-- FR 02/07/2005 mod_timer(&tp->timer, jiffies + HZ/10); goto out_unlock; } val = mdio_read_latched(ioaddr, phy_id, MII_BMSR); if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) { netif_carrier_off(dev); netif_warn(tp, link, dev, "auto-negotiating...\n"); tp->link_status = LNK_AUTONEG; } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) { /* Rejoice ! */ struct { int val; u32 ctl; const char *msg; } reg31[] = { { LPA_1000FULL, 0x07000c00 | 0x00001000, "1000 Mbps Full Duplex" }, { LPA_1000HALF, 0x07000c00, "1000 Mbps Half Duplex" }, { LPA_100FULL, 0x04000800 | 0x00001000, "100 Mbps Full Duplex" }, { LPA_100HALF, 0x04000800, "100 Mbps Half Duplex" }, { LPA_10FULL, 0x04000400 | 0x00001000, "10 Mbps Full Duplex" }, { LPA_10HALF, 0x04000400, "10 Mbps Half Duplex" }, { 0, 0x04000400, "unknown" } }, *p = NULL; u16 adv, autoexp, gigadv, gigrec; val = mdio_read(ioaddr, phy_id, 0x1f); netif_info(tp, link, dev, "mii ext = %04x\n", val); val = mdio_read(ioaddr, phy_id, MII_LPA); adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION); netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n", val, adv, autoexp); if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) { /* check for gigabit speed */ gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000); gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000); val = (gigadv & (gigrec >> 2)); if (val & ADVERTISE_1000FULL) p = reg31; else if (val & ADVERTISE_1000HALF) p = reg31 + 1; } if (!p) { val &= adv; for (p = reg31; p->val; p++) { if ((val & p->val) == p->val) break; } } p->ctl |= SIS_R32(StationControl) & ~0x0f001c00; if ((tp->features & F_HAS_RGMII) && (tp->features & F_PHY_BCM5461)) { // Set Tx Delay in RGMII mode. 
mdio_write(ioaddr, phy_id, 0x18, 0xf1c7); udelay(200); mdio_write(ioaddr, phy_id, 0x1c, 0x8c00); p->ctl |= 0x03000000; } SIS_W32(StationControl, p->ctl); if (tp->features & F_HAS_RGMII) { SIS_W32(RGDelay, 0x0441); SIS_W32(RGDelay, 0x0440); } tp->negotiated_lpa = p->val; netif_info(tp, link, dev, "link on %s mode\n", p->msg); netif_carrier_on(dev); tp->link_status = LNK_ON; } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG) tp->link_status = LNK_OFF; mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); out_unlock: rtnl_unlock(); } static void sis190_phy_timer(struct timer_list *t) { struct sis190_private *tp = from_timer(tp, t, timer); struct net_device *dev = tp->dev; if (likely(netif_running(dev))) schedule_work(&tp->phy_task); } static inline void sis190_delete_timer(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); del_timer_sync(&tp->timer); } static inline void sis190_request_timer(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; timer_setup(timer, sis190_phy_timer, 0); timer->expires = jiffies + SIS190_PHY_TIMEOUT; add_timer(timer); } static void sis190_set_rxbufsize(struct sis190_private *tp, struct net_device *dev) { unsigned int mtu = dev->mtu; tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; /* RxDesc->size has a licence to kill the lower bits */ if (tp->rx_buf_sz & 0x07) { tp->rx_buf_sz += 8; tp->rx_buf_sz &= RX_BUF_MASK; } } static int sis190_open(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; int rc = -ENOMEM; sis190_set_rxbufsize(tp, dev); /* * Rx and Tx descriptors need 256 bytes alignment. * dma_alloc_coherent() guarantees a stronger alignment. 
*/ tp->TxDescRing = dma_alloc_coherent(&pdev->dev, TX_RING_BYTES, &tp->tx_dma, GFP_KERNEL); if (!tp->TxDescRing) goto out; tp->RxDescRing = dma_alloc_coherent(&pdev->dev, RX_RING_BYTES, &tp->rx_dma, GFP_KERNEL); if (!tp->RxDescRing) goto err_free_tx_0; rc = sis190_init_ring(dev); if (rc < 0) goto err_free_rx_1; sis190_request_timer(dev); rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev); if (rc < 0) goto err_release_timer_2; sis190_hw_start(dev); out: return rc; err_release_timer_2: sis190_delete_timer(dev); sis190_rx_clear(tp); err_free_rx_1: dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); err_free_tx_0: dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); goto out; } static void sis190_tx_clear(struct sis190_private *tp) { unsigned int i; for (i = 0; i < NUM_TX_DESC; i++) { struct sk_buff *skb = tp->Tx_skbuff[i]; if (!skb) continue; sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i); tp->Tx_skbuff[i] = NULL; dev_kfree_skb(skb); tp->dev->stats.tx_dropped++; } tp->cur_tx = tp->dirty_tx = 0; } static void sis190_down(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned int poll_locked = 0; sis190_delete_timer(dev); netif_stop_queue(dev); do { spin_lock_irq(&tp->lock); sis190_asic_down(ioaddr); spin_unlock_irq(&tp->lock); synchronize_irq(tp->pci_dev->irq); if (!poll_locked) poll_locked++; synchronize_rcu(); } while (SIS_R32(IntrMask)); sis190_tx_clear(tp); sis190_rx_clear(tp); } static int sis190_close(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; sis190_down(dev); free_irq(pdev->irq, dev); dma_free_coherent(&pdev->dev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); dma_free_coherent(&pdev->dev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); tp->TxDescRing = NULL; tp->RxDescRing = NULL; return 0; } static netdev_tx_t sis190_start_xmit(struct sk_buff *skb, struct net_device 
*dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u32 len, entry, dirty_tx; struct TxDesc *desc; dma_addr_t mapping; if (unlikely(skb->len < ETH_ZLEN)) { if (skb_padto(skb, ETH_ZLEN)) { dev->stats.tx_dropped++; goto out; } len = ETH_ZLEN; } else { len = skb->len; } entry = tp->cur_tx % NUM_TX_DESC; desc = tp->TxDescRing + entry; if (unlikely(le32_to_cpu(desc->status) & OWNbit)) { netif_stop_queue(dev); netif_err(tp, tx_err, dev, "BUG! Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; } mapping = dma_map_single(&tp->pci_dev->dev, skb->data, len, DMA_TO_DEVICE); if (dma_mapping_error(&tp->pci_dev->dev, mapping)) { netif_err(tp, tx_err, dev, "PCI mapping failed, dropping packet"); return NETDEV_TX_BUSY; } tp->Tx_skbuff[entry] = skb; desc->PSize = cpu_to_le32(len); desc->addr = cpu_to_le32(mapping); desc->size = cpu_to_le32(len); if (entry == (NUM_TX_DESC - 1)) desc->size |= cpu_to_le32(RingEnd); wmb(); desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit); if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) { /* Half Duplex */ desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN); if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL)) desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */ } tp->cur_tx++; smp_wmb(); SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb); dirty_tx = tp->dirty_tx; if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) { netif_stop_queue(dev); smp_rmb(); if (dirty_tx != tp->dirty_tx) netif_wake_queue(dev); } out: return NETDEV_TX_OK; } static void sis190_free_phy(struct list_head *first_phy) { struct sis190_phy *cur, *next; list_for_each_entry_safe(cur, next, first_phy, list) { kfree(cur); } } /** * sis190_default_phy - Select default PHY for sis190 mac. * @dev: the net device to probe for * * Select first detected PHY with link as default. * If no one is link on, select PHY whose types is HOME as default. * If HOME doesn't exist, select LAN. 
*/ static u16 sis190_default_phy(struct net_device *dev) { struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan; struct sis190_private *tp = netdev_priv(dev); struct mii_if_info *mii_if = &tp->mii_if; void __iomem *ioaddr = tp->mmio_addr; u16 status; phy_home = phy_default = phy_lan = NULL; list_for_each_entry(phy, &tp->first_phy, list) { status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR); // Link ON & Not select default PHY & not ghost PHY. if ((status & BMSR_LSTATUS) && !phy_default && (phy->type != UNKNOWN)) { phy_default = phy; } else { status = mdio_read(ioaddr, phy->phy_id, MII_BMCR); mdio_write(ioaddr, phy->phy_id, MII_BMCR, status | BMCR_ANENABLE | BMCR_ISOLATE); if (phy->type == HOME) phy_home = phy; else if (phy->type == LAN) phy_lan = phy; } } if (!phy_default) { if (phy_home) phy_default = phy_home; else if (phy_lan) phy_default = phy_lan; else phy_default = list_first_entry(&tp->first_phy, struct sis190_phy, list); } if (mii_if->phy_id != phy_default->phy_id) { mii_if->phy_id = phy_default->phy_id; if (netif_msg_probe(tp)) pr_info("%s: Using transceiver at address %d as default\n", pci_name(tp->pci_dev), mii_if->phy_id); } status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR); status &= (~BMCR_ISOLATE); mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status); status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR); return status; } static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp, struct sis190_phy *phy, unsigned int phy_id, u16 mii_status) { void __iomem *ioaddr = tp->mmio_addr; struct mii_chip_info *p; INIT_LIST_HEAD(&phy->list); phy->status = mii_status; phy->phy_id = phy_id; phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1); phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2); for (p = mii_chip_table; p->type; p++) { if ((p->id[0] == phy->id[0]) && (p->id[1] == (phy->id[1] & 0xfff0))) { break; } } if (p->id[1]) { phy->type = (p->type == MIX) ? ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ? 
LAN : HOME) : p->type; tp->features |= p->feature; if (netif_msg_probe(tp)) pr_info("%s: %s transceiver at address %d\n", pci_name(tp->pci_dev), p->name, phy_id); } else { phy->type = UNKNOWN; if (netif_msg_probe(tp)) pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n", pci_name(tp->pci_dev), phy->id[0], (phy->id[1] & 0xfff0), phy_id); } } static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp) { if (tp->features & F_PHY_88E1111) { void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; u16 reg[2][2] = { { 0x808b, 0x0ce1 }, { 0x808f, 0x0c60 } }, *p; p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1]; mdio_write(ioaddr, phy_id, 0x1b, p[0]); udelay(200); mdio_write(ioaddr, phy_id, 0x14, p[1]); udelay(200); } } /** * sis190_mii_probe - Probe MII PHY for sis190 * @dev: the net device to probe for * * Search for total of 32 possible mii phy addresses. * Identify and set current phy if found one, * return error if it failed to found. */ static int sis190_mii_probe(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct mii_if_info *mii_if = &tp->mii_if; void __iomem *ioaddr = tp->mmio_addr; int phy_id; int rc = 0; INIT_LIST_HEAD(&tp->first_phy); for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { struct sis190_phy *phy; u16 status; status = mdio_read_latched(ioaddr, phy_id, MII_BMSR); // Try next mii if the current one is not accessible. 
if (status == 0xffff || status == 0x0000) continue; phy = kmalloc(sizeof(*phy), GFP_KERNEL); if (!phy) { sis190_free_phy(&tp->first_phy); rc = -ENOMEM; goto out; } sis190_init_phy(dev, tp, phy, phy_id, status); list_add(&tp->first_phy, &phy->list); } if (list_empty(&tp->first_phy)) { if (netif_msg_probe(tp)) pr_info("%s: No MII transceivers found!\n", pci_name(tp->pci_dev)); rc = -EIO; goto out; } /* Select default PHY for mac */ sis190_default_phy(dev); sis190_mii_probe_88e1111_fixup(tp); mii_if->dev = dev; mii_if->mdio_read = __mdio_read; mii_if->mdio_write = __mdio_write; mii_if->phy_id_mask = PHY_ID_ANY; mii_if->reg_num_mask = MII_REG_ANY; out: return rc; } static void sis190_mii_remove(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); sis190_free_phy(&tp->first_phy); } static void sis190_release_board(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct sis190_private *tp = netdev_priv(dev); iounmap(tp->mmio_addr); pci_release_regions(pdev); pci_disable_device(pdev); free_netdev(dev); } static struct net_device *sis190_init_board(struct pci_dev *pdev) { struct sis190_private *tp; struct net_device *dev; void __iomem *ioaddr; int rc; dev = alloc_etherdev(sizeof(*tp)); if (!dev) { rc = -ENOMEM; goto err_out_0; } SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->dev = dev; tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); rc = pci_enable_device(pdev); if (rc < 0) { if (netif_msg_probe(tp)) pr_err("%s: enable failure\n", pci_name(pdev)); goto err_free_dev_1; } rc = -ENODEV; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { if (netif_msg_probe(tp)) pr_err("%s: region #0 is no MMIO resource\n", pci_name(pdev)); goto err_pci_disable_2; } if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { if (netif_msg_probe(tp)) pr_err("%s: invalid PCI region size(s)\n", pci_name(pdev)); goto err_pci_disable_2; } rc = pci_request_regions(pdev, DRV_NAME); if (rc < 0) { if (netif_msg_probe(tp)) 
pr_err("%s: could not request regions\n", pci_name(pdev)); goto err_pci_disable_2; } rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (rc < 0) { if (netif_msg_probe(tp)) pr_err("%s: DMA configuration failed\n", pci_name(pdev)); goto err_free_res_3; } pci_set_master(pdev); ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); if (!ioaddr) { if (netif_msg_probe(tp)) pr_err("%s: cannot remap MMIO, aborting\n", pci_name(pdev)); rc = -EIO; goto err_free_res_3; } tp->pci_dev = pdev; tp->mmio_addr = ioaddr; tp->link_status = LNK_OFF; sis190_irq_mask_and_ack(ioaddr); sis190_soft_reset(ioaddr); out: return dev; err_free_res_3: pci_release_regions(pdev); err_pci_disable_2: pci_disable_device(pdev); err_free_dev_1: free_netdev(dev); err_out_0: dev = ERR_PTR(rc); goto out; } static void sis190_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u8 tmp8; /* Disable Tx, if not already */ tmp8 = SIS_R8(TxControl); if (tmp8 & CmdTxEnb) SIS_W8(TxControl, tmp8 & ~CmdTxEnb); netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n", SIS_R32(TxControl), SIS_R32(TxSts)); /* Disable interrupts by clearing the interrupt mask. */ SIS_W32(IntrMask, 0x0000); /* Stop a shared interrupt from scavenging while we are. */ spin_lock_irq(&tp->lock); sis190_tx_clear(tp); spin_unlock_irq(&tp->lock); /* ...and finally, reset everything. */ sis190_hw_start(dev); netif_wake_queue(dev); } static void sis190_set_rgmii(struct sis190_private *tp, u8 reg) { tp->features |= (reg & 0x80) ? 
F_HAS_RGMII : 0; } static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; __le16 addr[ETH_ALEN / 2]; u16 sig; int i; if (netif_msg_probe(tp)) pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev)); /* Check to see if there is a sane EEPROM */ sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature); if ((sig == 0xffff) || (sig == 0x0000)) { if (netif_msg_probe(tp)) pr_info("%s: Error EEPROM read %x\n", pci_name(pdev), sig); return -EIO; } /* Get MAC address from EEPROM */ for (i = 0; i < ETH_ALEN / 2; i++) { u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); addr[i] = cpu_to_le16(w); } eth_hw_addr_set(dev, (u8 *)addr); sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo)); return 0; } /** * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model * @pdev: PCI device * @dev: network device to get address for * * SiS96x model, use APC CMOS RAM to store MAC address. * APC CMOS RAM is accessed through ISA bridge. * MAC address is read into @net_dev->dev_addr. */ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev, struct net_device *dev) { static const u16 ids[] = { 0x0965, 0x0966, 0x0968 }; struct sis190_private *tp = netdev_priv(dev); struct pci_dev *isa_bridge; u8 addr[ETH_ALEN]; u8 reg, tmp8; unsigned int i; if (netif_msg_probe(tp)) pr_info("%s: Read MAC address from APC\n", pci_name(pdev)); for (i = 0; i < ARRAY_SIZE(ids); i++) { isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL); if (isa_bridge) break; } if (!isa_bridge) { if (netif_msg_probe(tp)) pr_info("%s: Can not find ISA bridge\n", pci_name(pdev)); return -EIO; } /* Enable port 78h & 79h to access APC Registers. 
*/ pci_read_config_byte(isa_bridge, 0x48, &tmp8); reg = (tmp8 & ~0x02); pci_write_config_byte(isa_bridge, 0x48, reg); udelay(50); pci_read_config_byte(isa_bridge, 0x48, &reg); for (i = 0; i < ETH_ALEN; i++) { outb(0x9 + i, 0x78); addr[i] = inb(0x79); } eth_hw_addr_set(dev, addr); outb(0x12, 0x78); reg = inb(0x79); sis190_set_rgmii(tp, reg); /* Restore the value to ISA Bridge */ pci_write_config_byte(isa_bridge, 0x48, tmp8); pci_dev_put(isa_bridge); return 0; } /** * sis190_init_rxfilter - Initialize the Rx filter * @dev: network device to initialize * * Set receive filter address to our MAC address * and enable packet filtering. */ static inline void sis190_init_rxfilter(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u16 ctl; int i; ctl = SIS_R16(RxMacControl); /* * Disable packet filtering before setting filter. * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits * only and followed by RxMacAddr (6 bytes). Strange. -- FR */ SIS_W16(RxMacControl, ctl & ~0x0f00); for (i = 0; i < ETH_ALEN; i++) SIS_W8(RxMacAddr + i, dev->dev_addr[i]); SIS_W16(RxMacControl, ctl); SIS_PCI_COMMIT(); } static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) { int rc; rc = sis190_get_mac_addr_from_eeprom(pdev, dev); if (rc < 0) { u8 reg; pci_read_config_byte(pdev, 0x73, &reg); if (reg & 0x00000001) rc = sis190_get_mac_addr_from_apc(pdev, dev); } return rc; } static void sis190_set_speed_auto(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; int val; netif_info(tp, link, dev, "Enabling Auto-negotiation\n"); val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0 // unchanged. mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) | ADVERTISE_100FULL | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_10HALF); // Enable 1000 Full Mode. 
mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL); // Enable auto-negotiation and restart auto-negotiation. mdio_write(ioaddr, phy_id, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); } static int sis190_get_link_ksettings(struct net_device *dev, struct ethtool_link_ksettings *cmd) { struct sis190_private *tp = netdev_priv(dev); mii_ethtool_get_link_ksettings(&tp->mii_if, cmd); return 0; } static int sis190_set_link_ksettings(struct net_device *dev, const struct ethtool_link_ksettings *cmd) { struct sis190_private *tp = netdev_priv(dev); return mii_ethtool_set_link_ksettings(&tp->mii_if, cmd); } static void sis190_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct sis190_private *tp = netdev_priv(dev); strscpy(info->driver, DRV_NAME, sizeof(info->driver)); strscpy(info->version, DRV_VERSION, sizeof(info->version)); strscpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); } static int sis190_get_regs_len(struct net_device *dev) { return SIS190_REGS_SIZE; } static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct sis190_private *tp = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&tp->lock, flags); memcpy_fromio(p, tp->mmio_addr, regs->len); spin_unlock_irqrestore(&tp->lock, flags); } static int sis190_nway_reset(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); return mii_nway_restart(&tp->mii_if); } static u32 sis190_get_msglevel(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); return tp->msg_enable; } static void sis190_set_msglevel(struct net_device *dev, u32 value) { struct sis190_private *tp = netdev_priv(dev); tp->msg_enable = value; } static const struct ethtool_ops sis190_ethtool_ops = { .get_drvinfo = sis190_get_drvinfo, .get_regs_len = sis190_get_regs_len, .get_regs = sis190_get_regs, .get_link = ethtool_op_get_link, .get_msglevel = sis190_get_msglevel, .set_msglevel = sis190_set_msglevel, .nway_reset = 
sis190_nway_reset, .get_link_ksettings = sis190_get_link_ksettings, .set_link_ksettings = sis190_set_link_ksettings, }; static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct sis190_private *tp = netdev_priv(dev); return !netif_running(dev) ? -EINVAL : generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL); } static int sis190_mac_addr(struct net_device *dev, void *p) { int rc; rc = eth_mac_addr(dev, p); if (!rc) sis190_init_rxfilter(dev); return rc; } static const struct net_device_ops sis190_netdev_ops = { .ndo_open = sis190_open, .ndo_stop = sis190_close, .ndo_eth_ioctl = sis190_ioctl, .ndo_start_xmit = sis190_start_xmit, .ndo_tx_timeout = sis190_tx_timeout, .ndo_set_rx_mode = sis190_set_rx_mode, .ndo_set_mac_address = sis190_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sis190_netpoll, #endif }; static int sis190_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version = 0; struct sis190_private *tp; struct net_device *dev; void __iomem *ioaddr; int rc; if (!printed_version) { if (netif_msg_drv(&debug)) pr_info(SIS190_DRIVER_NAME " loaded\n"); printed_version = 1; } dev = sis190_init_board(pdev); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto out; } pci_set_drvdata(pdev, dev); tp = netdev_priv(dev); ioaddr = tp->mmio_addr; rc = sis190_get_mac_addr(pdev, dev); if (rc < 0) goto err_release_board; sis190_init_rxfilter(dev); INIT_WORK(&tp->phy_task, sis190_phy_task); dev->netdev_ops = &sis190_netdev_ops; dev->ethtool_ops = &sis190_ethtool_ops; dev->watchdog_timeo = SIS190_TX_TIMEOUT; spin_lock_init(&tp->lock); rc = sis190_mii_probe(dev); if (rc < 0) goto err_release_board; rc = register_netdev(dev); if (rc < 0) goto err_remove_mii; if (netif_msg_probe(tp)) { netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", pci_name(pdev), sis_chip_info[ent->driver_data].name, ioaddr, pdev->irq, dev->dev_addr); netdev_info(dev, "%s mode.\n", (tp->features & 
F_HAS_RGMII) ? "RGMII" : "GMII"); } netif_carrier_off(dev); sis190_set_speed_auto(dev); out: return rc; err_remove_mii: sis190_mii_remove(dev); err_release_board: sis190_release_board(pdev); goto out; } static void sis190_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct sis190_private *tp = netdev_priv(dev); sis190_mii_remove(dev); cancel_work_sync(&tp->phy_task); unregister_netdev(dev); sis190_release_board(pdev); } static struct pci_driver sis190_pci_driver = { .name = DRV_NAME, .id_table = sis190_pci_tbl, .probe = sis190_init_one, .remove = sis190_remove_one, }; module_pci_driver(sis190_pci_driver);
// SPDX-License-Identifier: GPL-2.0 #include <dt-bindings/clock/tegra186-clock.h> #include <dt-bindings/gpio/tegra186-gpio.h> #include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/mailbox/tegra186-hsp.h> #include <dt-bindings/memory/tegra186-mc.h> #include <dt-bindings/pinctrl/pinctrl-tegra-io-pad.h> #include <dt-bindings/power/tegra186-powergate.h> #include <dt-bindings/reset/tegra186-reset.h> #include <dt-bindings/thermal/tegra186-bpmp-thermal.h> / { compatible = "nvidia,tegra186"; interrupt-parent = <&gic>; #address-cells = <2>; #size-cells = <2>; misc@100000 { compatible = "nvidia,tegra186-misc"; reg = <0x0 0x00100000 0x0 0xf000>, <0x0 0x0010f000 0x0 0x1000>; }; gpio: gpio@2200000 { compatible = "nvidia,tegra186-gpio"; reg-names = "security", "gpio"; reg = <0x0 0x2200000 0x0 0x10000>, <0x0 0x2210000 0x0 0x10000>; interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 53 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 56 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 59 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 180 IRQ_TYPE_LEVEL_HIGH>; #interrupt-cells = <2>; interrupt-controller; #gpio-cells = <2>; gpio-controller; }; ethernet@2490000 { compatible = "nvidia,tegra186-eqos", "snps,dwc-qos-ethernet-4.10"; reg = <0x0 0x02490000 0x0 0x10000>; interrupts = <GIC_SPI 194 IRQ_TYPE_LEVEL_HIGH>, /* common */ <GIC_SPI 195 IRQ_TYPE_LEVEL_HIGH>, /* power */ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>, /* rx0 */ <GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>, /* tx0 */ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>, /* rx1 */ <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>, /* tx1 */ <GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>, /* rx2 */ <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>, /* tx2 */ <GIC_SPI 193 IRQ_TYPE_LEVEL_HIGH>, /* rx3 */ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>; /* tx3 */ clocks = <&bpmp TEGRA186_CLK_AXI_CBB>, <&bpmp TEGRA186_CLK_EQOS_AXI>, <&bpmp TEGRA186_CLK_EQOS_RX>, <&bpmp TEGRA186_CLK_EQOS_TX>, <&bpmp TEGRA186_CLK_EQOS_PTP_REF>; clock-names = "master_bus", "slave_bus", "rx", "tx", "ptp_ref"; resets = <&bpmp 
TEGRA186_RESET_EQOS>; reset-names = "eqos"; interconnects = <&mc TEGRA186_MEMORY_CLIENT_EQOSR &emc>, <&mc TEGRA186_MEMORY_CLIENT_EQOSW &emc>; interconnect-names = "dma-mem", "write"; iommus = <&smmu TEGRA186_SID_EQOS>; status = "disabled"; snps,write-requests = <1>; snps,read-requests = <3>; snps,burst-map = <0x7>; snps,txpbl = <32>; snps,rxpbl = <8>; }; gpcdma: dma-controller@2600000 { compatible = "nvidia,tegra186-gpcdma"; reg = <0x0 0x2600000 0x0 0x210000>; resets = <&bpmp TEGRA186_RESET_GPCDMA>; reset-names = "gpcdma"; interrupts = <GIC_SPI 75 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 76 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 80 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 92 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 93 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 94 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; iommus = <&smmu TEGRA186_SID_GPCDMA_0>; dma-coherent; dma-channel-mask = <0xfffffffe>; status = "okay"; }; aconnect@2900000 { compatible = "nvidia,tegra186-aconnect", "nvidia,tegra210-aconnect"; clocks = <&bpmp TEGRA186_CLK_APE>, <&bpmp TEGRA186_CLK_APB2APE>; clock-names = "ape", "apb2ape"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_AUD>; 
#address-cells = <1>; #size-cells = <1>; ranges = <0x02900000 0x0 0x02900000 0x200000>; status = "disabled"; tegra_ahub: ahub@2900800 { compatible = "nvidia,tegra186-ahub"; reg = <0x02900800 0x800>; clocks = <&bpmp TEGRA186_CLK_AHUB>; clock-names = "ahub"; assigned-clocks = <&bpmp TEGRA186_CLK_AHUB>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLP_OUT0>; assigned-clock-rates = <81600000>; #address-cells = <1>; #size-cells = <1>; ranges = <0x02900800 0x02900800 0x11800>; status = "disabled"; tegra_i2s1: i2s@2901000 { compatible = "nvidia,tegra186-i2s", "nvidia,tegra210-i2s"; reg = <0x2901000 0x100>; clocks = <&bpmp TEGRA186_CLK_I2S1>, <&bpmp TEGRA186_CLK_I2S1_SYNC_INPUT>; clock-names = "i2s", "sync_input"; assigned-clocks = <&bpmp TEGRA186_CLK_I2S1>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <1536000>; sound-name-prefix = "I2S1"; status = "disabled"; }; tegra_i2s2: i2s@2901100 { compatible = "nvidia,tegra186-i2s", "nvidia,tegra210-i2s"; reg = <0x2901100 0x100>; clocks = <&bpmp TEGRA186_CLK_I2S2>, <&bpmp TEGRA186_CLK_I2S2_SYNC_INPUT>; clock-names = "i2s", "sync_input"; assigned-clocks = <&bpmp TEGRA186_CLK_I2S2>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <1536000>; sound-name-prefix = "I2S2"; status = "disabled"; }; tegra_i2s3: i2s@2901200 { compatible = "nvidia,tegra186-i2s", "nvidia,tegra210-i2s"; reg = <0x2901200 0x100>; clocks = <&bpmp TEGRA186_CLK_I2S3>, <&bpmp TEGRA186_CLK_I2S3_SYNC_INPUT>; clock-names = "i2s", "sync_input"; assigned-clocks = <&bpmp TEGRA186_CLK_I2S3>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <1536000>; sound-name-prefix = "I2S3"; status = "disabled"; }; tegra_i2s4: i2s@2901300 { compatible = "nvidia,tegra186-i2s", "nvidia,tegra210-i2s"; reg = <0x2901300 0x100>; clocks = <&bpmp TEGRA186_CLK_I2S4>, <&bpmp TEGRA186_CLK_I2S4_SYNC_INPUT>; clock-names = "i2s", "sync_input"; assigned-clocks = <&bpmp TEGRA186_CLK_I2S4>; 
assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <1536000>; sound-name-prefix = "I2S4"; status = "disabled"; }; tegra_i2s5: i2s@2901400 { compatible = "nvidia,tegra186-i2s", "nvidia,tegra210-i2s"; reg = <0x2901400 0x100>; clocks = <&bpmp TEGRA186_CLK_I2S5>, <&bpmp TEGRA186_CLK_I2S5_SYNC_INPUT>; clock-names = "i2s", "sync_input"; assigned-clocks = <&bpmp TEGRA186_CLK_I2S5>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <1536000>; sound-name-prefix = "I2S5"; status = "disabled"; }; tegra_i2s6: i2s@2901500 { compatible = "nvidia,tegra186-i2s", "nvidia,tegra210-i2s"; reg = <0x2901500 0x100>; clocks = <&bpmp TEGRA186_CLK_I2S6>, <&bpmp TEGRA186_CLK_I2S6_SYNC_INPUT>; clock-names = "i2s", "sync_input"; assigned-clocks = <&bpmp TEGRA186_CLK_I2S6>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <1536000>; sound-name-prefix = "I2S6"; status = "disabled"; }; tegra_sfc1: sfc@2902000 { compatible = "nvidia,tegra186-sfc", "nvidia,tegra210-sfc"; reg = <0x2902000 0x200>; sound-name-prefix = "SFC1"; status = "disabled"; }; tegra_sfc2: sfc@2902200 { compatible = "nvidia,tegra186-sfc", "nvidia,tegra210-sfc"; reg = <0x2902200 0x200>; sound-name-prefix = "SFC2"; status = "disabled"; }; tegra_sfc3: sfc@2902400 { compatible = "nvidia,tegra186-sfc", "nvidia,tegra210-sfc"; reg = <0x2902400 0x200>; sound-name-prefix = "SFC3"; status = "disabled"; }; tegra_sfc4: sfc@2902600 { compatible = "nvidia,tegra186-sfc", "nvidia,tegra210-sfc"; reg = <0x2902600 0x200>; sound-name-prefix = "SFC4"; status = "disabled"; }; tegra_amx1: amx@2903000 { compatible = "nvidia,tegra186-amx", "nvidia,tegra210-amx"; reg = <0x2903000 0x100>; sound-name-prefix = "AMX1"; status = "disabled"; }; tegra_amx2: amx@2903100 { compatible = "nvidia,tegra186-amx", "nvidia,tegra210-amx"; reg = <0x2903100 0x100>; sound-name-prefix = "AMX2"; status = "disabled"; }; tegra_amx3: amx@2903200 { compatible = "nvidia,tegra186-amx", 
"nvidia,tegra210-amx"; reg = <0x2903200 0x100>; sound-name-prefix = "AMX3"; status = "disabled"; }; tegra_amx4: amx@2903300 { compatible = "nvidia,tegra186-amx", "nvidia,tegra210-amx"; reg = <0x2903300 0x100>; sound-name-prefix = "AMX4"; status = "disabled"; }; tegra_adx1: adx@2903800 { compatible = "nvidia,tegra186-adx", "nvidia,tegra210-adx"; reg = <0x2903800 0x100>; sound-name-prefix = "ADX1"; status = "disabled"; }; tegra_adx2: adx@2903900 { compatible = "nvidia,tegra186-adx", "nvidia,tegra210-adx"; reg = <0x2903900 0x100>; sound-name-prefix = "ADX2"; status = "disabled"; }; tegra_adx3: adx@2903a00 { compatible = "nvidia,tegra186-adx", "nvidia,tegra210-adx"; reg = <0x2903a00 0x100>; sound-name-prefix = "ADX3"; status = "disabled"; }; tegra_adx4: adx@2903b00 { compatible = "nvidia,tegra186-adx", "nvidia,tegra210-adx"; reg = <0x2903b00 0x100>; sound-name-prefix = "ADX4"; status = "disabled"; }; tegra_dmic1: dmic@2904000 { compatible = "nvidia,tegra210-dmic"; reg = <0x2904000 0x100>; clocks = <&bpmp TEGRA186_CLK_DMIC1>; clock-names = "dmic"; assigned-clocks = <&bpmp TEGRA186_CLK_DMIC1>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <3072000>; sound-name-prefix = "DMIC1"; status = "disabled"; }; tegra_dmic2: dmic@2904100 { compatible = "nvidia,tegra210-dmic"; reg = <0x2904100 0x100>; clocks = <&bpmp TEGRA186_CLK_DMIC2>; clock-names = "dmic"; assigned-clocks = <&bpmp TEGRA186_CLK_DMIC2>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <3072000>; sound-name-prefix = "DMIC2"; status = "disabled"; }; tegra_dmic3: dmic@2904200 { compatible = "nvidia,tegra210-dmic"; reg = <0x2904200 0x100>; clocks = <&bpmp TEGRA186_CLK_DMIC3>; clock-names = "dmic"; assigned-clocks = <&bpmp TEGRA186_CLK_DMIC3>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <3072000>; sound-name-prefix = "DMIC3"; status = "disabled"; }; tegra_dmic4: dmic@2904300 { compatible = "nvidia,tegra210-dmic"; reg 
= <0x2904300 0x100>; clocks = <&bpmp TEGRA186_CLK_DMIC4>; clock-names = "dmic"; assigned-clocks = <&bpmp TEGRA186_CLK_DMIC4>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <3072000>; sound-name-prefix = "DMIC4"; status = "disabled"; }; tegra_dspk1: dspk@2905000 { compatible = "nvidia,tegra186-dspk"; reg = <0x2905000 0x100>; clocks = <&bpmp TEGRA186_CLK_DSPK1>; clock-names = "dspk"; assigned-clocks = <&bpmp TEGRA186_CLK_DSPK1>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <12288000>; sound-name-prefix = "DSPK1"; status = "disabled"; }; tegra_dspk2: dspk@2905100 { compatible = "nvidia,tegra186-dspk"; reg = <0x2905100 0x100>; clocks = <&bpmp TEGRA186_CLK_DSPK2>; clock-names = "dspk"; assigned-clocks = <&bpmp TEGRA186_CLK_DSPK2>; assigned-clock-parents = <&bpmp TEGRA186_CLK_PLL_A_OUT0>; assigned-clock-rates = <12288000>; sound-name-prefix = "DSPK2"; status = "disabled"; }; tegra_ope1: processing-engine@2908000 { compatible = "nvidia,tegra186-ope", "nvidia,tegra210-ope"; reg = <0x2908000 0x100>; #address-cells = <1>; #size-cells = <1>; ranges; sound-name-prefix = "OPE1"; status = "disabled"; equalizer@2908100 { compatible = "nvidia,tegra186-peq", "nvidia,tegra210-peq"; reg = <0x2908100 0x100>; }; dynamic-range-compressor@2908200 { compatible = "nvidia,tegra186-mbdrc", "nvidia,tegra210-mbdrc"; reg = <0x2908200 0x200>; }; }; tegra_mvc1: mvc@290a000 { compatible = "nvidia,tegra186-mvc", "nvidia,tegra210-mvc"; reg = <0x290a000 0x200>; sound-name-prefix = "MVC1"; status = "disabled"; }; tegra_mvc2: mvc@290a200 { compatible = "nvidia,tegra186-mvc", "nvidia,tegra210-mvc"; reg = <0x290a200 0x200>; sound-name-prefix = "MVC2"; status = "disabled"; }; tegra_amixer: amixer@290bb00 { compatible = "nvidia,tegra186-amixer", "nvidia,tegra210-amixer"; reg = <0x290bb00 0x800>; sound-name-prefix = "MIXER1"; status = "disabled"; }; tegra_admaif: admaif@290f000 { compatible = "nvidia,tegra186-admaif"; reg = <0x0290f000 
0x1000>; dmas = <&adma 1>, <&adma 1>, <&adma 2>, <&adma 2>, <&adma 3>, <&adma 3>, <&adma 4>, <&adma 4>, <&adma 5>, <&adma 5>, <&adma 6>, <&adma 6>, <&adma 7>, <&adma 7>, <&adma 8>, <&adma 8>, <&adma 9>, <&adma 9>, <&adma 10>, <&adma 10>, <&adma 11>, <&adma 11>, <&adma 12>, <&adma 12>, <&adma 13>, <&adma 13>, <&adma 14>, <&adma 14>, <&adma 15>, <&adma 15>, <&adma 16>, <&adma 16>, <&adma 17>, <&adma 17>, <&adma 18>, <&adma 18>, <&adma 19>, <&adma 19>, <&adma 20>, <&adma 20>; dma-names = "rx1", "tx1", "rx2", "tx2", "rx3", "tx3", "rx4", "tx4", "rx5", "tx5", "rx6", "tx6", "rx7", "tx7", "rx8", "tx8", "rx9", "tx9", "rx10", "tx10", "rx11", "tx11", "rx12", "tx12", "rx13", "tx13", "rx14", "tx14", "rx15", "tx15", "rx16", "tx16", "rx17", "tx17", "rx18", "tx18", "rx19", "tx19", "rx20", "tx20"; status = "disabled"; }; tegra_asrc: asrc@2910000 { compatible = "nvidia,tegra186-asrc"; reg = <0x2910000 0x2000>; sound-name-prefix = "ASRC1"; status = "disabled"; }; }; adma: dma-controller@2930000 { compatible = "nvidia,tegra186-adma"; reg = <0x02930000 0x20000>; interrupt-parent = <&agic>; interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 26 
IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; clocks = <&bpmp TEGRA186_CLK_AHUB>; clock-names = "d_audio"; status = "disabled"; }; agic: interrupt-controller@2a40000 { compatible = "nvidia,tegra186-agic", "nvidia,tegra210-agic"; #interrupt-cells = <3>; interrupt-controller; reg = <0x02a41000 0x1000>, <0x02a42000 0x2000>; interrupts = <GIC_SPI 145 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>; clocks = <&bpmp TEGRA186_CLK_APE>; clock-names = "clk"; status = "disabled"; }; }; mc: memory-controller@2c00000 { compatible = "nvidia,tegra186-mc"; reg = <0x0 0x02c00000 0x0 0x10000>, /* MC-SID */ <0x0 0x02c10000 0x0 0x10000>, /* Broadcast channel */ <0x0 0x02c20000 0x0 0x10000>, /* MC0 */ <0x0 0x02c30000 0x0 0x10000>, /* MC1 */ <0x0 0x02c40000 0x0 0x10000>, /* MC2 */ <0x0 0x02c50000 0x0 0x10000>; /* MC3 */ reg-names = "sid", "broadcast", "ch0", "ch1", "ch2", "ch3"; interrupts = <GIC_SPI 223 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; #interconnect-cells = <1>; #address-cells = <2>; #size-cells = <2>; ranges = <0x0 0x02c00000 0x0 0x02c00000 0x0 0xb0000>; /* * Memory clients have access to all 40 bits that the memory * controller can address. 
*/
		dma-ranges = <0x0 0x0 0x0 0x0 0x100 0x0>;

		emc: external-memory-controller@2c60000 {
			compatible = "nvidia,tegra186-emc";
			reg = <0x0 0x02c60000 0x0 0x50000>;
			interrupts = <GIC_SPI 224 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&bpmp TEGRA186_CLK_EMC>;
			clock-names = "emc";

			#interconnect-cells = <0>;

			nvidia,bpmp = <&bpmp>;
		};
	};

	timer@3010000 {
		compatible = "nvidia,tegra186-timer";
		reg = <0x0 0x03010000 0x0 0x000e0000>;
		interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>;
		status = "okay";
	};

	uarta: serial@3100000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x03100000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTA>;
		resets = <&bpmp TEGRA186_RESET_UARTA>;
		status = "disabled";
	};

	uartb: serial@3110000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x03110000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTB>;
		clock-names = "serial";
		resets = <&bpmp TEGRA186_RESET_UARTB>;
		reset-names = "serial";
		status = "disabled";
	};

	uartd: serial@3130000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x03130000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTD>;
		clock-names = "serial";
		resets = <&bpmp TEGRA186_RESET_UARTD>;
		reset-names = "serial";
		status = "disabled";
	};

	uarte: serial@3140000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x03140000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTE>;
		clock-names = "serial";
		resets = <&bpmp TEGRA186_RESET_UARTE>;
		reset-names = "serial";
		status = "disabled";
	};

	uartf: serial@3150000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x03150000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTF>;
		clock-names = "serial";
		resets = <&bpmp TEGRA186_RESET_UARTF>;
		reset-names = "serial";
		status = "disabled";
	};

	gen1_i2c: i2c@3160000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x03160000 0x0 0x10000>;
		interrupts = <GIC_SPI 25 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C1>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C1>;
		reset-names = "i2c";
		dmas = <&gpcdma 21>, <&gpcdma 21>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	cam_i2c: i2c@3180000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x03180000 0x0 0x10000>;
		interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C3>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C3>;
		reset-names = "i2c";
		dmas = <&gpcdma 23>, <&gpcdma 23>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	/* shares pads with dpaux1 */
	dp_aux_ch1_i2c: i2c@3190000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x03190000 0x0 0x10000>;
		interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C4>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C4>;
		reset-names = "i2c";
		pinctrl-names = "default", "idle";
		pinctrl-0 = <&state_dpaux1_i2c>;
		pinctrl-1 = <&state_dpaux1_off>;
		dmas = <&gpcdma 26>, <&gpcdma 26>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	/* controlled by BPMP, should not be enabled */
	pwr_i2c: i2c@31a0000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x031a0000 0x0 0x10000>;
		interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C5>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C5>;
		reset-names = "i2c";
		status = "disabled";
	};

	/* shares pads with dpaux0 */
	dp_aux_ch0_i2c: i2c@31b0000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x031b0000 0x0 0x10000>;
		interrupts = <GIC_SPI 30 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C6>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C6>;
		reset-names = "i2c";
		pinctrl-names = "default", "idle";
		pinctrl-0 = <&state_dpaux_i2c>;
		pinctrl-1 = <&state_dpaux_off>;
		dmas = <&gpcdma 30>, <&gpcdma 30>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	gen7_i2c: i2c@31c0000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x031c0000 0x0 0x10000>;
		interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C7>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C7>;
		reset-names = "i2c";
		dmas = <&gpcdma 27>, <&gpcdma 27>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	gen9_i2c: i2c@31e0000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x031e0000 0x0 0x10000>;
		interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C9>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C9>;
		reset-names = "i2c";
		dmas = <&gpcdma 31>, <&gpcdma 31>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	pwm1: pwm@3280000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x3280000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM1>;
		resets = <&bpmp TEGRA186_RESET_PWM1>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pwm2: pwm@3290000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x3290000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM2>;
		resets = <&bpmp TEGRA186_RESET_PWM2>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pwm3: pwm@32a0000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x32a0000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM3>;
		resets = <&bpmp TEGRA186_RESET_PWM3>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pwm5: pwm@32c0000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x32c0000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM5>;
		resets = <&bpmp TEGRA186_RESET_PWM5>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pwm6: pwm@32d0000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x32d0000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM6>;
		resets = <&bpmp TEGRA186_RESET_PWM6>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pwm7: pwm@32e0000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x32e0000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM7>;
		resets = <&bpmp TEGRA186_RESET_PWM7>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pwm8: pwm@32f0000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0x32f0000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM8>;
		resets = <&bpmp TEGRA186_RESET_PWM8>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	sdmmc1: mmc@3400000 {
		compatible = "nvidia,tegra186-sdhci";
		reg = <0x0 0x03400000 0x0 0x10000>;
		interrupts = <GIC_SPI 62 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_SDMMC1>,
			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
		clock-names = "sdhci", "tmclk";
		resets = <&bpmp TEGRA186_RESET_SDMMC1>;
		reset-names = "sdhci";
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRA &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_SDMMCWA &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_SDMMC1>;
		pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
		pinctrl-0 = <&sdmmc1_3v3>;
		pinctrl-1 = <&sdmmc1_1v8>;
		nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x07>;
		nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x06>;
		nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x07>;
		nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x07>;
		nvidia,pad-autocal-pull-up-offset-sdr104 = <0x03>;
		nvidia,pad-autocal-pull-down-offset-sdr104 = <0x05>;
		nvidia,default-tap = <0x5>;
		nvidia,default-trim = <0xb>;
		assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC1>, <&bpmp
TEGRA186_CLK_PLLP_OUT0>;
		assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLP_OUT0>;
		status = "disabled";
	};

	sdmmc2: mmc@3420000 {
		compatible = "nvidia,tegra186-sdhci";
		reg = <0x0 0x03420000 0x0 0x10000>;
		interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_SDMMC2>,
			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
		clock-names = "sdhci", "tmclk";
		resets = <&bpmp TEGRA186_RESET_SDMMC2>;
		reset-names = "sdhci";
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAA &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_SDMMCWAA &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_SDMMC2>;
		pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
		pinctrl-0 = <&sdmmc2_3v3>;
		pinctrl-1 = <&sdmmc2_1v8>;
		nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x07>;
		nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x06>;
		nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x07>;
		nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x07>;
		nvidia,default-tap = <0x5>;
		nvidia,default-trim = <0xb>;
		status = "disabled";
	};

	sdmmc3: mmc@3440000 {
		compatible = "nvidia,tegra186-sdhci";
		reg = <0x0 0x03440000 0x0 0x10000>;
		interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_SDMMC3>,
			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
		clock-names = "sdhci", "tmclk";
		resets = <&bpmp TEGRA186_RESET_SDMMC3>;
		reset-names = "sdhci";
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCR &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_SDMMCW &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_SDMMC3>;
		pinctrl-names = "sdmmc-3v3", "sdmmc-1v8";
		pinctrl-0 = <&sdmmc3_3v3>;
		pinctrl-1 = <&sdmmc3_1v8>;
		nvidia,pad-autocal-pull-up-offset-1v8 = <0x00>;
		nvidia,pad-autocal-pull-down-offset-1v8 = <0x7a>;
		nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x07>;
		nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x06>;
		nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x07>;
		nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x07>;
		nvidia,default-tap = <0x5>;
		nvidia,default-trim = <0xb>;
		status = "disabled";
	};

	sdmmc4: mmc@3460000 {
		compatible = "nvidia,tegra186-sdhci";
		reg = <0x0 0x03460000 0x0 0x10000>;
		interrupts = <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
			 <&bpmp TEGRA186_CLK_SDMMC_LEGACY_TM>;
		clock-names = "sdhci", "tmclk";
		assigned-clocks = <&bpmp TEGRA186_CLK_SDMMC4>,
				  <&bpmp TEGRA186_CLK_PLLC4_VCO>;
		assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLC4_VCO>;
		resets = <&bpmp TEGRA186_RESET_SDMMC4>;
		reset-names = "sdhci";
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SDMMCRAB &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_SDMMCWAB &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_SDMMC4>;
		nvidia,pad-autocal-pull-up-offset-hs400 = <0x05>;
		nvidia,pad-autocal-pull-down-offset-hs400 = <0x05>;
		nvidia,pad-autocal-pull-up-offset-1v8-timeout = <0x0a>;
		nvidia,pad-autocal-pull-down-offset-1v8-timeout = <0x0a>;
		nvidia,pad-autocal-pull-up-offset-3v3-timeout = <0x0a>;
		nvidia,pad-autocal-pull-down-offset-3v3-timeout = <0x0a>;
		nvidia,default-tap = <0x9>;
		nvidia,default-trim = <0x5>;
		nvidia,dqs-trim = <63>;
		mmc-hs400-1_8v;
		supports-cqe;
		status = "disabled";
	};

	sata@3507000 {
		compatible = "nvidia,tegra186-ahci";
		reg = <0x0 0x03507000 0x0 0x00002000>, /* AHCI */
		      <0x0 0x03500000 0x0 0x00007000>, /* SATA */
		      <0x0 0x03A90000 0x0 0x00010000>; /* SATA AUX */
		interrupts = <GIC_SPI 197 IRQ_TYPE_LEVEL_HIGH>;
		power-domains = <&bpmp TEGRA186_POWER_DOMAIN_SAX>;
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_SATAR &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_SATAW &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_SATA>;
		clocks = <&bpmp TEGRA186_CLK_SATA>,
			 <&bpmp TEGRA186_CLK_SATA_OOB>;
		clock-names = "sata", "sata-oob";
		assigned-clocks = <&bpmp TEGRA186_CLK_SATA>,
				  <&bpmp TEGRA186_CLK_SATA_OOB>;
		assigned-clock-parents = <&bpmp TEGRA186_CLK_PLLP_OUT0>,
					 <&bpmp TEGRA186_CLK_PLLP>;
		assigned-clock-rates = <102000000>,
				       <204000000>;
		resets = <&bpmp TEGRA186_RESET_SATA>,
			 <&bpmp TEGRA186_RESET_SATACOLD>;
		reset-names = "sata", "sata-cold";
		status = "disabled";
	};

	hda@3510000 {
		compatible = "nvidia,tegra186-hda", "nvidia,tegra30-hda";
		reg = <0x0 0x03510000 0x0 0x10000>;
		interrupts = <GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_HDA>,
			 <&bpmp TEGRA186_CLK_HDA2HDMICODEC>,
			 <&bpmp TEGRA186_CLK_HDA2CODEC_2X>;
		clock-names = "hda", "hda2hdmi", "hda2codec_2x";
		resets = <&bpmp TEGRA186_RESET_HDA>,
			 <&bpmp TEGRA186_RESET_HDA2HDMICODEC>,
			 <&bpmp TEGRA186_RESET_HDA2CODEC_2X>;
		reset-names = "hda", "hda2hdmi", "hda2codec_2x";
		power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>;
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_HDAR &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_HDAW &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_HDA>;
		status = "disabled";
	};

	padctl: padctl@3520000 {
		compatible = "nvidia,tegra186-xusb-padctl";
		reg = <0x0 0x03520000 0x0 0x1000>,
		      <0x0 0x03540000 0x0 0x1000>;
		reg-names = "padctl", "ao";
		interrupts = <GIC_SPI 167 IRQ_TYPE_LEVEL_HIGH>;
		resets = <&bpmp TEGRA186_RESET_XUSB_PADCTL>;
		reset-names = "padctl";
		status = "disabled";

		pads {
			usb2 {
				clocks = <&bpmp TEGRA186_CLK_USB2_TRK>;
				clock-names = "trk";
				status = "disabled";

				lanes {
					usb2-0 {
						status = "disabled";
						#phy-cells = <0>;
					};

					usb2-1 {
						status = "disabled";
						#phy-cells = <0>;
					};

					usb2-2 {
						status = "disabled";
						#phy-cells = <0>;
					};
				};
			};

			hsic {
				clocks = <&bpmp TEGRA186_CLK_HSIC_TRK>;
				clock-names = "trk";
				status = "disabled";

				lanes {
					hsic-0 {
						status = "disabled";
						#phy-cells = <0>;
					};
				};
			};

			usb3 {
				status = "disabled";

				lanes {
					usb3-0 {
						status = "disabled";
						#phy-cells = <0>;
					};

					usb3-1 {
						status = "disabled";
						#phy-cells = <0>;
					};

					usb3-2 {
						status = "disabled";
						#phy-cells = <0>;
					};
				};
			};
		};

		ports {
			usb2-0 {
				status = "disabled";
			};

			usb2-1 {
				status = "disabled";
			};

			usb2-2 {
				status = "disabled";
			};

			hsic-0 {
				status = "disabled";
			};

			usb3-0 {
				status = "disabled";
			};

			usb3-1 {
				status = "disabled";
			};

			usb3-2 {
				status = "disabled";
			};
		};
	};

	usb@3530000 {
		compatible = "nvidia,tegra186-xusb";
		reg = <0x0 0x03530000 0x0 0x8000>,
		      <0x0 0x03538000 0x0 0x1000>;
		reg-names = "hcd", "fpci";
		interrupts = <GIC_SPI 163 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 164 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_XUSB_HOST>,
			 <&bpmp TEGRA186_CLK_XUSB_FALCON>,
			 <&bpmp TEGRA186_CLK_XUSB_SS>,
			 <&bpmp TEGRA186_CLK_XUSB_CORE_SS>,
			 <&bpmp TEGRA186_CLK_CLK_M>,
			 <&bpmp TEGRA186_CLK_XUSB_FS>,
			 <&bpmp TEGRA186_CLK_PLLU>,
			 <&bpmp TEGRA186_CLK_CLK_M>,
			 <&bpmp TEGRA186_CLK_PLLE>;
		clock-names = "xusb_host", "xusb_falcon_src", "xusb_ss",
			      "xusb_ss_src", "xusb_hs_src", "xusb_fs_src",
			      "pll_u_480m", "clk_m", "pll_e";
		power-domains = <&bpmp TEGRA186_POWER_DOMAIN_XUSBC>,
				<&bpmp TEGRA186_POWER_DOMAIN_XUSBA>;
		power-domain-names = "xusb_host", "xusb_ss";
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_XUSB_HOSTR &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_XUSB_HOSTW &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_XUSB_HOST>;

		#address-cells = <1>;
		#size-cells = <0>;

		status = "disabled";

		nvidia,xusb-padctl = <&padctl>;
	};

	usb@3550000 {
		compatible = "nvidia,tegra186-xudc";
		reg = <0x0 0x03550000 0x0 0x8000>,
		      <0x0 0x03558000 0x0 0x1000>;
		reg-names = "base", "fpci";
		interrupts = <GIC_SPI 166 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_XUSB_CORE_DEV>,
			 <&bpmp TEGRA186_CLK_XUSB_SS>,
			 <&bpmp TEGRA186_CLK_XUSB_CORE_SS>,
			 <&bpmp TEGRA186_CLK_XUSB_FS>;
		clock-names = "dev", "ss", "ss_src", "fs_src";
		interconnects = <&mc TEGRA186_MEMORY_CLIENT_XUSB_DEVR &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_XUSB_DEVW &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_XUSB_DEV>;
		power-domains = <&bpmp TEGRA186_POWER_DOMAIN_XUSBB>,
				<&bpmp TEGRA186_POWER_DOMAIN_XUSBA>;
		power-domain-names = "dev", "ss";
		nvidia,xusb-padctl = <&padctl>;
		status = "disabled";
	};

	fuse@3820000 {
		compatible = "nvidia,tegra186-efuse";
		reg = <0x0 0x03820000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_FUSE>;
		clock-names = "fuse";
	};

	gic: interrupt-controller@3881000 {
		compatible = "arm,gic-400";
		#interrupt-cells = <3>;
		interrupt-controller;
		reg = <0x0 0x03881000
0x0 0x1000>,
		      <0x0 0x03882000 0x0 0x2000>,
		      <0x0 0x03884000 0x0 0x2000>,
		      <0x0 0x03886000 0x0 0x2000>;
		interrupts = <GIC_PPI 9 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
		interrupt-parent = <&gic>;
	};

	cec@3960000 {
		compatible = "nvidia,tegra186-cec";
		reg = <0x0 0x03960000 0x0 0x10000>;
		interrupts = <GIC_SPI 162 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_CEC>;
		clock-names = "cec";
		status = "disabled";
	};

	hsp_top0: hsp@3c00000 {
		compatible = "nvidia,tegra186-hsp";
		reg = <0x0 0x03c00000 0x0 0xa0000>;
		interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "doorbell";
		#mbox-cells = <2>;
		status = "disabled";
	};

	gen2_i2c: i2c@c240000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x0c240000 0x0 0x10000>;
		interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C2>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C2>;
		reset-names = "i2c";
		dmas = <&gpcdma 22>, <&gpcdma 22>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	gen8_i2c: i2c@c250000 {
		compatible = "nvidia,tegra186-i2c";
		reg = <0x0 0x0c250000 0x0 0x10000>;
		interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
		#address-cells = <1>;
		#size-cells = <0>;
		clocks = <&bpmp TEGRA186_CLK_I2C8>;
		clock-names = "div-clk";
		resets = <&bpmp TEGRA186_RESET_I2C8>;
		reset-names = "i2c";
		dmas = <&gpcdma 0>, <&gpcdma 0>;
		dma-names = "rx", "tx";
		status = "disabled";
	};

	uartc: serial@c280000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x0c280000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTC>;
		clock-names = "serial";
		resets = <&bpmp TEGRA186_RESET_UARTC>;
		reset-names = "serial";
		status = "disabled";
	};

	uartg: serial@c290000 {
		compatible = "nvidia,tegra186-uart", "nvidia,tegra20-uart";
		reg = <0x0 0x0c290000 0x0 0x40>;
		reg-shift = <2>;
		interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_UARTG>;
		clock-names = "serial";
		resets = <&bpmp TEGRA186_RESET_UARTG>;
		reset-names = "serial";
		status = "disabled";
	};

	rtc: rtc@c2a0000 {
		compatible = "nvidia,tegra186-rtc", "nvidia,tegra20-rtc";
		reg = <0 0x0c2a0000 0 0x10000>;
		interrupt-parent = <&pmc>;
		interrupts = <73 IRQ_TYPE_LEVEL_HIGH>;
		clocks = <&bpmp TEGRA186_CLK_CLK_32K>;
		clock-names = "rtc";
		status = "disabled";
	};

	gpio_aon: gpio@c2f0000 {
		compatible = "nvidia,tegra186-gpio-aon";
		reg-names = "security", "gpio";
		reg = <0x0 0xc2f0000 0x0 0x1000>,
		      <0x0 0xc2f1000 0x0 0x1000>;
		interrupts = <GIC_SPI 60 IRQ_TYPE_LEVEL_HIGH>;
		gpio-controller;
		#gpio-cells = <2>;
		interrupt-controller;
		#interrupt-cells = <2>;
	};

	pwm4: pwm@c340000 {
		compatible = "nvidia,tegra186-pwm";
		reg = <0x0 0xc340000 0x0 0x10000>;
		clocks = <&bpmp TEGRA186_CLK_PWM4>;
		resets = <&bpmp TEGRA186_RESET_PWM4>;
		reset-names = "pwm";
		status = "disabled";
		#pwm-cells = <2>;
	};

	pmc: pmc@c360000 {
		compatible = "nvidia,tegra186-pmc";
		reg = <0 0x0c360000 0 0x10000>,
		      <0 0x0c370000 0 0x10000>,
		      <0 0x0c380000 0 0x10000>,
		      <0 0x0c390000 0 0x10000>;
		reg-names = "pmc", "wake", "aotag", "scratch";

		#interrupt-cells = <2>;
		interrupt-controller;

		sdmmc1_1v8: sdmmc1-1v8 {
			pins = "sdmmc1-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_1V8>;
		};

		sdmmc1_3v3: sdmmc1-3v3 {
			pins = "sdmmc1-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
		};

		sdmmc2_1v8: sdmmc2-1v8 {
			pins = "sdmmc2-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_1V8>;
		};

		sdmmc2_3v3: sdmmc2-3v3 {
			pins = "sdmmc2-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
		};

		sdmmc3_1v8: sdmmc3-1v8 {
			pins = "sdmmc3-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_1V8>;
		};

		sdmmc3_3v3: sdmmc3-3v3 {
			pins = "sdmmc3-hv";
			power-source = <TEGRA_IO_PAD_VOLTAGE_3V3>;
		};
	};

	ccplex@e000000 {
		compatible = "nvidia,tegra186-ccplex-cluster";
		reg = <0x0 0x0e000000 0x0 0x400000>;

		nvidia,bpmp = <&bpmp>;
	};

	pcie@10003000 {
		compatible = "nvidia,tegra186-pcie";
		power-domains = <&bpmp TEGRA186_POWER_DOMAIN_PCX>;
		device_type = "pci";
		reg = <0x0 0x10003000 0x0 0x00000800>, /* PADS registers */
		      <0x0 0x10003800 0x0 0x00000800>, /* AFI registers */
		      <0x0 0x40000000 0x0 0x10000000>; /* configuration space */
		reg-names = "pads", "afi", "cs";

		interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, /* controller interrupt */
			     <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; /* MSI interrupt */
		interrupt-names = "intr", "msi";

		#interrupt-cells = <1>;
		interrupt-map-mask = <0 0 0 0>;
		interrupt-map = <0 0 0 0 &gic GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>;

		bus-range = <0x00 0xff>;
		#address-cells = <3>;
		#size-cells = <2>;

		ranges = <0x02000000 0 0x10000000 0x0 0x10000000 0 0x00001000>, /* port 0 configuration space */
			 <0x02000000 0 0x10001000 0x0 0x10001000 0 0x00001000>, /* port 1 configuration space */
			 <0x02000000 0 0x10004000 0x0 0x10004000 0 0x00001000>, /* port 2 configuration space */
			 <0x01000000 0 0x0 0x0 0x50000000 0 0x00010000>, /* downstream I/O (64 KiB) */
			 <0x02000000 0 0x50100000 0x0 0x50100000 0 0x07f00000>, /* non-prefetchable memory (127 MiB) */
			 <0x42000000 0 0x58000000 0x0 0x58000000 0 0x28000000>; /* prefetchable memory (640 MiB) */

		clocks = <&bpmp TEGRA186_CLK_PCIE>,
			 <&bpmp TEGRA186_CLK_AFI>,
			 <&bpmp TEGRA186_CLK_PLLE>;
		clock-names = "pex", "afi", "pll_e";

		resets = <&bpmp TEGRA186_RESET_PCIE>,
			 <&bpmp TEGRA186_RESET_AFI>,
			 <&bpmp TEGRA186_RESET_PCIEXCLK>;
		reset-names = "pex", "afi", "pcie_x";

		interconnects = <&mc TEGRA186_MEMORY_CLIENT_AFIR &emc>,
				<&mc TEGRA186_MEMORY_CLIENT_AFIW &emc>;
		interconnect-names = "dma-mem", "write";
		iommus = <&smmu TEGRA186_SID_AFI>;
		iommu-map = <0x0 &smmu TEGRA186_SID_AFI 0x1000>;
		iommu-map-mask = <0x0>;

		status = "disabled";

		pci@1,0 {
			device_type = "pci";
			assigned-addresses = <0x82000800 0 0x10000000 0 0x1000>;
			reg = <0x000800 0 0 0 0>;
			status = "disabled";

			#address-cells = <3>;
			#size-cells = <2>;
			ranges;

			nvidia,num-lanes = <2>;
		};

		pci@2,0 {
			device_type = "pci";
			assigned-addresses = <0x82001000 0 0x10001000 0 0x1000>;
			reg = <0x001000 0 0 0 0>;
			status = "disabled";

			#address-cells = <3>;
			#size-cells = <2>;
			ranges;

			nvidia,num-lanes = <1>;
		};

		pci@3,0 {
			device_type = "pci";
			assigned-addresses = <0x82001800 0 0x10004000 0 0x1000>;
			reg = <0x001800 0 0 0 0>;
			status = "disabled";

			#address-cells = <3>;
			#size-cells = <2>;
			ranges;

			nvidia,num-lanes = <1>;
		};
	};

	smmu: iommu@12000000 {
		compatible = "nvidia,tegra186-smmu", "nvidia,smmu-500";
		reg = <0 0x12000000 0 0x800000>;
		interrupts = <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>;
		stream-match-mask = <0x7f80>;
		#global-interrupts = <1>;
		#iommu-cells = <1>;

		nvidia,memory-controller = <&mc>;
	};

	host1x@13e00000 {
		compatible = "nvidia,tegra186-host1x";
		reg = <0x0 0x13e00000 0x0 0x10000>,
		      <0x0 0x13e10000 0x0 0x10000>;
		reg-names = "hypervisor", "vm";
		interrupts = <GIC_SPI 265 IRQ_TYPE_LEVEL_HIGH>,
			     <GIC_SPI 263 IRQ_TYPE_LEVEL_HIGH>;
		interrupt-names = "syncpt", "host1x";
		clocks = <&bpmp TEGRA186_CLK_HOST1X>;
		clock-names = "host1x";
		resets = <&bpmp TEGRA186_RESET_HOST1X>;
		reset-names = "host1x";

		#address-cells = <1>;
		#size-cells = <1>;

		ranges = <0x15000000 0x0 0x15000000 0x01000000>;

		interconnects = <&mc TEGRA186_MEMORY_CLIENT_HOST1XDMAR &emc>;
		interconnect-names = "dma-mem";
		iommus = <&smmu TEGRA186_SID_HOST1X>;

		/* Context isolation domains */
		iommu-map = <0 &smmu TEGRA186_SID_HOST1X_CTX0 1>,
			    <1 &smmu TEGRA186_SID_HOST1X_CTX1 1>,
			    <2 &smmu TEGRA186_SID_HOST1X_CTX2 1>,
			    <3 &smmu TEGRA186_SID_HOST1X_CTX3 1>,
			    <4 &smmu TEGRA186_SID_HOST1X_CTX4 1>,
			    <5 &smmu TEGRA186_SID_HOST1X_CTX5 1>,
			    <6 &smmu TEGRA186_SID_HOST1X_CTX6 1>,
			    <7 &smmu TEGRA186_SID_HOST1X_CTX7 1>;

		dpaux1: dpaux@15040000 {
			compatible = "nvidia,tegra186-dpaux";
			reg = <0x15040000 0x10000>;
			interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>;
			clocks = <&bpmp TEGRA186_CLK_DPAUX1>,
				 <&bpmp TEGRA186_CLK_PLLDP>;
			clock-names = "dpaux", "parent";
			resets = <&bpmp TEGRA186_RESET_DPAUX1>;
			reset-names = "dpaux";
			status = "disabled";
power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; state_dpaux1_aux: pinmux-aux { groups = "dpaux-io"; function = "aux"; }; state_dpaux1_i2c: pinmux-i2c { groups = "dpaux-io"; function = "i2c"; }; state_dpaux1_off: pinmux-off { groups = "dpaux-io"; function = "off"; }; i2c-bus { #address-cells = <1>; #size-cells = <0>; }; }; display-hub@15200000 { compatible = "nvidia,tegra186-display"; reg = <0x15200000 0x00040000>; resets = <&bpmp TEGRA186_RESET_NVDISPLAY0_MISC>, <&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP0>, <&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP1>, <&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP2>, <&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP3>, <&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP4>, <&bpmp TEGRA186_RESET_NVDISPLAY0_WGRP5>; reset-names = "misc", "wgrp0", "wgrp1", "wgrp2", "wgrp3", "wgrp4", "wgrp5"; clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_DISP>, <&bpmp TEGRA186_CLK_NVDISPLAY_DSC>, <&bpmp TEGRA186_CLK_NVDISPLAYHUB>; clock-names = "disp", "dsc", "hub"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; #address-cells = <1>; #size-cells = <1>; ranges = <0x15200000 0x15200000 0x40000>; display@15200000 { compatible = "nvidia,tegra186-dc"; reg = <0x15200000 0x10000>; interrupts = <GIC_SPI 153 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_P0>; clock-names = "dc"; resets = <&bpmp TEGRA186_RESET_NVDISPLAY0_HEAD0>; reset-names = "dc"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; interconnects = <&mc TEGRA186_MEMORY_CLIENT_NVDISPLAYR &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVDISPLAYR1 &emc>; interconnect-names = "dma-mem", "read-1"; iommus = <&smmu TEGRA186_SID_NVDISPLAY>; nvidia,outputs = <&dsia &dsib &sor0 &sor1>; nvidia,head = <0>; }; display@15210000 { compatible = "nvidia,tegra186-dc"; reg = <0x15210000 0x10000>; interrupts = <GIC_SPI 154 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_P1>; clock-names = "dc"; resets = <&bpmp TEGRA186_RESET_NVDISPLAY0_HEAD1>; reset-names = "dc"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISPB>; 
interconnects = <&mc TEGRA186_MEMORY_CLIENT_NVDISPLAYR &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVDISPLAYR1 &emc>; interconnect-names = "dma-mem", "read-1"; iommus = <&smmu TEGRA186_SID_NVDISPLAY>; nvidia,outputs = <&dsia &dsib &sor0 &sor1>; nvidia,head = <1>; }; display@15220000 { compatible = "nvidia,tegra186-dc"; reg = <0x15220000 0x10000>; interrupts = <GIC_SPI 155 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_NVDISPLAY_P2>; clock-names = "dc"; resets = <&bpmp TEGRA186_RESET_NVDISPLAY0_HEAD2>; reset-names = "dc"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISPC>; interconnects = <&mc TEGRA186_MEMORY_CLIENT_NVDISPLAYR &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVDISPLAYR1 &emc>; interconnect-names = "dma-mem", "read-1"; iommus = <&smmu TEGRA186_SID_NVDISPLAY>; nvidia,outputs = <&sor0 &sor1>; nvidia,head = <2>; }; }; dsia: dsi@15300000 { compatible = "nvidia,tegra186-dsi"; reg = <0x15300000 0x10000>; interrupts = <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_DSI>, <&bpmp TEGRA186_CLK_DSIA_LP>, <&bpmp TEGRA186_CLK_PLLD>; clock-names = "dsi", "lp", "parent"; resets = <&bpmp TEGRA186_RESET_DSI>; reset-names = "dsi"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; }; vic@15340000 { compatible = "nvidia,tegra186-vic"; reg = <0x15340000 0x40000>; interrupts = <GIC_SPI 206 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_VIC>; clock-names = "vic"; resets = <&bpmp TEGRA186_RESET_VIC>; reset-names = "vic"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_VIC>; interconnects = <&mc TEGRA186_MEMORY_CLIENT_VICSRD &emc>, <&mc TEGRA186_MEMORY_CLIENT_VICSWR &emc>; interconnect-names = "dma-mem", "write"; iommus = <&smmu TEGRA186_SID_VIC>; }; nvjpg@15380000 { compatible = "nvidia,tegra186-nvjpg"; reg = <0x15380000 0x40000>; clocks = <&bpmp TEGRA186_CLK_NVJPG>; clock-names = "nvjpg"; resets = <&bpmp TEGRA186_RESET_NVJPG>; reset-names = "nvjpg"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_NVJPG>; interconnects = <&mc 
TEGRA186_MEMORY_CLIENT_NVJPGSRD &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVJPGSWR &emc>; interconnect-names = "dma-mem", "write"; iommus = <&smmu TEGRA186_SID_NVJPG>; }; dsib: dsi@15400000 { compatible = "nvidia,tegra186-dsi"; reg = <0x15400000 0x10000>; interrupts = <GIC_SPI 21 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_DSIB>, <&bpmp TEGRA186_CLK_DSIB_LP>, <&bpmp TEGRA186_CLK_PLLD>; clock-names = "dsi", "lp", "parent"; resets = <&bpmp TEGRA186_RESET_DSIB>; reset-names = "dsi"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; }; nvdec@15480000 { compatible = "nvidia,tegra186-nvdec"; reg = <0x15480000 0x40000>; clocks = <&bpmp TEGRA186_CLK_NVDEC>; clock-names = "nvdec"; resets = <&bpmp TEGRA186_RESET_NVDEC>; reset-names = "nvdec"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_NVDEC>; interconnects = <&mc TEGRA186_MEMORY_CLIENT_NVDECSRD &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVDECSRD1 &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVDECSWR &emc>; interconnect-names = "dma-mem", "read-1", "write"; iommus = <&smmu TEGRA186_SID_NVDEC>; }; nvenc@154c0000 { compatible = "nvidia,tegra186-nvenc"; reg = <0x154c0000 0x40000>; clocks = <&bpmp TEGRA186_CLK_NVENC>; clock-names = "nvenc"; resets = <&bpmp TEGRA186_RESET_NVENC>; reset-names = "nvenc"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_MPE>; interconnects = <&mc TEGRA186_MEMORY_CLIENT_NVENCSRD &emc>, <&mc TEGRA186_MEMORY_CLIENT_NVENCSWR &emc>; interconnect-names = "dma-mem", "write"; iommus = <&smmu TEGRA186_SID_NVENC>; }; sor0: sor@15540000 { compatible = "nvidia,tegra186-sor"; reg = <0x15540000 0x10000>; interrupts = <GIC_SPI 157 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_SOR0>, <&bpmp TEGRA186_CLK_SOR0_OUT>, <&bpmp TEGRA186_CLK_PLLD2>, <&bpmp TEGRA186_CLK_PLLDP>, <&bpmp TEGRA186_CLK_SOR_SAFE>, <&bpmp TEGRA186_CLK_SOR0_PAD_CLKOUT>; clock-names = "sor", "out", "parent", "dp", "safe", "pad"; resets = <&bpmp TEGRA186_RESET_SOR0>; reset-names = "sor"; pinctrl-0 = <&state_dpaux_aux>; pinctrl-1 = 
<&state_dpaux_i2c>; pinctrl-2 = <&state_dpaux_off>; pinctrl-names = "aux", "i2c", "off"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; nvidia,interface = <0>; }; sor1: sor@15580000 { compatible = "nvidia,tegra186-sor"; reg = <0x15580000 0x10000>; interrupts = <GIC_SPI 158 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_SOR1>, <&bpmp TEGRA186_CLK_SOR1_OUT>, <&bpmp TEGRA186_CLK_PLLD3>, <&bpmp TEGRA186_CLK_PLLDP>, <&bpmp TEGRA186_CLK_SOR_SAFE>, <&bpmp TEGRA186_CLK_SOR1_PAD_CLKOUT>; clock-names = "sor", "out", "parent", "dp", "safe", "pad"; resets = <&bpmp TEGRA186_RESET_SOR1>; reset-names = "sor"; pinctrl-0 = <&state_dpaux1_aux>; pinctrl-1 = <&state_dpaux1_i2c>; pinctrl-2 = <&state_dpaux1_off>; pinctrl-names = "aux", "i2c", "off"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; nvidia,interface = <1>; }; dpaux: dpaux@155c0000 { compatible = "nvidia,tegra186-dpaux"; reg = <0x155c0000 0x10000>; interrupts = <GIC_SPI 159 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_DPAUX>, <&bpmp TEGRA186_CLK_PLLDP>; clock-names = "dpaux", "parent"; resets = <&bpmp TEGRA186_RESET_DPAUX>; reset-names = "dpaux"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; state_dpaux_aux: pinmux-aux { groups = "dpaux-io"; function = "aux"; }; state_dpaux_i2c: pinmux-i2c { groups = "dpaux-io"; function = "i2c"; }; state_dpaux_off: pinmux-off { groups = "dpaux-io"; function = "off"; }; i2c-bus { #address-cells = <1>; #size-cells = <0>; }; }; padctl@15880000 { compatible = "nvidia,tegra186-dsi-padctl"; reg = <0x15880000 0x10000>; resets = <&bpmp TEGRA186_RESET_DSI>; reset-names = "dsi"; status = "disabled"; }; dsic: dsi@15900000 { compatible = "nvidia,tegra186-dsi"; reg = <0x15900000 0x10000>; interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_DSIC>, <&bpmp TEGRA186_CLK_DSIC_LP>, <&bpmp TEGRA186_CLK_PLLD>; clock-names = "dsi", "lp", "parent"; resets = <&bpmp TEGRA186_RESET_DSIC>; reset-names = 
"dsi"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; }; dsid: dsi@15940000 { compatible = "nvidia,tegra186-dsi"; reg = <0x15940000 0x10000>; interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>; clocks = <&bpmp TEGRA186_CLK_DSID>, <&bpmp TEGRA186_CLK_DSID_LP>, <&bpmp TEGRA186_CLK_PLLD>; clock-names = "dsi", "lp", "parent"; resets = <&bpmp TEGRA186_RESET_DSID>; reset-names = "dsi"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_DISP>; }; }; gpu@17000000 { compatible = "nvidia,gp10b"; reg = <0x0 0x17000000 0x0 0x1000000>, <0x0 0x18000000 0x0 0x1000000>; interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "stall", "nonstall"; clocks = <&bpmp TEGRA186_CLK_GPCCLK>, <&bpmp TEGRA186_CLK_GPU>; clock-names = "gpu", "pwr"; resets = <&bpmp TEGRA186_RESET_GPU>; reset-names = "gpu"; status = "disabled"; power-domains = <&bpmp TEGRA186_POWER_DOMAIN_GPU>; interconnects = <&mc TEGRA186_MEMORY_CLIENT_GPUSRD &emc>, <&mc TEGRA186_MEMORY_CLIENT_GPUSWR &emc>, <&mc TEGRA186_MEMORY_CLIENT_GPUSRD2 &emc>, <&mc TEGRA186_MEMORY_CLIENT_GPUSWR2 &emc>; interconnect-names = "dma-mem", "write-0", "read-1", "write-1"; }; sram@30000000 { compatible = "nvidia,tegra186-sysram", "mmio-sram"; reg = <0x0 0x30000000 0x0 0x50000>; #address-cells = <1>; #size-cells = <1>; ranges = <0x0 0x0 0x30000000 0x50000>; no-memory-wc; cpu_bpmp_tx: sram@4e000 { reg = <0x4e000 0x1000>; label = "cpu-bpmp-tx"; pool; }; cpu_bpmp_rx: sram@4f000 { reg = <0x4f000 0x1000>; label = "cpu-bpmp-rx"; pool; }; }; bpmp: bpmp { compatible = "nvidia,tegra186-bpmp"; interconnects = <&mc TEGRA186_MEMORY_CLIENT_BPMPR &emc>, <&mc TEGRA186_MEMORY_CLIENT_BPMPW &emc>, <&mc TEGRA186_MEMORY_CLIENT_BPMPDMAR &emc>, <&mc TEGRA186_MEMORY_CLIENT_BPMPDMAW &emc>; interconnect-names = "read", "write", "dma-mem", "dma-write"; iommus = <&smmu TEGRA186_SID_BPMP>; mboxes = <&hsp_top0 TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>; shmem = <&cpu_bpmp_tx>, 
<&cpu_bpmp_rx>; #clock-cells = <1>; #reset-cells = <1>; #power-domain-cells = <1>; bpmp_i2c: i2c { compatible = "nvidia,tegra186-bpmp-i2c"; nvidia,bpmp-bus-id = <5>; #address-cells = <1>; #size-cells = <0>; status = "disabled"; }; bpmp_thermal: thermal { compatible = "nvidia,tegra186-bpmp-thermal"; #thermal-sensor-cells = <1>; }; }; cpus { #address-cells = <1>; #size-cells = <0>; denver_0: cpu@0 { compatible = "nvidia,tegra186-denver"; device_type = "cpu"; i-cache-size = <0x20000>; i-cache-line-size = <64>; i-cache-sets = <512>; d-cache-size = <0x10000>; d-cache-line-size = <64>; d-cache-sets = <256>; next-level-cache = <&L2_DENVER>; reg = <0x000>; }; denver_1: cpu@1 { compatible = "nvidia,tegra186-denver"; device_type = "cpu"; i-cache-size = <0x20000>; i-cache-line-size = <64>; i-cache-sets = <512>; d-cache-size = <0x10000>; d-cache-line-size = <64>; d-cache-sets = <256>; next-level-cache = <&L2_DENVER>; reg = <0x001>; }; ca57_0: cpu@2 { compatible = "arm,cortex-a57"; device_type = "cpu"; i-cache-size = <0xC000>; i-cache-line-size = <64>; i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; d-cache-sets = <256>; next-level-cache = <&L2_A57>; reg = <0x100>; }; ca57_1: cpu@3 { compatible = "arm,cortex-a57"; device_type = "cpu"; i-cache-size = <0xC000>; i-cache-line-size = <64>; i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; d-cache-sets = <256>; next-level-cache = <&L2_A57>; reg = <0x101>; }; ca57_2: cpu@4 { compatible = "arm,cortex-a57"; device_type = "cpu"; i-cache-size = <0xC000>; i-cache-line-size = <64>; i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; d-cache-sets = <256>; next-level-cache = <&L2_A57>; reg = <0x102>; }; ca57_3: cpu@5 { compatible = "arm,cortex-a57"; device_type = "cpu"; i-cache-size = <0xC000>; i-cache-line-size = <64>; i-cache-sets = <256>; d-cache-size = <0x8000>; d-cache-line-size = <64>; d-cache-sets = <256>; next-level-cache = <&L2_A57>; reg = <0x103>; }; L2_DENVER: 
l2-cache0 { compatible = "cache"; cache-unified; cache-level = <2>; cache-size = <0x200000>; cache-line-size = <64>; cache-sets = <2048>; }; L2_A57: l2-cache1 { compatible = "cache"; cache-unified; cache-level = <2>; cache-size = <0x200000>; cache-line-size = <64>; cache-sets = <2048>; }; }; pmu-a57 { compatible = "arm,cortex-a57-pmu"; interrupts = <GIC_SPI 296 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 297 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 298 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 299 IRQ_TYPE_LEVEL_HIGH>; interrupt-affinity = <&ca57_0 &ca57_1 &ca57_2 &ca57_3>; }; pmu-denver { compatible = "nvidia,denver-pmu"; interrupts = <GIC_SPI 320 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 321 IRQ_TYPE_LEVEL_HIGH>; interrupt-affinity = <&denver_0 &denver_1>; }; sound { status = "disabled"; clocks = <&bpmp TEGRA186_CLK_PLLA>, <&bpmp TEGRA186_CLK_PLL_A_OUT0>; clock-names = "pll_a", "plla_out0"; assigned-clocks = <&bpmp TEGRA186_CLK_PLLA>, <&bpmp TEGRA186_CLK_PLL_A_OUT0>, <&bpmp TEGRA186_CLK_AUD_MCLK>; assigned-clock-parents = <0>, <&bpmp TEGRA186_CLK_PLLA>, <&bpmp TEGRA186_CLK_PLL_A_OUT0>; /* * PLLA supports dynamic ramp. Below initial rate is chosen * for this to work and oscillate between base rates required * for 8x and 11.025x sample rate streams. 
*/ assigned-clock-rates = <258000000>; iommus = <&smmu TEGRA186_SID_APE>; }; thermal-zones { /* Cortex-A57 cluster */ cpu-thermal { polling-delay = <0>; polling-delay-passive = <1000>; thermal-sensors = <&bpmp_thermal TEGRA186_BPMP_THERMAL_ZONE_CPU>; trips { critical { temperature = <101000>; hysteresis = <0>; type = "critical"; }; }; cooling-maps { }; }; /* Denver cluster */ aux-thermal { polling-delay = <0>; polling-delay-passive = <1000>; thermal-sensors = <&bpmp_thermal TEGRA186_BPMP_THERMAL_ZONE_AUX>; trips { critical { temperature = <101000>; hysteresis = <0>; type = "critical"; }; }; cooling-maps { }; }; gpu-thermal { polling-delay = <0>; polling-delay-passive = <1000>; thermal-sensors = <&bpmp_thermal TEGRA186_BPMP_THERMAL_ZONE_GPU>; trips { critical { temperature = <101000>; hysteresis = <0>; type = "critical"; }; }; cooling-maps { }; }; pll-thermal { polling-delay = <0>; polling-delay-passive = <1000>; thermal-sensors = <&bpmp_thermal TEGRA186_BPMP_THERMAL_ZONE_PLLX>; trips { critical { temperature = <101000>; hysteresis = <0>; type = "critical"; }; }; cooling-maps { }; }; ao-thermal { polling-delay = <0>; polling-delay-passive = <1000>; thermal-sensors = <&bpmp_thermal TEGRA186_BPMP_THERMAL_ZONE_AO>; trips { critical { temperature = <101000>; hysteresis = <0>; type = "critical"; }; }; cooling-maps { }; }; }; timer { compatible = "arm,armv8-timer"; interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>, <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>; interrupt-parent = <&gic>; always-on; }; };
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CE4100 PCI-I2C glue code for PXA's driver
 * Author: Sebastian Andrzej Siewior <[email protected]>
 *
 * The CE4100's I2C device is more or less the same one as found on PXA.
 * It does not support target mode and the registers are slightly moved. This
 * PCI device provides three BARs; each contains a single I2C controller.
 */
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/of.h>
#include <linux/of_address.h>

/* One PCI function exposes three independent I2C controllers (one per BAR). */
#define CE4100_PCI_I2C_DEVS	3

/* Per-PCI-device bookkeeping: the platform devices created for each BAR. */
struct ce4100_devices {
	struct platform_device *pdev[CE4100_PCI_I2C_DEVS];
};

/*
 * add_i2c_device - create one "ce4100-i2c" platform device for a PCI BAR.
 * @dev: parent PCI device providing the MMIO BARs and the shared IRQ.
 * @bar: BAR index (0..2) holding this controller's registers.
 *
 * Matches the BAR's address range against the children of the PCI device's
 * DT node to find the corresponding controller node (and its optional
 * "fast-mode" property), then registers a platform device carrying the
 * MMIO + IRQ resources and the pxa-i2c platform data.
 *
 * Returns the new platform device on success, or an ERR_PTR() on failure.
 */
static struct platform_device *add_i2c_device(struct pci_dev *dev, int bar)
{
	struct platform_device *pdev;
	struct i2c_pxa_platform_data pdata;
	struct resource res[2];
	struct device_node *child;
	/* Global instance counter so each controller gets a unique id. */
	static int devnum;
	int ret;

	memset(&pdata, 0, sizeof(struct i2c_pxa_platform_data));
	memset(&res, 0, sizeof(res));

	/* res[0]: the controller's register window, taken from the BAR. */
	res[0].flags = IORESOURCE_MEM;
	res[0].start = pci_resource_start(dev, bar);
	res[0].end = pci_resource_end(dev, bar);

	/* res[1]: all three controllers share the PCI device's interrupt. */
	res[1].flags = IORESOURCE_IRQ;
	res[1].start = dev->irq;
	res[1].end = dev->irq;

	/*
	 * Find the DT child node whose "reg" translates to exactly this
	 * BAR's range; on break, @child keeps the reference the iterator
	 * took for it.
	 */
	for_each_child_of_node(dev->dev.of_node, child) {
		const void *prop;
		struct resource r;
		int ret;	/* local status; intentionally shadows outer ret */

		ret = of_address_to_resource(child, 0, &r);
		if (ret < 0)
			continue;

		if (r.start != res[0].start)
			continue;
		if (r.end != res[0].end)
			continue;
		if (r.flags != res[0].flags)
			continue;

		/* Presence of "fast-mode" selects 400 kHz-class operation. */
		prop = of_get_property(child, "fast-mode", NULL);
		if (prop)
			pdata.fast_mode = 1;

		break;
	}

	/* Loop ran to completion without a break => no matching node. */
	if (!child) {
		dev_err(&dev->dev, "failed to match a DT node for bar %d.\n",
				bar);
		ret = -EINVAL;
		goto out;
	}

	pdev = platform_device_alloc("ce4100-i2c", devnum);
	if (!pdev) {
		/* Drop the reference taken by the matching-node loop. */
		of_node_put(child);
		ret = -ENOMEM;
		goto out;
	}
	pdev->dev.parent = &dev->dev;
	/* Hand the matched node (and its reference) to the new device. */
	pdev->dev.of_node = child;

	ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
	if (ret)
		goto err;

	ret = platform_device_add_data(pdev, &pdata, sizeof(pdata));
	if (ret)
		goto err;

	ret = platform_device_add(pdev);
	if (ret)
		goto err;
	devnum++;
	return pdev;
err:
	/* platform_device_put() also releases resources/data copies. */
	platform_device_put(pdev);
out:
	return ERR_PTR(ret);
}

/*
 * ce4100_i2c_probe - PCI probe: spawn one platform device per I2C BAR.
 *
 * Requires a DT node on the PCI device so each controller can be matched
 * to its description. On partial failure, already-registered platform
 * devices are unregistered before returning the error.
 */
static int ce4100_i2c_probe(struct pci_dev *dev,
		const struct pci_device_id *ent)
{
	int ret;
	int i;
	struct ce4100_devices *sds;

	ret = pcim_enable_device(dev);
	if (ret)
		return ret;

	if (!dev->dev.of_node) {
		dev_err(&dev->dev, "Missing device tree node.\n");
		return -EINVAL;
	}
	sds = kzalloc(sizeof(*sds), GFP_KERNEL);
	if (!sds)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(sds->pdev); i++) {
		sds->pdev[i] = add_i2c_device(dev, i);
		if (IS_ERR(sds->pdev[i])) {
			ret = PTR_ERR(sds->pdev[i]);
			/* Roll back the devices created so far. */
			while (--i >= 0)
				platform_device_unregister(sds->pdev[i]);
			goto err_dev_add;
		}
	}
	pci_set_drvdata(dev, sds);
	return 0;

err_dev_add:
	kfree(sds);
	return ret;
}

static const struct pci_device_id ce4100_i2c_devices[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2e68)},
	{ }
};

/* Built-in only (no remove callback); binding via sysfs is suppressed. */
static struct pci_driver ce4100_i2c_driver = {
	.driver = {
		.suppress_bind_attrs = true,
	},
	.name = "ce4100_i2c",
	.id_table = ce4100_i2c_devices,
	.probe = ce4100_i2c_probe,
};
builtin_pci_driver(ce4100_i2c_driver);
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright (c) 2018 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 * Deepak Rawat <[email protected]>
 * Rob Clark <[email protected]>
 *
 **************************************************************************/

#include <drm/drm_atomic.h>
#include <drm/drm_damage_helper.h>
#include <drm/drm_device.h>
#include <drm/drm_framebuffer.h>

/*
 * Copy @num_clips rectangles from the legacy drm_clip_rect layout into
 * drm_mode_rect, advancing the source by @src_inc each step (src_inc == 2
 * skips the "annotate copy" source rectangles interleaved in the array).
 */
static void convert_clip_rect_to_rect(const struct drm_clip_rect *src,
				      struct drm_mode_rect *dest,
				      uint32_t num_clips, uint32_t src_inc)
{
	while (num_clips > 0) {
		dest->x1 = src->x1;
		dest->y1 = src->y1;
		dest->x2 = src->x2;
		dest->y2 = src->y2;

		src += src_inc;
		dest++;
		num_clips--;
	}
}

/**
 * drm_atomic_helper_check_plane_damage - Verify plane damage on atomic_check.
 * @state: The driver state object.
 * @plane_state: Plane state for which to verify damage.
 *
 * This helper function makes sure that damage from plane state is discarded
 * for full modeset. If there are more reasons a driver would want to do a full
 * plane update rather than processing individual damage regions, then those
 * cases should be taken care of here.
 *
 * Note that &drm_plane_state.fb_damage_clips == NULL in plane state means that
 * full plane update should happen. It also ensures the helper iterator will
 * return &drm_plane_state.src as damage.
 */
void drm_atomic_helper_check_plane_damage(struct drm_atomic_state *state,
					  struct drm_plane_state *plane_state)
{
	struct drm_crtc_state *crtc_state;

	if (plane_state->crtc) {
		crtc_state = drm_atomic_get_new_crtc_state(state,
							   plane_state->crtc);

		if (WARN_ON(!crtc_state))
			return;

		/* A modeset repaints everything: drop partial damage. */
		if (drm_atomic_crtc_needs_modeset(crtc_state)) {
			drm_property_blob_put(plane_state->fb_damage_clips);
			plane_state->fb_damage_clips = NULL;
		}
	}
}
EXPORT_SYMBOL(drm_atomic_helper_check_plane_damage);

/**
 * drm_atomic_helper_dirtyfb - Helper for dirtyfb.
 * @fb: DRM framebuffer.
 * @file_priv: Drm file for the ioctl call.
 * @flags: Dirty fb annotate flags.
 * @color: Color for annotate fill.
 * @clips: Dirty region.
 * @num_clips: Count of clip in clips.
 *
 * A helper to implement &drm_framebuffer_funcs.dirty using damage interface
 * during plane update. If num_clips is 0 then this helper will do a full plane
 * update. This is the same behaviour expected by DIRTYFB IOCTL.
 *
 * Note that this helper is a blocking implementation. This is what current
 * drivers and userspace expect in their DIRTYFB IOCTL implementation, as a way
 * to rate-limit userspace and make sure its rendering doesn't get ahead of
 * uploading new data too much.
 *
 * Return: Zero on success, negative errno on failure.
 */
int drm_atomic_helper_dirtyfb(struct drm_framebuffer *fb,
			      struct drm_file *file_priv, unsigned int flags,
			      unsigned int color, struct drm_clip_rect *clips,
			      unsigned int num_clips)
{
	struct drm_modeset_acquire_ctx ctx;
	struct drm_property_blob *damage = NULL;
	struct drm_mode_rect *rects = NULL;
	struct drm_atomic_state *state;
	struct drm_plane *plane;
	int ret = 0;

	/*
	 * When called from ioctl, we are interruptible, but not when called
	 * internally (ie. defio worker)
	 */
	drm_modeset_acquire_init(&ctx,
		file_priv ? DRM_MODESET_ACQUIRE_INTERRUPTIBLE : 0);

	state = drm_atomic_state_alloc(fb->dev);
	if (!state) {
		ret = -ENOMEM;
		goto out_drop_locks;
	}
	state->acquire_ctx = &ctx;

	if (clips) {
		uint32_t inc = 1;

		/*
		 * ANNOTATE_COPY interleaves source rects with the dirty
		 * rects; keep only every second entry.
		 */
		if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
			inc = 2;
			num_clips /= 2;
		}

		rects = kcalloc(num_clips, sizeof(*rects), GFP_KERNEL);
		if (!rects) {
			ret = -ENOMEM;
			goto out;
		}

		convert_clip_rect_to_rect(clips, rects, num_clips, inc);
		damage = drm_property_create_blob(fb->dev,
						  num_clips * sizeof(*rects),
						  rects);
		if (IS_ERR(damage)) {
			ret = PTR_ERR(damage);
			damage = NULL;
			goto out;
		}
	}

retry:
	/* Attach the damage blob to every plane currently scanning out @fb. */
	drm_for_each_plane(plane, fb->dev) {
		struct drm_plane_state *plane_state;

		ret = drm_modeset_lock(&plane->mutex, state->acquire_ctx);
		if (ret)
			goto out;

		if (plane->state->fb != fb) {
			drm_modeset_unlock(&plane->mutex);
			continue;
		}

		plane_state = drm_atomic_get_plane_state(state, plane);
		if (IS_ERR(plane_state)) {
			ret = PTR_ERR(plane_state);
			goto out;
		}

		drm_property_replace_blob(&plane_state->fb_damage_clips,
					  damage);
	}

	ret = drm_atomic_commit(state);

out:
	/* Standard deadlock-backoff dance for the modeset locking scheme. */
	if (ret == -EDEADLK) {
		drm_atomic_state_clear(state);
		ret = drm_modeset_backoff(&ctx);
		if (!ret)
			goto retry;
	}

	/* Drop our blob reference; plane states hold their own. */
	drm_property_blob_put(damage);
	kfree(rects);
	drm_atomic_state_put(state);

out_drop_locks:
	drm_modeset_drop_locks(&ctx);
	drm_modeset_acquire_fini(&ctx);

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_dirtyfb);

/**
 * drm_atomic_helper_damage_iter_init - Initialize the damage iterator.
 * @iter: The iterator to initialize.
 * @old_state: Old plane state for validation.
 * @state: Plane state from which to iterate the damage clips.
 *
 * Initialize an iterator, which clips plane damage
 * &drm_plane_state.fb_damage_clips to plane &drm_plane_state.src. This iterator
 * returns full plane src in case damage is not present because either
 * user-space didn't send it or the driver discarded it (it wants to do a full
 * plane update). Currently this iterator returns full plane src in case plane
 * src changed but that can be changed in future to return damage.
 *
 * For the case when plane is not visible or plane update should not happen the
 * first call to iter_next will return false. Note that this helper uses the
 * clipped &drm_plane_state.src, so a driver calling this helper should have
 * called drm_atomic_helper_check_plane_state() earlier.
 */
void
drm_atomic_helper_damage_iter_init(struct drm_atomic_helper_damage_iter *iter,
				   const struct drm_plane_state *old_state,
				   const struct drm_plane_state *state)
{
	struct drm_rect src;
	memset(iter, 0, sizeof(*iter));

	/* No visible plane => iterator immediately terminates. */
	if (!state || !state->crtc || !state->fb || !state->visible)
		return;

	iter->clips = (struct drm_rect *)drm_plane_get_damage_clips(state);
	iter->num_clips = drm_plane_get_damage_clips_count(state);

	/* Round down for x1/y1 and round up for x2/y2 to catch all pixels */
	src = drm_plane_state_src(state);

	/* src is 16.16 fixed point; convert to whole pixels. */
	iter->plane_src.x1 = src.x1 >> 16;
	iter->plane_src.y1 = src.y1 >> 16;
	iter->plane_src.x2 = (src.x2 >> 16) + !!(src.x2 & 0xFFFF);
	iter->plane_src.y2 = (src.y2 >> 16) + !!(src.y2 & 0xFFFF);

	/*
	 * Fall back to a full update when there are no clips, the driver
	 * opted out, or the plane src moved/resized.
	 */
	if (!iter->clips || state->ignore_damage_clips ||
	    !drm_rect_equals(&state->src, &old_state->src)) {
		iter->clips = NULL;
		iter->num_clips = 0;
		iter->full_update = true;
	}
}
EXPORT_SYMBOL(drm_atomic_helper_damage_iter_init);

/**
 * drm_atomic_helper_damage_iter_next - Advance the damage iterator.
 * @iter: The iterator to advance.
 * @rect: Return a rectangle in fb coordinate clipped to plane src.
 *
 * Since plane src is in 16.16 fixed point and damage clips are whole numbers,
 * this iterator rounds off clips that intersect with plane src. Round down for
 * x1/y1 and round up for x2/y2 for the intersected coordinate. Similar rounding
 * off for full plane src, in case it's returned as damage. This iterator will
 * skip damage clips outside of plane src.
 *
 * Return: True if the output is valid, false if reached the end.
 *
 * If the first call to iterator next returns false then it means no need to
 * update the plane.
 */
bool
drm_atomic_helper_damage_iter_next(struct drm_atomic_helper_damage_iter *iter,
				   struct drm_rect *rect)
{
	bool ret = false;

	/* Full-update mode yields exactly one rect: the whole plane src. */
	if (iter->full_update) {
		*rect = iter->plane_src;
		iter->full_update = false;
		return true;
	}

	/* Skip clips that don't intersect the plane src at all. */
	while (iter->curr_clip < iter->num_clips) {
		*rect = iter->clips[iter->curr_clip];
		iter->curr_clip++;

		if (drm_rect_intersect(rect, &iter->plane_src)) {
			ret = true;
			break;
		}
	}

	return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_damage_iter_next);

/**
 * drm_atomic_helper_damage_merged - Merged plane damage
 * @old_state: Old plane state for validation.
 * @state: Plane state from which to iterate the damage clips.
 * @rect: Returns the merged damage rectangle
 *
 * This function merges any valid plane damage clips into one rectangle and
 * returns it in @rect.
 *
 * For details see: drm_atomic_helper_damage_iter_init() and
 * drm_atomic_helper_damage_iter_next().
 *
 * Returns:
 * True if there is valid plane damage otherwise false.
 */
bool drm_atomic_helper_damage_merged(const struct drm_plane_state *old_state,
				     struct drm_plane_state *state,
				     struct drm_rect *rect)
{
	struct drm_atomic_helper_damage_iter iter;
	struct drm_rect clip;
	bool valid = false;

	/* Start from an "empty" rect so the first clip replaces it. */
	rect->x1 = INT_MAX;
	rect->y1 = INT_MAX;
	rect->x2 = 0;
	rect->y2 = 0;

	/* Grow the bounding box over every damage clip the iterator yields. */
	drm_atomic_helper_damage_iter_init(&iter, old_state, state);
	drm_atomic_for_each_plane_damage(&iter, &clip) {
		rect->x1 = min(rect->x1, clip.x1);
		rect->y1 = min(rect->y1, clip.y1);
		rect->x2 = max(rect->x2, clip.x2);
		rect->y2 = max(rect->y2, clip.y2);
		valid = true;
	}

	return valid;
}
EXPORT_SYMBOL(drm_atomic_helper_damage_merged);
/* * Copyright 2021 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* */ #ifndef _athub_3_0_0_SH_MASK_HEADER #define _athub_3_0_0_SH_MASK_HEADER // addressBlock: athub_xpbdec //XPB_RTR_SRC_APRTR0 #define XPB_RTR_SRC_APRTR0__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR0__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR1 #define XPB_RTR_SRC_APRTR1__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR1__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR2 #define XPB_RTR_SRC_APRTR2__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR2__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR3 #define XPB_RTR_SRC_APRTR3__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR3__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR4 #define XPB_RTR_SRC_APRTR4__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR4__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR5 #define XPB_RTR_SRC_APRTR5__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR5__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR6 #define XPB_RTR_SRC_APRTR6__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR6__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR7 #define XPB_RTR_SRC_APRTR7__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR7__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR8 #define XPB_RTR_SRC_APRTR8__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR8__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR9 #define XPB_RTR_SRC_APRTR9__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR9__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR10 #define XPB_RTR_SRC_APRTR10__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR10__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR11 #define XPB_RTR_SRC_APRTR11__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR11__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR12 #define XPB_RTR_SRC_APRTR12__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR12__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_SRC_APRTR13 #define XPB_RTR_SRC_APRTR13__BASE_ADDR__SHIFT 0x0 #define XPB_RTR_SRC_APRTR13__BASE_ADDR_MASK 0x7FFFFFFFL //XPB_RTR_DEST_MAP0 #define XPB_RTR_DEST_MAP0__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP0__DEST_OFFSET__SHIFT 0x1 #define 
XPB_RTR_DEST_MAP0__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP0__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP0__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP0__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP0__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP0__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP0__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP0__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP0__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP0__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP1 #define XPB_RTR_DEST_MAP1__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP1__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP1__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP1__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP1__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP1__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP1__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP1__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP1__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP1__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP1__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP1__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP2 #define XPB_RTR_DEST_MAP2__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP2__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP2__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP2__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP2__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP2__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP2__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP2__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP2__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP2__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP2__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP2__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP3 #define XPB_RTR_DEST_MAP3__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP3__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP3__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP3__DEST_SEL_RPB__SHIFT 0x18 #define 
XPB_RTR_DEST_MAP3__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP3__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP3__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP3__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP3__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP3__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP3__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP3__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP4 #define XPB_RTR_DEST_MAP4__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP4__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP4__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP4__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP4__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP4__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP4__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP4__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP4__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP4__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP4__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP4__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP5 #define XPB_RTR_DEST_MAP5__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP5__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP5__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP5__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP5__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP5__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP5__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP5__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP5__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP5__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP5__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP5__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP6 #define XPB_RTR_DEST_MAP6__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP6__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP6__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP6__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP6__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP6__APRTR_SIZE__SHIFT 0x1a #define 
XPB_RTR_DEST_MAP6__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP6__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP6__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP6__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP6__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP6__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP7 #define XPB_RTR_DEST_MAP7__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP7__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP7__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP7__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP7__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP7__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP7__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP7__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP7__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP7__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP7__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP7__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP8 #define XPB_RTR_DEST_MAP8__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP8__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP8__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP8__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP8__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP8__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP8__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP8__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP8__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP8__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP8__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP8__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP9 #define XPB_RTR_DEST_MAP9__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP9__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP9__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP9__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP9__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP9__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP9__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP9__DEST_OFFSET_MASK 0x000FFFFEL #define 
XPB_RTR_DEST_MAP9__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP9__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP9__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP9__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP10 #define XPB_RTR_DEST_MAP10__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP10__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP10__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP10__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP10__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP10__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP10__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP10__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP10__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP10__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP10__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP10__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP11 #define XPB_RTR_DEST_MAP11__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP11__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP11__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP11__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP11__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP11__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP11__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP11__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP11__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP11__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP11__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP11__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP12 #define XPB_RTR_DEST_MAP12__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP12__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP12__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP12__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP12__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP12__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP12__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP12__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP12__DEST_SEL_MASK 0x00F00000L #define 
XPB_RTR_DEST_MAP12__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP12__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP12__APRTR_SIZE_MASK 0x7C000000L //XPB_RTR_DEST_MAP13 #define XPB_RTR_DEST_MAP13__NMR__SHIFT 0x0 #define XPB_RTR_DEST_MAP13__DEST_OFFSET__SHIFT 0x1 #define XPB_RTR_DEST_MAP13__DEST_SEL__SHIFT 0x14 #define XPB_RTR_DEST_MAP13__DEST_SEL_RPB__SHIFT 0x18 #define XPB_RTR_DEST_MAP13__SIDE_OK__SHIFT 0x19 #define XPB_RTR_DEST_MAP13__APRTR_SIZE__SHIFT 0x1a #define XPB_RTR_DEST_MAP13__NMR_MASK 0x00000001L #define XPB_RTR_DEST_MAP13__DEST_OFFSET_MASK 0x000FFFFEL #define XPB_RTR_DEST_MAP13__DEST_SEL_MASK 0x00F00000L #define XPB_RTR_DEST_MAP13__DEST_SEL_RPB_MASK 0x01000000L #define XPB_RTR_DEST_MAP13__SIDE_OK_MASK 0x02000000L #define XPB_RTR_DEST_MAP13__APRTR_SIZE_MASK 0x7C000000L //XPB_CLG_CFG0 #define XPB_CLG_CFG0__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG0__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG0__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG0__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG0__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG0__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG0__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG0__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG0__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG0__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG1 #define XPB_CLG_CFG1__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG1__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG1__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG1__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG1__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG1__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG1__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG1__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG1__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG1__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG2 #define XPB_CLG_CFG2__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG2__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG2__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG2__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG2__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG2__WCB_NUM_MASK 
0x0000000FL #define XPB_CLG_CFG2__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG2__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG2__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG2__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG3 #define XPB_CLG_CFG3__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG3__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG3__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG3__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG3__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG3__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG3__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG3__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG3__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG3__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG4 #define XPB_CLG_CFG4__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG4__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG4__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG4__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG4__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG4__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG4__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG4__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG4__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG4__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG5 #define XPB_CLG_CFG5__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG5__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG5__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG5__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG5__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG5__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG5__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG5__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG5__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG5__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG6 #define XPB_CLG_CFG6__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG6__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG6__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG6__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG6__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG6__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG6__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG6__P2P_BAR_MASK 0x00000380L #define 
XPB_CLG_CFG6__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG6__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_CFG7 #define XPB_CLG_CFG7__WCB_NUM__SHIFT 0x0 #define XPB_CLG_CFG7__LB_TYPE__SHIFT 0x4 #define XPB_CLG_CFG7__P2P_BAR__SHIFT 0x7 #define XPB_CLG_CFG7__HOST_FLUSH__SHIFT 0xa #define XPB_CLG_CFG7__SIDE_FLUSH__SHIFT 0xe #define XPB_CLG_CFG7__WCB_NUM_MASK 0x0000000FL #define XPB_CLG_CFG7__LB_TYPE_MASK 0x00000070L #define XPB_CLG_CFG7__P2P_BAR_MASK 0x00000380L #define XPB_CLG_CFG7__HOST_FLUSH_MASK 0x00003C00L #define XPB_CLG_CFG7__SIDE_FLUSH_MASK 0x0003C000L //XPB_CLG_EXTRA #define XPB_CLG_EXTRA__CMP0_HIGH__SHIFT 0x0 #define XPB_CLG_EXTRA__CMP0_LOW__SHIFT 0x6 #define XPB_CLG_EXTRA__VLD0__SHIFT 0xb #define XPB_CLG_EXTRA__CLG0_NUM__SHIFT 0xc #define XPB_CLG_EXTRA__CMP1_HIGH__SHIFT 0xf #define XPB_CLG_EXTRA__CMP1_LOW__SHIFT 0x15 #define XPB_CLG_EXTRA__VLD1__SHIFT 0x1a #define XPB_CLG_EXTRA__CLG1_NUM__SHIFT 0x1b #define XPB_CLG_EXTRA__CMP0_HIGH_MASK 0x0000003FL #define XPB_CLG_EXTRA__CMP0_LOW_MASK 0x000007C0L #define XPB_CLG_EXTRA__VLD0_MASK 0x00000800L #define XPB_CLG_EXTRA__CLG0_NUM_MASK 0x00007000L #define XPB_CLG_EXTRA__CMP1_HIGH_MASK 0x001F8000L #define XPB_CLG_EXTRA__CMP1_LOW_MASK 0x03E00000L #define XPB_CLG_EXTRA__VLD1_MASK 0x04000000L #define XPB_CLG_EXTRA__CLG1_NUM_MASK 0x38000000L //XPB_CLG_EXTRA_MSK #define XPB_CLG_EXTRA_MSK__MSK0_HIGH__SHIFT 0x0 #define XPB_CLG_EXTRA_MSK__MSK0_LOW__SHIFT 0x6 #define XPB_CLG_EXTRA_MSK__MSK1_HIGH__SHIFT 0xb #define XPB_CLG_EXTRA_MSK__MSK1_LOW__SHIFT 0x11 #define XPB_CLG_EXTRA_MSK__MSK0_HIGH_MASK 0x0000003FL #define XPB_CLG_EXTRA_MSK__MSK0_LOW_MASK 0x000007C0L #define XPB_CLG_EXTRA_MSK__MSK1_HIGH_MASK 0x0001F800L #define XPB_CLG_EXTRA_MSK__MSK1_LOW_MASK 0x003E0000L //XPB_LB_ADDR #define XPB_LB_ADDR__CMP0__SHIFT 0x0 #define XPB_LB_ADDR__MASK0__SHIFT 0xa #define XPB_LB_ADDR__CMP1__SHIFT 0x14 #define XPB_LB_ADDR__MASK1__SHIFT 0x1a #define XPB_LB_ADDR__CMP0_MASK 0x000003FFL #define XPB_LB_ADDR__MASK0_MASK 0x000FFC00L #define 
XPB_LB_ADDR__CMP1_MASK 0x03F00000L #define XPB_LB_ADDR__MASK1_MASK 0xFC000000L //XPB_WCB_STS #define XPB_WCB_STS__PBUF_VLD__SHIFT 0x0 #define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x10 #define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x17 #define XPB_WCB_STS__PBUF_VLD_MASK 0x0000FFFFL #define XPB_WCB_STS__WCB_HST_DATA_BUF_CNT_MASK 0x007F0000L #define XPB_WCB_STS__WCB_SID_DATA_BUF_CNT_MASK 0x3F800000L //XPB_HST_CFG #define XPB_HST_CFG__BAR_UP_WR_CMD__SHIFT 0x0 #define XPB_HST_CFG__BAR_UP_WR_CMD_MASK 0x00000001L //XPB_P2P_BAR_CFG #define XPB_P2P_BAR_CFG__ADDR_SIZE__SHIFT 0x0 #define XPB_P2P_BAR_CFG__SEND_BAR__SHIFT 0x4 #define XPB_P2P_BAR_CFG__SNOOP__SHIFT 0x6 #define XPB_P2P_BAR_CFG__SEND_DIS__SHIFT 0x7 #define XPB_P2P_BAR_CFG__COMPRESS_DIS__SHIFT 0x8 #define XPB_P2P_BAR_CFG__UPDATE_DIS__SHIFT 0x9 #define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR__SHIFT 0xa #define XPB_P2P_BAR_CFG__RD_EN__SHIFT 0xb #define XPB_P2P_BAR_CFG__ATC_TRANSLATED__SHIFT 0xc #define XPB_P2P_BAR_CFG__ADDR_SIZE_MASK 0x0000000FL #define XPB_P2P_BAR_CFG__SEND_BAR_MASK 0x00000030L #define XPB_P2P_BAR_CFG__SNOOP_MASK 0x00000040L #define XPB_P2P_BAR_CFG__SEND_DIS_MASK 0x00000080L #define XPB_P2P_BAR_CFG__COMPRESS_DIS_MASK 0x00000100L #define XPB_P2P_BAR_CFG__UPDATE_DIS_MASK 0x00000200L #define XPB_P2P_BAR_CFG__REGBAR_FROM_SYSBAR_MASK 0x00000400L #define XPB_P2P_BAR_CFG__RD_EN_MASK 0x00000800L #define XPB_P2P_BAR_CFG__ATC_TRANSLATED_MASK 0x00001000L //XPB_P2P_BAR0 #define XPB_P2P_BAR0__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR0__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR0__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR0__VALID__SHIFT 0xc #define XPB_P2P_BAR0__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR0__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR0__RESERVE__SHIFT 0xf #define XPB_P2P_BAR0__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR0__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR0__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR0__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR0__VALID_MASK 0x00001000L #define 
XPB_P2P_BAR0__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR0__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR0__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR0__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR1 #define XPB_P2P_BAR1__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR1__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR1__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR1__VALID__SHIFT 0xc #define XPB_P2P_BAR1__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR1__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR1__RESERVE__SHIFT 0xf #define XPB_P2P_BAR1__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR1__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR1__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR1__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR1__VALID_MASK 0x00001000L #define XPB_P2P_BAR1__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR1__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR1__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR1__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR2 #define XPB_P2P_BAR2__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR2__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR2__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR2__VALID__SHIFT 0xc #define XPB_P2P_BAR2__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR2__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR2__RESERVE__SHIFT 0xf #define XPB_P2P_BAR2__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR2__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR2__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR2__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR2__VALID_MASK 0x00001000L #define XPB_P2P_BAR2__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR2__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR2__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR2__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR3 #define XPB_P2P_BAR3__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR3__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR3__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR3__VALID__SHIFT 0xc #define XPB_P2P_BAR3__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR3__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR3__RESERVE__SHIFT 
0xf #define XPB_P2P_BAR3__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR3__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR3__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR3__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR3__VALID_MASK 0x00001000L #define XPB_P2P_BAR3__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR3__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR3__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR3__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR4 #define XPB_P2P_BAR4__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR4__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR4__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR4__VALID__SHIFT 0xc #define XPB_P2P_BAR4__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR4__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR4__RESERVE__SHIFT 0xf #define XPB_P2P_BAR4__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR4__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR4__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR4__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR4__VALID_MASK 0x00001000L #define XPB_P2P_BAR4__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR4__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR4__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR4__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR5 #define XPB_P2P_BAR5__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR5__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR5__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR5__VALID__SHIFT 0xc #define XPB_P2P_BAR5__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR5__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR5__RESERVE__SHIFT 0xf #define XPB_P2P_BAR5__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR5__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR5__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR5__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR5__VALID_MASK 0x00001000L #define XPB_P2P_BAR5__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR5__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR5__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR5__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR6 #define XPB_P2P_BAR6__HOST_FLUSH__SHIFT 0x0 
#define XPB_P2P_BAR6__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR6__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR6__VALID__SHIFT 0xc #define XPB_P2P_BAR6__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR6__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR6__RESERVE__SHIFT 0xf #define XPB_P2P_BAR6__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR6__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR6__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR6__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR6__VALID_MASK 0x00001000L #define XPB_P2P_BAR6__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR6__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR6__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR6__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR7 #define XPB_P2P_BAR7__HOST_FLUSH__SHIFT 0x0 #define XPB_P2P_BAR7__REG_SYS_BAR__SHIFT 0x4 #define XPB_P2P_BAR7__MEM_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR7__VALID__SHIFT 0xc #define XPB_P2P_BAR7__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR7__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR7__RESERVE__SHIFT 0xf #define XPB_P2P_BAR7__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR7__HOST_FLUSH_MASK 0x0000000FL #define XPB_P2P_BAR7__REG_SYS_BAR_MASK 0x000000F0L #define XPB_P2P_BAR7__MEM_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR7__VALID_MASK 0x00001000L #define XPB_P2P_BAR7__SEND_DIS_MASK 0x00002000L #define XPB_P2P_BAR7__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR7__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR7__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR_SETUP #define XPB_P2P_BAR_SETUP__SEL__SHIFT 0x0 #define XPB_P2P_BAR_SETUP__REG_SYS_BAR__SHIFT 0x8 #define XPB_P2P_BAR_SETUP__VALID__SHIFT 0xc #define XPB_P2P_BAR_SETUP__SEND_DIS__SHIFT 0xd #define XPB_P2P_BAR_SETUP__COMPRESS_DIS__SHIFT 0xe #define XPB_P2P_BAR_SETUP__RESERVE__SHIFT 0xf #define XPB_P2P_BAR_SETUP__ADDRESS__SHIFT 0x10 #define XPB_P2P_BAR_SETUP__SEL_MASK 0x000000FFL #define XPB_P2P_BAR_SETUP__REG_SYS_BAR_MASK 0x00000F00L #define XPB_P2P_BAR_SETUP__VALID_MASK 0x00001000L #define XPB_P2P_BAR_SETUP__SEND_DIS_MASK 0x00002000L 
#define XPB_P2P_BAR_SETUP__COMPRESS_DIS_MASK 0x00004000L #define XPB_P2P_BAR_SETUP__RESERVE_MASK 0x00008000L #define XPB_P2P_BAR_SETUP__ADDRESS_MASK 0xFFFF0000L //XPB_P2P_BAR_DELTA_ABOVE #define XPB_P2P_BAR_DELTA_ABOVE__EN__SHIFT 0x0 #define XPB_P2P_BAR_DELTA_ABOVE__DELTA__SHIFT 0x8 #define XPB_P2P_BAR_DELTA_ABOVE__EN_MASK 0x000000FFL #define XPB_P2P_BAR_DELTA_ABOVE__DELTA_MASK 0x0FFFFF00L //XPB_P2P_BAR_DELTA_BELOW #define XPB_P2P_BAR_DELTA_BELOW__EN__SHIFT 0x0 #define XPB_P2P_BAR_DELTA_BELOW__DELTA__SHIFT 0x8 #define XPB_P2P_BAR_DELTA_BELOW__EN_MASK 0x000000FFL #define XPB_P2P_BAR_DELTA_BELOW__DELTA_MASK 0x0FFFFF00L //XPB_PEER_SYS_BAR0 #define XPB_PEER_SYS_BAR0__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR0__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR0__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR0__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR1 #define XPB_PEER_SYS_BAR1__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR1__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR1__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR1__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR2 #define XPB_PEER_SYS_BAR2__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR2__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR2__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR2__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR3 #define XPB_PEER_SYS_BAR3__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR3__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR3__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR3__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR4 #define XPB_PEER_SYS_BAR4__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR4__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR4__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR4__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR5 #define XPB_PEER_SYS_BAR5__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR5__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR5__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR5__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR6 #define XPB_PEER_SYS_BAR6__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR6__ADDR__SHIFT 0x1 #define 
XPB_PEER_SYS_BAR6__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR6__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR7 #define XPB_PEER_SYS_BAR7__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR7__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR7__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR7__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR8 #define XPB_PEER_SYS_BAR8__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR8__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR8__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR8__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR9 #define XPB_PEER_SYS_BAR9__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR9__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR9__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR9__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR10 #define XPB_PEER_SYS_BAR10__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR10__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR10__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR10__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR11 #define XPB_PEER_SYS_BAR11__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR11__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR11__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR11__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR12 #define XPB_PEER_SYS_BAR12__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR12__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR12__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR12__ADDR_MASK 0xFFFFFFFEL //XPB_PEER_SYS_BAR13 #define XPB_PEER_SYS_BAR13__VALID__SHIFT 0x0 #define XPB_PEER_SYS_BAR13__ADDR__SHIFT 0x1 #define XPB_PEER_SYS_BAR13__VALID_MASK 0x00000001L #define XPB_PEER_SYS_BAR13__ADDR_MASK 0xFFFFFFFEL //XPB_CLK_GAT #define XPB_CLK_GAT__ONDLY__SHIFT 0x0 #define XPB_CLK_GAT__OFFDLY__SHIFT 0x6 #define XPB_CLK_GAT__RDYDLY__SHIFT 0xc #define XPB_CLK_GAT__ENABLE__SHIFT 0x12 #define XPB_CLK_GAT__MEM_LS_ENABLE__SHIFT 0x13 #define XPB_CLK_GAT__ONDLY_MASK 0x0000003FL #define XPB_CLK_GAT__OFFDLY_MASK 0x00000FC0L #define XPB_CLK_GAT__RDYDLY_MASK 0x0003F000L #define XPB_CLK_GAT__ENABLE_MASK 0x00040000L #define XPB_CLK_GAT__MEM_LS_ENABLE_MASK 
0x00080000L //XPB_INTF_CFG #define XPB_INTF_CFG__RPB_WRREQ_CRD__SHIFT 0x0 #define XPB_INTF_CFG__MC_WRRET_ASK__SHIFT 0x8 #define XPB_INTF_CFG__XSP_REQ_CRD__SHIFT 0x10 #define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK__SHIFT 0x17 #define XPB_INTF_CFG__XSP_SNOOP_SEL__SHIFT 0x1b #define XPB_INTF_CFG__XSP_SNOOP_VAL__SHIFT 0x1d #define XPB_INTF_CFG__XSP_ORDERING_SEL__SHIFT 0x1e #define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA__SHIFT 0x1f #define XPB_INTF_CFG__RPB_WRREQ_CRD_MASK 0x000000FFL #define XPB_INTF_CFG__MC_WRRET_ASK_MASK 0x0000FF00L #define XPB_INTF_CFG__XSP_REQ_CRD_MASK 0x007F0000L #define XPB_INTF_CFG__P2P_WR_CHAIN_BREAK_MASK 0x00800000L #define XPB_INTF_CFG__XSP_SNOOP_SEL_MASK 0x18000000L #define XPB_INTF_CFG__XSP_SNOOP_VAL_MASK 0x20000000L #define XPB_INTF_CFG__XSP_ORDERING_SEL_MASK 0x40000000L #define XPB_INTF_CFG__QUALIFY_P2P_FOR_GPA_MASK 0x80000000L //XPB_INTF_STS #define XPB_INTF_STS__RPB_WRREQ_CRD__SHIFT 0x0 #define XPB_INTF_STS__XSP_REQ_CRD__SHIFT 0x8 #define XPB_INTF_STS__HOP_DATA_BUF_FULL__SHIFT 0xf #define XPB_INTF_STS__HOP_ATTR_BUF_FULL__SHIFT 0x10 #define XPB_INTF_STS__CNS_BUF_FULL__SHIFT 0x11 #define XPB_INTF_STS__CNS_BUF_BUSY__SHIFT 0x12 #define XPB_INTF_STS__RPB_RDREQ_CRD__SHIFT 0x13 #define XPB_INTF_STS__RPB_WRREQ_CRD_MASK 0x000000FFL #define XPB_INTF_STS__XSP_REQ_CRD_MASK 0x00007F00L #define XPB_INTF_STS__HOP_DATA_BUF_FULL_MASK 0x00008000L #define XPB_INTF_STS__HOP_ATTR_BUF_FULL_MASK 0x00010000L #define XPB_INTF_STS__CNS_BUF_FULL_MASK 0x00020000L #define XPB_INTF_STS__CNS_BUF_BUSY_MASK 0x00040000L #define XPB_INTF_STS__RPB_RDREQ_CRD_MASK 0x07F80000L //XPB_PIPE_STS #define XPB_PIPE_STS__WCB_ANY_PBUF__SHIFT 0x0 #define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT__SHIFT 0x1 #define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT__SHIFT 0x8 #define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL__SHIFT 0xf #define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL__SHIFT 0x10 #define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL__SHIFT 0x11 #define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL__SHIFT 0x12 #define 
XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL__SHIFT 0x13 #define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL__SHIFT 0x14 #define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL__SHIFT 0x15 #define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL__SHIFT 0x16 #define XPB_PIPE_STS__RET_BUF_FULL__SHIFT 0x17 #define XPB_PIPE_STS__XPB_CLK_BUSY_BITS__SHIFT 0x18 #define XPB_PIPE_STS__WCB_ANY_PBUF_MASK 0x00000001L #define XPB_PIPE_STS__WCB_HST_DATA_BUF_CNT_MASK 0x000000FEL #define XPB_PIPE_STS__WCB_SID_DATA_BUF_CNT_MASK 0x00007F00L #define XPB_PIPE_STS__WCB_HST_RD_PTR_BUF_FULL_MASK 0x00008000L #define XPB_PIPE_STS__WCB_SID_RD_PTR_BUF_FULL_MASK 0x00010000L #define XPB_PIPE_STS__WCB_HST_REQ_FIFO_FULL_MASK 0x00020000L #define XPB_PIPE_STS__WCB_SID_REQ_FIFO_FULL_MASK 0x00040000L #define XPB_PIPE_STS__WCB_HST_REQ_OBUF_FULL_MASK 0x00080000L #define XPB_PIPE_STS__WCB_SID_REQ_OBUF_FULL_MASK 0x00100000L #define XPB_PIPE_STS__WCB_HST_DATA_OBUF_FULL_MASK 0x00200000L #define XPB_PIPE_STS__WCB_SID_DATA_OBUF_FULL_MASK 0x00400000L #define XPB_PIPE_STS__RET_BUF_FULL_MASK 0x00800000L #define XPB_PIPE_STS__XPB_CLK_BUSY_BITS_MASK 0xFF000000L //XPB_SUB_CTRL #define XPB_SUB_CTRL__WRREQ_BYPASS_XPB__SHIFT 0x0 #define XPB_SUB_CTRL__STALL_CNS_RTR_REQ__SHIFT 0x1 #define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ__SHIFT 0x2 #define XPB_SUB_CTRL__STALL_RTR_MAP_REQ__SHIFT 0x3 #define XPB_SUB_CTRL__STALL_MAP_WCB_REQ__SHIFT 0x4 #define XPB_SUB_CTRL__STALL_WCB_SID_REQ__SHIFT 0x5 #define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND__SHIFT 0x6 #define XPB_SUB_CTRL__STALL_WCB_HST_REQ__SHIFT 0x7 #define XPB_SUB_CTRL__STALL_HST_HOP_REQ__SHIFT 0x8 #define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR__SHIFT 0x9 #define XPB_SUB_CTRL__RESET_CNS__SHIFT 0xa #define XPB_SUB_CTRL__RESET_RTR__SHIFT 0xb #define XPB_SUB_CTRL__RESET_RET__SHIFT 0xc #define XPB_SUB_CTRL__RESET_MAP__SHIFT 0xd #define XPB_SUB_CTRL__RESET_WCB__SHIFT 0xe #define XPB_SUB_CTRL__RESET_HST__SHIFT 0xf #define XPB_SUB_CTRL__RESET_HOP__SHIFT 0x10 #define XPB_SUB_CTRL__RESET_SID__SHIFT 0x11 #define 
XPB_SUB_CTRL__RESET_SRB__SHIFT 0x12 #define XPB_SUB_CTRL__RESET_CGR__SHIFT 0x13 #define XPB_SUB_CTRL__WRREQ_BYPASS_XPB_MASK 0x00000001L #define XPB_SUB_CTRL__STALL_CNS_RTR_REQ_MASK 0x00000002L #define XPB_SUB_CTRL__STALL_RTR_RPB_WRREQ_MASK 0x00000004L #define XPB_SUB_CTRL__STALL_RTR_MAP_REQ_MASK 0x00000008L #define XPB_SUB_CTRL__STALL_MAP_WCB_REQ_MASK 0x00000010L #define XPB_SUB_CTRL__STALL_WCB_SID_REQ_MASK 0x00000020L #define XPB_SUB_CTRL__STALL_MC_XSP_REQ_SEND_MASK 0x00000040L #define XPB_SUB_CTRL__STALL_WCB_HST_REQ_MASK 0x00000080L #define XPB_SUB_CTRL__STALL_HST_HOP_REQ_MASK 0x00000100L #define XPB_SUB_CTRL__STALL_XPB_RPB_REQ_ATTR_MASK 0x00000200L #define XPB_SUB_CTRL__RESET_CNS_MASK 0x00000400L #define XPB_SUB_CTRL__RESET_RTR_MASK 0x00000800L #define XPB_SUB_CTRL__RESET_RET_MASK 0x00001000L #define XPB_SUB_CTRL__RESET_MAP_MASK 0x00002000L #define XPB_SUB_CTRL__RESET_WCB_MASK 0x00004000L #define XPB_SUB_CTRL__RESET_HST_MASK 0x00008000L #define XPB_SUB_CTRL__RESET_HOP_MASK 0x00010000L #define XPB_SUB_CTRL__RESET_SID_MASK 0x00020000L #define XPB_SUB_CTRL__RESET_SRB_MASK 0x00040000L #define XPB_SUB_CTRL__RESET_CGR_MASK 0x00080000L //XPB_MAP_INVERT_FLUSH_NUM_LSB #define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM__SHIFT 0x0 #define XPB_MAP_INVERT_FLUSH_NUM_LSB__ALTER_FLUSH_NUM_MASK 0x0000FFFFL //XPB_PERF_KNOBS #define XPB_PERF_KNOBS__CNS_FIFO_DEPTH__SHIFT 0x0 #define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH__SHIFT 0x6 #define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH__SHIFT 0xc #define XPB_PERF_KNOBS__CNS_FIFO_DEPTH_MASK 0x0000003FL #define XPB_PERF_KNOBS__WCB_HST_FIFO_DEPTH_MASK 0x00000FC0L #define XPB_PERF_KNOBS__WCB_SID_FIFO_DEPTH_MASK 0x0003F000L //XPB_STICKY #define XPB_STICKY__BITS__SHIFT 0x0 #define XPB_STICKY__BITS_MASK 0xFFFFFFFFL //XPB_STICKY_W1C #define XPB_STICKY_W1C__BITS__SHIFT 0x0 #define XPB_STICKY_W1C__BITS_MASK 0xFFFFFFFFL //XPB_MISC_CFG #define XPB_MISC_CFG__FIELDNAME0__SHIFT 0x0 #define XPB_MISC_CFG__FIELDNAME1__SHIFT 0x8 #define 
XPB_MISC_CFG__FIELDNAME2__SHIFT 0x10 #define XPB_MISC_CFG__FIELDNAME3__SHIFT 0x18 #define XPB_MISC_CFG__TRIGGERNAME__SHIFT 0x1f #define XPB_MISC_CFG__FIELDNAME0_MASK 0x000000FFL #define XPB_MISC_CFG__FIELDNAME1_MASK 0x0000FF00L #define XPB_MISC_CFG__FIELDNAME2_MASK 0x00FF0000L #define XPB_MISC_CFG__FIELDNAME3_MASK 0x7F000000L #define XPB_MISC_CFG__TRIGGERNAME_MASK 0x80000000L //XPB_INTF_CFG2 #define XPB_INTF_CFG2__RPB_RDREQ_CRD__SHIFT 0x0 #define XPB_INTF_CFG2__RPB_RDREQ_CRD_MASK 0x000000FFL //XPB_CLG_EXTRA_RD #define XPB_CLG_EXTRA_RD__CMP0_HIGH__SHIFT 0x0 #define XPB_CLG_EXTRA_RD__CMP0_LOW__SHIFT 0x6 #define XPB_CLG_EXTRA_RD__VLD0__SHIFT 0xb #define XPB_CLG_EXTRA_RD__CLG0_NUM__SHIFT 0xc #define XPB_CLG_EXTRA_RD__CMP1_HIGH__SHIFT 0xf #define XPB_CLG_EXTRA_RD__CMP1_LOW__SHIFT 0x15 #define XPB_CLG_EXTRA_RD__VLD1__SHIFT 0x1a #define XPB_CLG_EXTRA_RD__CLG1_NUM__SHIFT 0x1b #define XPB_CLG_EXTRA_RD__CMP0_HIGH_MASK 0x0000003FL #define XPB_CLG_EXTRA_RD__CMP0_LOW_MASK 0x000007C0L #define XPB_CLG_EXTRA_RD__VLD0_MASK 0x00000800L #define XPB_CLG_EXTRA_RD__CLG0_NUM_MASK 0x00007000L #define XPB_CLG_EXTRA_RD__CMP1_HIGH_MASK 0x001F8000L #define XPB_CLG_EXTRA_RD__CMP1_LOW_MASK 0x03E00000L #define XPB_CLG_EXTRA_RD__VLD1_MASK 0x04000000L #define XPB_CLG_EXTRA_RD__CLG1_NUM_MASK 0x38000000L //XPB_CLG_EXTRA_MSK_RD #define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH__SHIFT 0x0 #define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW__SHIFT 0x6 #define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH__SHIFT 0xb #define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW__SHIFT 0x11 #define XPB_CLG_EXTRA_MSK_RD__MSK0_HIGH_MASK 0x0000003FL #define XPB_CLG_EXTRA_MSK_RD__MSK0_LOW_MASK 0x000007C0L #define XPB_CLG_EXTRA_MSK_RD__MSK1_HIGH_MASK 0x0001F800L #define XPB_CLG_EXTRA_MSK_RD__MSK1_LOW_MASK 0x003E0000L //XPB_CLG_GFX_MATCH #define XPB_CLG_GFX_MATCH__FARBIRC0_ID__SHIFT 0x0 #define XPB_CLG_GFX_MATCH__FARBIRC1_ID__SHIFT 0x6 #define XPB_CLG_GFX_MATCH__FARBIRC2_ID__SHIFT 0xc #define XPB_CLG_GFX_MATCH__FARBIRC3_ID__SHIFT 0x12 #define 
XPB_CLG_GFX_MATCH__FARBIRC0_VLD__SHIFT 0x18 #define XPB_CLG_GFX_MATCH__FARBIRC1_VLD__SHIFT 0x19 #define XPB_CLG_GFX_MATCH__FARBIRC2_VLD__SHIFT 0x1a #define XPB_CLG_GFX_MATCH__FARBIRC3_VLD__SHIFT 0x1b #define XPB_CLG_GFX_MATCH__FARBIRC0_ID_MASK 0x0000003FL #define XPB_CLG_GFX_MATCH__FARBIRC1_ID_MASK 0x00000FC0L #define XPB_CLG_GFX_MATCH__FARBIRC2_ID_MASK 0x0003F000L #define XPB_CLG_GFX_MATCH__FARBIRC3_ID_MASK 0x00FC0000L #define XPB_CLG_GFX_MATCH__FARBIRC0_VLD_MASK 0x01000000L #define XPB_CLG_GFX_MATCH__FARBIRC1_VLD_MASK 0x02000000L #define XPB_CLG_GFX_MATCH__FARBIRC2_VLD_MASK 0x04000000L #define XPB_CLG_GFX_MATCH__FARBIRC3_VLD_MASK 0x08000000L //XPB_CLG_GFX_MATCH_MSK #define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0 #define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x6 #define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK__SHIFT 0xc #define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK__SHIFT 0x12 #define XPB_CLG_GFX_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL #define XPB_CLG_GFX_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x00000FC0L #define XPB_CLG_GFX_MATCH_MSK__FARBIRC2_ID_MSK_MASK 0x0003F000L #define XPB_CLG_GFX_MATCH_MSK__FARBIRC3_ID_MSK_MASK 0x00FC0000L //XPB_CLG_MM_MATCH #define XPB_CLG_MM_MATCH__FARBIRC0_ID__SHIFT 0x0 #define XPB_CLG_MM_MATCH__FARBIRC1_ID__SHIFT 0x6 #define XPB_CLG_MM_MATCH__FARBIRC0_VLD__SHIFT 0xc #define XPB_CLG_MM_MATCH__FARBIRC1_VLD__SHIFT 0xd #define XPB_CLG_MM_MATCH__FARBIRC0_ID_MASK 0x0000003FL #define XPB_CLG_MM_MATCH__FARBIRC1_ID_MASK 0x00000FC0L #define XPB_CLG_MM_MATCH__FARBIRC0_VLD_MASK 0x00001000L #define XPB_CLG_MM_MATCH__FARBIRC1_VLD_MASK 0x00002000L //XPB_CLG_MM_MATCH_MSK #define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0 #define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK__SHIFT 0x6 #define XPB_CLG_MM_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL #define XPB_CLG_MM_MATCH_MSK__FARBIRC1_ID_MSK_MASK 0x00000FC0L //XPB_CLG_GUS_MATCH #define XPB_CLG_GUS_MATCH__FARBIRC0_ID__SHIFT 0x0 #define XPB_CLG_GUS_MATCH__FARBIRC0_VLD__SHIFT 0x6 #define 
XPB_CLG_GUS_MATCH__FARBIRC0_ID_MASK 0x0000003FL #define XPB_CLG_GUS_MATCH__FARBIRC0_VLD_MASK 0x00000040L //XPB_CLG_GUS_MATCH_MSK #define XPB_CLG_GUS_MATCH_MSK__FARBIRC0_ID_MSK__SHIFT 0x0 #define XPB_CLG_GUS_MATCH_MSK__FARBIRC0_ID_MSK_MASK 0x0000003FL // addressBlock: athub_rpbdec //RPB_PASSPW_CONF #define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE__SHIFT 0x0 #define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE__SHIFT 0x1 #define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE__SHIFT 0x2 #define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN__SHIFT 0x3 #define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE__SHIFT 0x4 #define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN__SHIFT 0x5 #define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE__SHIFT 0x6 #define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN__SHIFT 0x7 #define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE__SHIFT 0x8 #define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN__SHIFT 0x9 #define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE__SHIFT 0xa #define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN__SHIFT 0xb #define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE__SHIFT 0xc #define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN__SHIFT 0xd #define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE__SHIFT 0xe #define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE__SHIFT 0xf #define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE__SHIFT 0x10 #define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE__SHIFT 0x11 #define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE__SHIFT 0x12 #define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE__SHIFT 0x13 #define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE__SHIFT 0x14 #define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN__SHIFT 0x15 #define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE__SHIFT 0x16 #define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN__SHIFT 0x17 #define RPB_PASSPW_CONF__XPB_PASSPW_OVERRIDE_MASK 0x00000001L #define RPB_PASSPW_CONF__XPB_RSPPASSPW_OVERRIDE_MASK 0x00000002L #define RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_MASK 0x00000004L #define 
RPB_PASSPW_CONF__ATC_VC5_TR_PASSPW_OVERRIDE_EN_MASK 0x00000008L #define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_MASK 0x00000010L #define RPB_PASSPW_CONF__ATC_VC5_RSPPASSPW_OVERRIDE_EN_MASK 0x00000020L #define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_MASK 0x00000040L #define RPB_PASSPW_CONF__ATC_VC0_TR_PASSPW_OVERRIDE_EN_MASK 0x00000080L #define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_MASK 0x00000100L #define RPB_PASSPW_CONF__ATC_VC0_RSPPASSPW_OVERRIDE_EN_MASK 0x00000200L #define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_MASK 0x00000400L #define RPB_PASSPW_CONF__ATC_PAGE_PASSPW_OVERRIDE_EN_MASK 0x00000800L #define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_MASK 0x00001000L #define RPB_PASSPW_CONF__ATC_PAGE_RSPPASSPW_OVERRIDE_EN_MASK 0x00002000L #define RPB_PASSPW_CONF__WR_PASSPW_OVERRIDE_MASK 0x00004000L #define RPB_PASSPW_CONF__WR_RSPPASSPW_OVERRIDE_MASK 0x00008000L #define RPB_PASSPW_CONF__RD_PASSPW_OVERRIDE_MASK 0x00010000L #define RPB_PASSPW_CONF__RD_RSPPASSPW_OVERRIDE_MASK 0x00020000L #define RPB_PASSPW_CONF__ATOMIC_PASSPW_OVERRIDE_MASK 0x00040000L #define RPB_PASSPW_CONF__ATOMIC_RSPPASSPW_OVERRIDE_MASK 0x00080000L #define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_MASK 0x00100000L #define RPB_PASSPW_CONF__WRRSP_PASSPW_OVERRIDE_EN_MASK 0x00200000L #define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_MASK 0x00400000L #define RPB_PASSPW_CONF__RDRSP_PASSPW_OVERRIDE_EN_MASK 0x00800000L //RPB_BLOCKLEVEL_CONF #define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE__SHIFT 0x0 #define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x2 #define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL__SHIFT 0x3 #define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL__SHIFT 0x5 #define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL__SHIFT 0x7 #define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL__SHIFT 0x9 #define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE__SHIFT 0xb #define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0xd #define 
RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE__SHIFT 0xe #define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x10 #define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE__SHIFT 0x11 #define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN__SHIFT 0x13 #define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_MASK 0x00000003L #define RPB_BLOCKLEVEL_CONF__XPB_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00000004L #define RPB_BLOCKLEVEL_CONF__ATC_VC5_TR_BLOCKLEVEL_MASK 0x00000018L #define RPB_BLOCKLEVEL_CONF__ATC_VC0_TR_BLOCKLEVEL_MASK 0x00000060L #define RPB_BLOCKLEVEL_CONF__ATC_PAGE_BLOCKLEVEL_MASK 0x00000180L #define RPB_BLOCKLEVEL_CONF__ATC_INV_BLOCKLEVEL_MASK 0x00000600L #define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_MASK 0x00001800L #define RPB_BLOCKLEVEL_CONF__IO_WR_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00002000L #define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_MASK 0x0000C000L #define RPB_BLOCKLEVEL_CONF__IO_RD_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00010000L #define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_MASK 0x00060000L #define RPB_BLOCKLEVEL_CONF__ATOMIC_BLOCKLEVEL_OVERRIDE_EN_MASK 0x00080000L //RPB_TAG_CONF #define RPB_TAG_CONF__RPB_IO_RD__SHIFT 0x0 #define RPB_TAG_CONF__RPB_IO_WR__SHIFT 0xa #define RPB_TAG_CONF__RPB_IO_RD_MASK 0x000003FFL #define RPB_TAG_CONF__RPB_IO_WR_MASK 0x000FFC00L //RPB_ARB_CNTL #define RPB_ARB_CNTL__RD_SWITCH_NUM__SHIFT 0x0 #define RPB_ARB_CNTL__WR_SWITCH_NUM__SHIFT 0x8 #define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM__SHIFT 0x10 #define RPB_ARB_CNTL__ARB_MODE__SHIFT 0x18 #define RPB_ARB_CNTL__SWITCH_NUM_MODE__SHIFT 0x19 #define RPB_ARB_CNTL__RPB_VC0_CRD__SHIFT 0x1a #define RPB_ARB_CNTL__DISABLE_FED__SHIFT 0x1f #define RPB_ARB_CNTL__RD_SWITCH_NUM_MASK 0x000000FFL #define RPB_ARB_CNTL__WR_SWITCH_NUM_MASK 0x0000FF00L #define RPB_ARB_CNTL__ATC_TR_SWITCH_NUM_MASK 0x00FF0000L #define RPB_ARB_CNTL__ARB_MODE_MASK 0x01000000L #define RPB_ARB_CNTL__SWITCH_NUM_MODE_MASK 0x02000000L #define RPB_ARB_CNTL__RPB_VC0_CRD_MASK 0x7C000000L #define 
RPB_ARB_CNTL__DISABLE_FED_MASK 0x80000000L //RPB_ARB_CNTL2 #define RPB_ARB_CNTL2__P2P_SWITCH_NUM__SHIFT 0x0 #define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM__SHIFT 0x8 #define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM__SHIFT 0x10 #define RPB_ARB_CNTL2__RPB_VC1_CRD__SHIFT 0x18 #define RPB_ARB_CNTL2__P2P_SWITCH_NUM_MASK 0x000000FFL #define RPB_ARB_CNTL2__ATOMIC_SWITCH_NUM_MASK 0x0000FF00L #define RPB_ARB_CNTL2__ATC_PAGE_SWITCH_NUM_MASK 0x00FF0000L #define RPB_ARB_CNTL2__RPB_VC1_CRD_MASK 0x1F000000L //RPB_BIF_CNTL #define RPB_BIF_CNTL__VC0_SWITCH_NUM__SHIFT 0x0 #define RPB_BIF_CNTL__VC1_SWITCH_NUM__SHIFT 0x8 #define RPB_BIF_CNTL__VC2_SWITCH_NUM__SHIFT 0x10 #define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN__SHIFT 0x18 #define RPB_BIF_CNTL__TR_QOS_VC__SHIFT 0x19 #define RPB_BIF_CNTL__FATAL_ERROR_ENABLE__SHIFT 0x1c #define RPB_BIF_CNTL__RESERVE__SHIFT 0x1d #define RPB_BIF_CNTL__VC0_SWITCH_NUM_MASK 0x000000FFL #define RPB_BIF_CNTL__VC1_SWITCH_NUM_MASK 0x0000FF00L #define RPB_BIF_CNTL__VC2_SWITCH_NUM_MASK 0x00FF0000L #define RPB_BIF_CNTL__NBIF_DMA_ORIGCLKCTL_EN_MASK 0x01000000L #define RPB_BIF_CNTL__TR_QOS_VC_MASK 0x0E000000L #define RPB_BIF_CNTL__FATAL_ERROR_ENABLE_MASK 0x10000000L #define RPB_BIF_CNTL__RESERVE_MASK 0xE0000000L //RPB_BIF_CNTL2 #define RPB_BIF_CNTL2__ARB_MODE__SHIFT 0x0 #define RPB_BIF_CNTL2__DRAIN_VC_NUM__SHIFT 0x1 #define RPB_BIF_CNTL2__SWITCH_ENABLE__SHIFT 0x3 #define RPB_BIF_CNTL2__SWITCH_THRESHOLD__SHIFT 0x4 #define RPB_BIF_CNTL2__PAGE_PRI_EN__SHIFT 0xc #define RPB_BIF_CNTL2__VC5_TR_PRI_EN__SHIFT 0xd #define RPB_BIF_CNTL2__VC0_TR_PRI_EN__SHIFT 0xe #define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE__SHIFT 0xf #define RPB_BIF_CNTL2__PARITY_CHECK_EN__SHIFT 0x10 #define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN__SHIFT 0x11 #define RPB_BIF_CNTL2__RESERVE__SHIFT 0x19 #define RPB_BIF_CNTL2__ARB_MODE_MASK 0x00000001L #define RPB_BIF_CNTL2__DRAIN_VC_NUM_MASK 0x00000006L #define RPB_BIF_CNTL2__SWITCH_ENABLE_MASK 0x00000008L #define RPB_BIF_CNTL2__SWITCH_THRESHOLD_MASK 0x00000FF0L #define 
RPB_BIF_CNTL2__PAGE_PRI_EN_MASK 0x00001000L #define RPB_BIF_CNTL2__VC5_TR_PRI_EN_MASK 0x00002000L #define RPB_BIF_CNTL2__VC0_TR_PRI_EN_MASK 0x00004000L #define RPB_BIF_CNTL2__VC0_CHAINED_OVERRIDE_MASK 0x00008000L #define RPB_BIF_CNTL2__PARITY_CHECK_EN_MASK 0x00010000L #define RPB_BIF_CNTL2__NBIF_HST_COMPCLKCTL_EN_MASK 0x00020000L #define RPB_BIF_CNTL2__RESERVE_MASK 0xFE000000L //ATHUB_MISC_CNTL #define ATHUB_MISC_CNTL__CG_OFFDLY__SHIFT 0x0 #define ATHUB_MISC_CNTL__CG_ENABLE__SHIFT 0x6 #define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE__SHIFT 0x7 #define ATHUB_MISC_CNTL__PG_ENABLE__SHIFT 0x8 #define ATHUB_MISC_CNTL__PG_OFFDLY__SHIFT 0x9 #define ATHUB_MISC_CNTL__ALWAYS_BUSY__SHIFT 0xf #define ATHUB_MISC_CNTL__CG_STATUS__SHIFT 0x10 #define ATHUB_MISC_CNTL__PG_STATUS__SHIFT 0x11 #define ATHUB_MISC_CNTL__RPB_BUSY__SHIFT 0x12 #define ATHUB_MISC_CNTL__XPB_BUSY__SHIFT 0x13 #define ATHUB_MISC_CNTL__ATS_BUSY__SHIFT 0x14 #define ATHUB_MISC_CNTL__SDPNCS_BUSY__SHIFT 0x15 #define ATHUB_MISC_CNTL__DFPORT_BUSY__SHIFT 0x16 #define ATHUB_MISC_CNTL__SWITCH_CNTL__SHIFT 0x17 #define ATHUB_MISC_CNTL__LS_DELAY_ENABLE__SHIFT 0x18 #define ATHUB_MISC_CNTL__LS_DELAY_TIME__SHIFT 0x19 #define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE__SHIFT 0x1e #define ATHUB_MISC_CNTL__RM_VALID_ENABLE__SHIFT 0x1f #define ATHUB_MISC_CNTL__CG_OFFDLY_MASK 0x0000003FL #define ATHUB_MISC_CNTL__CG_ENABLE_MASK 0x00000040L #define ATHUB_MISC_CNTL__CG_MEM_LS_ENABLE_MASK 0x00000080L #define ATHUB_MISC_CNTL__PG_ENABLE_MASK 0x00000100L #define ATHUB_MISC_CNTL__PG_OFFDLY_MASK 0x00007E00L #define ATHUB_MISC_CNTL__ALWAYS_BUSY_MASK 0x00008000L #define ATHUB_MISC_CNTL__CG_STATUS_MASK 0x00010000L #define ATHUB_MISC_CNTL__PG_STATUS_MASK 0x00020000L #define ATHUB_MISC_CNTL__RPB_BUSY_MASK 0x00040000L #define ATHUB_MISC_CNTL__XPB_BUSY_MASK 0x00080000L #define ATHUB_MISC_CNTL__ATS_BUSY_MASK 0x00100000L #define ATHUB_MISC_CNTL__SDPNCS_BUSY_MASK 0x00200000L #define ATHUB_MISC_CNTL__DFPORT_BUSY_MASK 0x00400000L #define 
ATHUB_MISC_CNTL__SWITCH_CNTL_MASK 0x00800000L #define ATHUB_MISC_CNTL__LS_DELAY_ENABLE_MASK 0x01000000L #define ATHUB_MISC_CNTL__LS_DELAY_TIME_MASK 0x3E000000L #define ATHUB_MISC_CNTL__RESETB_PG_CLK_GATING_ENABLE_MASK 0x40000000L #define ATHUB_MISC_CNTL__RM_VALID_ENABLE_MASK 0x80000000L //ATHUB_MEM_POWER_LS #define ATHUB_MEM_POWER_LS__LS_SETUP__SHIFT 0x0 #define ATHUB_MEM_POWER_LS__LS_HOLD__SHIFT 0x6 #define ATHUB_MEM_POWER_LS__LS_SETUP_MASK 0x0000003FL #define ATHUB_MEM_POWER_LS__LS_HOLD_MASK 0x0007FFC0L //RPB_SDPPORT_CNTL #define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE__SHIFT 0x0 #define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE__SHIFT 0x1 #define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT__SHIFT 0x3 #define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER__SHIFT 0x4 #define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS__SHIFT 0x5 #define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD__SHIFT 0x6 #define RPB_SDPPORT_CNTL__RESERVE1__SHIFT 0xa #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN__SHIFT 0x16 #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV__SHIFT 0x17 #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN__SHIFT 0x18 #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV__SHIFT 0x19 #define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN__SHIFT 0x1a #define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV__SHIFT 0x1b #define RPB_SDPPORT_CNTL__CG_BUSY_PORT__SHIFT 0x1c #define RPB_SDPPORT_CNTL__RESERVE__SHIFT 0x1d #define RPB_SDPPORT_CNTL__NBIF_DMA_SELF_ACTIVATE_MASK 0x00000001L #define RPB_SDPPORT_CNTL__NBIF_DMA_CFG_MODE_MASK 0x00000006L #define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_REISSUE_CREDIT_MASK 0x00000008L #define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_SATURATE_COUNTER_MASK 0x00000010L #define RPB_SDPPORT_CNTL__NBIF_DMA_ENABLE_DISRUPT_FULLDIS_MASK 0x00000020L #define RPB_SDPPORT_CNTL__NBIF_DMA_HALT_THRESHOLD_MASK 0x000003C0L #define RPB_SDPPORT_CNTL__RESERVE1_MASK 0x003FFC00L #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKEN_MASK 0x00400000L #define 
RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPCKENRCV_MASK 0x00800000L #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKEN_MASK 0x01000000L #define RPB_SDPPORT_CNTL__DF_SDPVDCI_RDRSPDATACKENRCV_MASK 0x02000000L #define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKEN_MASK 0x04000000L #define RPB_SDPPORT_CNTL__DF_SDPVDCI_WRRSPCKENRCV_MASK 0x08000000L #define RPB_SDPPORT_CNTL__CG_BUSY_PORT_MASK 0x10000000L #define RPB_SDPPORT_CNTL__RESERVE_MASK 0xE0000000L //RPB_NBIF_SDPPORT_CNTL #define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD__SHIFT 0x0 #define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD__SHIFT 0x8 #define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD__SHIFT 0x10 #define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD__SHIFT 0x18 #define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_WRRSP_CRD_MASK 0x000000FFL #define RPB_NBIF_SDPPORT_CNTL__NBIF_DMA_RDRSP_CRD_MASK 0x0000FF00L #define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_REQ_CRD_MASK 0x00FF0000L #define RPB_NBIF_SDPPORT_CNTL__NBIF_HST_DATA_CRD_MASK 0xFF000000L //RPB_DEINTRLV_COMBINE_CNTL #define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER__SHIFT 0x0 #define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN__SHIFT 0x4 #define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE__SHIFT 0x5 #define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD__SHIFT 0x6 #define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN__SHIFT 0xe #define RPB_DEINTRLV_COMBINE_CNTL__RESERVE__SHIFT 0xf #define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_FLUSH_TIMER_MASK 0x0000000FL #define RPB_DEINTRLV_COMBINE_CNTL__WC_CHAINED_BREAK_EN_MASK 0x00000010L #define RPB_DEINTRLV_COMBINE_CNTL__WC_HANDLE_CHECK_DISABLE_MASK 0x00000020L #define RPB_DEINTRLV_COMBINE_CNTL__XPB_WRREQ_CRD_MASK 0x00003FC0L #define RPB_DEINTRLV_COMBINE_CNTL__WC_CLI_INTLV_EN_MASK 0x00004000L #define RPB_DEINTRLV_COMBINE_CNTL__RESERVE_MASK 0xFFFF8000L //RPB_VC_SWITCH_RDWR #define RPB_VC_SWITCH_RDWR__MODE__SHIFT 0x0 #define RPB_VC_SWITCH_RDWR__NUM_RD__SHIFT 0x2 #define RPB_VC_SWITCH_RDWR__NUM_WR__SHIFT 0xa #define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD__SHIFT 
0x12 #define RPB_VC_SWITCH_RDWR__CENTER_MARGIN__SHIFT 0x1a #define RPB_VC_SWITCH_RDWR__MODE_MASK 0x00000003L #define RPB_VC_SWITCH_RDWR__NUM_RD_MASK 0x000003FCL #define RPB_VC_SWITCH_RDWR__NUM_WR_MASK 0x0003FC00L #define RPB_VC_SWITCH_RDWR__XPB_RDREQ_CRD_MASK 0x03FC0000L #define RPB_VC_SWITCH_RDWR__CENTER_MARGIN_MASK 0xFC000000L //RPB_PERF_COUNTER_CNTL #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 #define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER__SHIFT 0x2 #define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS__SHIFT 0x3 #define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION__SHIFT 0x4 #define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS__SHIFT 0x5 #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0__SHIFT 0x9 #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1__SHIFT 0xe #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2__SHIFT 0x13 #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3__SHIFT 0x18 #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_SELECT_MASK 0x00000003L #define RPB_PERF_COUNTER_CNTL__CLEAR_SELECTED_PERF_COUNTER_MASK 0x00000004L #define RPB_PERF_COUNTER_CNTL__CLEAR_ALL_PERF_COUNTERS_MASK 0x00000008L #define RPB_PERF_COUNTER_CNTL__STOP_ON_COUNTER_SATURATION_MASK 0x00000010L #define RPB_PERF_COUNTER_CNTL__ENABLE_PERF_COUNTERS_MASK 0x000001E0L #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_0_MASK 0x00003E00L #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_1_MASK 0x0007C000L #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_2_MASK 0x00F80000L #define RPB_PERF_COUNTER_CNTL__PERF_COUNTER_ASSIGN_3_MASK 0x1F000000L //RPB_PERF_COUNTER_STATUS #define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE__SHIFT 0x0 #define RPB_PERF_COUNTER_STATUS__PERFORMANCE_COUNTER_VALUE_MASK 0xFFFFFFFFL //RPB_PERFCOUNTER_LO #define RPB_PERFCOUNTER_LO__COUNTER_LO__SHIFT 0x0 #define RPB_PERFCOUNTER_LO__COUNTER_LO_MASK 0xFFFFFFFFL //RPB_PERFCOUNTER_HI #define RPB_PERFCOUNTER_HI__COUNTER_HI__SHIFT 0x0 #define 
RPB_PERFCOUNTER_HI__COMPARE_VALUE__SHIFT 0x10 #define RPB_PERFCOUNTER_HI__COUNTER_HI_MASK 0x0000FFFFL #define RPB_PERFCOUNTER_HI__COMPARE_VALUE_MASK 0xFFFF0000L //RPB_PERFCOUNTER0_CFG #define RPB_PERFCOUNTER0_CFG__PERF_SEL__SHIFT 0x0 #define RPB_PERFCOUNTER0_CFG__PERF_SEL_END__SHIFT 0x8 #define RPB_PERFCOUNTER0_CFG__PERF_MODE__SHIFT 0x18 #define RPB_PERFCOUNTER0_CFG__ENABLE__SHIFT 0x1c #define RPB_PERFCOUNTER0_CFG__CLEAR__SHIFT 0x1d #define RPB_PERFCOUNTER0_CFG__PERF_SEL_MASK 0x000000FFL #define RPB_PERFCOUNTER0_CFG__PERF_SEL_END_MASK 0x0000FF00L #define RPB_PERFCOUNTER0_CFG__PERF_MODE_MASK 0x0F000000L #define RPB_PERFCOUNTER0_CFG__ENABLE_MASK 0x10000000L #define RPB_PERFCOUNTER0_CFG__CLEAR_MASK 0x20000000L //RPB_PERFCOUNTER1_CFG #define RPB_PERFCOUNTER1_CFG__PERF_SEL__SHIFT 0x0 #define RPB_PERFCOUNTER1_CFG__PERF_SEL_END__SHIFT 0x8 #define RPB_PERFCOUNTER1_CFG__PERF_MODE__SHIFT 0x18 #define RPB_PERFCOUNTER1_CFG__ENABLE__SHIFT 0x1c #define RPB_PERFCOUNTER1_CFG__CLEAR__SHIFT 0x1d #define RPB_PERFCOUNTER1_CFG__PERF_SEL_MASK 0x000000FFL #define RPB_PERFCOUNTER1_CFG__PERF_SEL_END_MASK 0x0000FF00L #define RPB_PERFCOUNTER1_CFG__PERF_MODE_MASK 0x0F000000L #define RPB_PERFCOUNTER1_CFG__ENABLE_MASK 0x10000000L #define RPB_PERFCOUNTER1_CFG__CLEAR_MASK 0x20000000L //RPB_PERFCOUNTER2_CFG #define RPB_PERFCOUNTER2_CFG__PERF_SEL__SHIFT 0x0 #define RPB_PERFCOUNTER2_CFG__PERF_SEL_END__SHIFT 0x8 #define RPB_PERFCOUNTER2_CFG__PERF_MODE__SHIFT 0x18 #define RPB_PERFCOUNTER2_CFG__ENABLE__SHIFT 0x1c #define RPB_PERFCOUNTER2_CFG__CLEAR__SHIFT 0x1d #define RPB_PERFCOUNTER2_CFG__PERF_SEL_MASK 0x000000FFL #define RPB_PERFCOUNTER2_CFG__PERF_SEL_END_MASK 0x0000FF00L #define RPB_PERFCOUNTER2_CFG__PERF_MODE_MASK 0x0F000000L #define RPB_PERFCOUNTER2_CFG__ENABLE_MASK 0x10000000L #define RPB_PERFCOUNTER2_CFG__CLEAR_MASK 0x20000000L //RPB_PERFCOUNTER3_CFG #define RPB_PERFCOUNTER3_CFG__PERF_SEL__SHIFT 0x0 #define RPB_PERFCOUNTER3_CFG__PERF_SEL_END__SHIFT 0x8 #define 
RPB_PERFCOUNTER3_CFG__PERF_MODE__SHIFT 0x18 #define RPB_PERFCOUNTER3_CFG__ENABLE__SHIFT 0x1c #define RPB_PERFCOUNTER3_CFG__CLEAR__SHIFT 0x1d #define RPB_PERFCOUNTER3_CFG__PERF_SEL_MASK 0x000000FFL #define RPB_PERFCOUNTER3_CFG__PERF_SEL_END_MASK 0x0000FF00L #define RPB_PERFCOUNTER3_CFG__PERF_MODE_MASK 0x0F000000L #define RPB_PERFCOUNTER3_CFG__ENABLE_MASK 0x10000000L #define RPB_PERFCOUNTER3_CFG__CLEAR_MASK 0x20000000L //RPB_PERFCOUNTER_RSLT_CNTL #define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT__SHIFT 0x0 #define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER__SHIFT 0x8 #define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER__SHIFT 0x10 #define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY__SHIFT 0x18 #define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL__SHIFT 0x19 #define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE__SHIFT 0x1a #define RPB_PERFCOUNTER_RSLT_CNTL__PERF_COUNTER_SELECT_MASK 0x0000000FL #define RPB_PERFCOUNTER_RSLT_CNTL__START_TRIGGER_MASK 0x0000FF00L #define RPB_PERFCOUNTER_RSLT_CNTL__STOP_TRIGGER_MASK 0x00FF0000L #define RPB_PERFCOUNTER_RSLT_CNTL__ENABLE_ANY_MASK 0x01000000L #define RPB_PERFCOUNTER_RSLT_CNTL__CLEAR_ALL_MASK 0x02000000L #define RPB_PERFCOUNTER_RSLT_CNTL__STOP_ALL_ON_SATURATE_MASK 0x04000000L //RPB_ATS_CNTL3 #define RPB_ATS_CNTL3__RPB_ATS_VC5_TR__SHIFT 0x0 #define RPB_ATS_CNTL3__RPB_ATS_VC0_TR__SHIFT 0x9 #define RPB_ATS_CNTL3__RPB_ATS_PR__SHIFT 0x12 #define RPB_ATS_CNTL3__RPB_ATS_VC5_TR_MASK 0x000001FFL #define RPB_ATS_CNTL3__RPB_ATS_VC0_TR_MASK 0x0003FE00L #define RPB_ATS_CNTL3__RPB_ATS_PR_MASK 0x07FC0000L //RPB_DF_SDPPORT_CNTL #define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD__SHIFT 0x0 #define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD__SHIFT 0x6 #define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD__SHIFT 0xc #define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE__SHIFT 0x10 #define RPB_DF_SDPPORT_CNTL__DF_INSERT_PARITY_ERR__SHIFT 0x11 #define RPB_DF_SDPPORT_CNTL__DF_BUSY_INCLUDE_CONN__SHIFT 0x12 #define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER__SHIFT 0x13 #define 
RPB_DF_SDPPORT_CNTL__RESERVE__SHIFT 0x1b #define RPB_DF_SDPPORT_CNTL__DF_REQ_CRD_MASK 0x0000003FL #define RPB_DF_SDPPORT_CNTL__DF_DATA_CRD_MASK 0x00000FC0L #define RPB_DF_SDPPORT_CNTL__DF_HALT_THRESHOLD_MASK 0x0000F000L #define RPB_DF_SDPPORT_CNTL__DF_RELEASE_CREDIT_MODE_MASK 0x00010000L #define RPB_DF_SDPPORT_CNTL__DF_INSERT_PARITY_ERR_MASK 0x00020000L #define RPB_DF_SDPPORT_CNTL__DF_BUSY_INCLUDE_CONN_MASK 0x00040000L #define RPB_DF_SDPPORT_CNTL__DF_ORIG_ACK_TIMER_MASK 0x07F80000L #define RPB_DF_SDPPORT_CNTL__RESERVE_MASK 0xF8000000L //RPB_ATS_CNTL #define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE__SHIFT 0x0 #define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE__SHIFT 0x1 #define RPB_ATS_CNTL__SWITCH_THRESHOLD__SHIFT 0x2 #define RPB_ATS_CNTL__TIME_SLICE__SHIFT 0x7 #define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM__SHIFT 0xf #define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM__SHIFT 0x13 #define RPB_ATS_CNTL__WR_AT__SHIFT 0x17 #define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE__SHIFT 0x19 #define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE__SHIFT 0x1a #define RPB_ATS_CNTL__PAGE_MIN_LATENCY_ENABLE_MASK 0x00000001L #define RPB_ATS_CNTL__TR_MIN_LATENCY_ENABLE_MASK 0x00000002L #define RPB_ATS_CNTL__SWITCH_THRESHOLD_MASK 0x0000007CL #define RPB_ATS_CNTL__TIME_SLICE_MASK 0x00007F80L #define RPB_ATS_CNTL__ATCTR_VC0_SWITCH_NUM_MASK 0x00078000L #define RPB_ATS_CNTL__ATCPAGE_SWITCH_NUM_MASK 0x00780000L #define RPB_ATS_CNTL__WR_AT_MASK 0x01800000L #define RPB_ATS_CNTL__MM_TRANS_VC5_ENABLE_MASK 0x02000000L #define RPB_ATS_CNTL__GC_TRANS_VC5_ENABLE_MASK 0x04000000L //RPB_ATS_CNTL2 #define RPB_ATS_CNTL2__INVAL_COM_CMD__SHIFT 0x0 #define RPB_ATS_CNTL2__TRANS_CMD__SHIFT 0x6 #define RPB_ATS_CNTL2__PAGE_REQ_CMD__SHIFT 0xc #define RPB_ATS_CNTL2__PAGE_ROUTING_CODE__SHIFT 0x12 #define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE__SHIFT 0x15 #define RPB_ATS_CNTL2__VENDOR_ID__SHIFT 0x18 #define RPB_ATS_CNTL2__RPB_VC5_CRD__SHIFT 0x1a #define RPB_ATS_CNTL2__INVAL_COM_CMD_MASK 0x0000003FL #define RPB_ATS_CNTL2__TRANS_CMD_MASK 0x00000FC0L #define 
RPB_ATS_CNTL2__PAGE_REQ_CMD_MASK 0x0003F000L #define RPB_ATS_CNTL2__PAGE_ROUTING_CODE_MASK 0x001C0000L #define RPB_ATS_CNTL2__INVAL_COM_ROUTING_CODE_MASK 0x00E00000L #define RPB_ATS_CNTL2__VENDOR_ID_MASK 0x03000000L #define RPB_ATS_CNTL2__RPB_VC5_CRD_MASK 0x7C000000L #endif
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * TI clock drivers support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 */
#ifndef __LINUX_CLK_TI_H__
#define __LINUX_CLK_TI_H__

#include <linux/clk-provider.h>
#include <linux/clkdev.h>

/**
 * struct clk_omap_reg - OMAP register declaration
 * @ptr: mapped virtual address of the register (NOTE(review): not described
 *	in the original doc; presumably resolved at init from @index/@offset —
 *	confirm against the clock provider code)
 * @offset: offset from the master IP module base address
 * @bit: register bit offset
 * @index: index of the master IP module
 * @flags: flags
 */
struct clk_omap_reg {
	void __iomem *ptr;
	u16 offset;
	u8 bit;
	u8 index;
	u8 flags;
};

/**
 * struct dpll_data - DPLL registers and integration data
 * @mult_div1_reg: register containing the DPLL M and N bitfields
 * @mult_mask: mask of the DPLL M bitfield in @mult_div1_reg
 * @div1_mask: mask of the DPLL N bitfield in @mult_div1_reg
 * @clk_bypass: struct clk_hw pointer to the clock's bypass clock input
 * @clk_ref: struct clk_hw pointer to the clock's reference clock input
 * @control_reg: register containing the DPLL mode bitfield
 * @enable_mask: mask of the DPLL mode bitfield in @control_reg
 * @last_rounded_rate: cache of the last rate result of omap2_dpll_round_rate()
 * @last_rounded_m: cache of the last M result of omap2_dpll_round_rate()
 * @last_rounded_m4xen: cache of the last M4X result of
 *	omap4_dpll_regm4xen_round_rate()
 * @last_rounded_lpmode: cache of the last lpmode result of
 *	omap4_dpll_lpmode_recalc()
 * @max_multiplier: maximum valid non-bypass multiplier value (actual)
 * @last_rounded_n: cache of the last N result of omap2_dpll_round_rate()
 * @min_divider: minimum valid non-bypass divider value (actual)
 * @max_divider: maximum valid non-bypass divider value (actual)
 * @max_rate: maximum clock rate for the DPLL
 * @modes: possible values of @enable_mask
 * @autoidle_reg: register containing the DPLL autoidle mode bitfield
 * @idlest_reg: register containing the DPLL idle status bitfield
 * @autoidle_mask: mask of the DPLL autoidle mode bitfield in @autoidle_reg
 * @freqsel_mask: mask of the DPLL jitter correction bitfield in @control_reg
 * @dcc_mask: mask of the DPLL DCC correction bitfield @mult_div1_reg
 * @dcc_rate: rate at least which DCC @dcc_mask must be set
 * @idlest_mask: mask of the DPLL idle status bitfield in @idlest_reg
 * @lpmode_mask: mask of the DPLL low-power mode bitfield in @control_reg
 * @m4xen_mask: mask of the DPLL M4X multiplier bitfield in @control_reg
 * @auto_recal_bit: bitshift of the driftguard enable bit in @control_reg
 * @recal_en_bit: bitshift of the PRM_IRQENABLE_* bit for recalibration IRQs
 * @recal_st_bit: bitshift of the PRM_IRQSTATUS_* bit for recalibration IRQs
 * @ssc_deltam_reg: register containing the DPLL SSC frequency spreading
 * @ssc_modfreq_reg: register containing the DPLL SSC modulation frequency
 * @ssc_modfreq_mant_mask: mask of the mantissa component in @ssc_modfreq_reg
 * @ssc_modfreq_exp_mask: mask of the exponent component in @ssc_modfreq_reg
 * @ssc_enable_mask: mask of the DPLL SSC enable bit in @control_reg
 * @ssc_downspread_mask: mask of the DPLL SSC low frequency only bit in
 *	@control_reg
 * @ssc_modfreq: the DPLL SSC frequency modulation in kHz
 * @ssc_deltam: the DPLL SSC frequency spreading in permille (10th of percent)
 * @ssc_downspread: require the only low frequency spread of the DPLL in SSC
 *	mode
 * @flags: DPLL type/features (see below)
 *
 * Possible values for @flags:
 * DPLL_J_TYPE: "J-type DPLL" (only some 36xx, 4xxx DPLLs)
 *
 * @freqsel_mask is only used on the OMAP34xx family and AM35xx.
 *
 * XXX Some DPLLs have multiple bypass inputs, so it's not technically
 * correct to only have one @clk_bypass pointer.
 *
 * XXX The runtime-variable fields (@last_rounded_rate, @last_rounded_m,
 * @last_rounded_n) should be separated from the runtime-fixed fields
 * and placed into a different structure, so that the runtime-fixed data
 * can be placed into read-only space.
 */
struct dpll_data {
	struct clk_omap_reg mult_div1_reg;
	u32 mult_mask;
	u32 div1_mask;
	struct clk_hw *clk_bypass;
	struct clk_hw *clk_ref;
	struct clk_omap_reg control_reg;
	u32 enable_mask;
	unsigned long last_rounded_rate;
	u16 last_rounded_m;
	u8 last_rounded_m4xen;
	u8 last_rounded_lpmode;
	u16 max_multiplier;
	u8 last_rounded_n;
	u8 min_divider;
	u16 max_divider;
	unsigned long max_rate;
	u8 modes;
	struct clk_omap_reg autoidle_reg;
	struct clk_omap_reg idlest_reg;
	u32 autoidle_mask;
	u32 freqsel_mask;
	u32 idlest_mask;
	u32 dco_mask;
	u32 sddiv_mask;
	u32 dcc_mask;
	unsigned long dcc_rate;
	u32 lpmode_mask;
	u32 m4xen_mask;
	u8 auto_recal_bit;
	u8 recal_en_bit;
	u8 recal_st_bit;
	struct clk_omap_reg ssc_deltam_reg;
	struct clk_omap_reg ssc_modfreq_reg;
	u32 ssc_deltam_int_mask;
	u32 ssc_deltam_frac_mask;
	u32 ssc_modfreq_mant_mask;
	u32 ssc_modfreq_exp_mask;
	u32 ssc_enable_mask;
	u32 ssc_downspread_mask;
	u32 ssc_modfreq;
	u32 ssc_deltam;
	bool ssc_downspread;
	u8 flags;
};

struct clk_hw_omap;

/**
 * struct clk_hw_omap_ops - OMAP clk ops
 * @find_idlest: find idlest register information for a clock
 * @find_companion: find companion clock register information for a clock,
 *	basically converts CM_ICLKEN* <-> CM_FCLKEN*
 * @allow_idle: enables autoidle hardware functionality for a clock
 * @deny_idle: prevent autoidle hardware functionality for a clock
 */
struct clk_hw_omap_ops {
	void (*find_idlest)(struct clk_hw_omap *oclk,
			    struct clk_omap_reg *idlest_reg,
			    u8 *idlest_bit, u8 *idlest_val);
	void (*find_companion)(struct clk_hw_omap *oclk,
			       struct clk_omap_reg *other_reg,
			       u8 *other_bit);
	void (*allow_idle)(struct clk_hw_omap *oclk);
	void (*deny_idle)(struct clk_hw_omap *oclk);
};

/**
 * struct clk_hw_omap - OMAP struct clk
 * @node: list_head connecting this clock into the full clock list
 * @enable_reg: register to write to enable the clock (see @enable_bit)
 * @enable_bit: bitshift to write to enable/disable the clock (see @enable_reg)
 * @flags: see "struct clk.flags possibilities" above
 * @clksel_reg: for clksel clks, register va containing src/divisor select
 * @dpll_data: for DPLLs, pointer to struct dpll_data for this clock
 * @clkdm_name: clockdomain name that this clock is contained in
 * @clkdm: pointer to struct clockdomain, resolved from @clkdm_name at runtime
 * @ops: clock ops for this clock
 *
 * NOTE(review): @hw, @fixed_rate, @fixed_div, @context and @autoidle_count
 * are not described by the original kernel-doc — document them when their
 * semantics are confirmed against the TI clock driver code.
 */
struct clk_hw_omap {
	struct clk_hw hw;
	struct list_head node;
	unsigned long fixed_rate;
	u8 fixed_div;
	struct clk_omap_reg enable_reg;
	u8 enable_bit;
	unsigned long flags;
	struct clk_omap_reg clksel_reg;
	struct dpll_data *dpll_data;
	const char *clkdm_name;
	struct clockdomain *clkdm;
	const struct clk_hw_omap_ops *ops;
	u32 context;
	int autoidle_count;
};

/*
 * struct clk_hw_omap.flags possibilities
 *
 * XXX document the rest of the clock flags here
 *
 * ENABLE_REG_32BIT: (OMAP1 only) clock control register must be accessed
 *	with 32bit ops, by default OMAP1 uses 16bit ops.
 * CLOCK_IDLE_CONTROL: (OMAP1 only) clock has autoidle support.
 * CLOCK_NO_IDLE_PARENT: (OMAP1 only) when clock is enabled, its parent
 *	clock is put to no-idle mode.
 * ENABLE_ON_INIT: Clock is enabled on init.
 * INVERT_ENABLE: By default, clock enable bit behavior is '1' enable, '0'
 *	disable. This inverts the behavior making '0' enable and '1' disable.
 * CLOCK_CLKOUTX2: (OMAP4 only) DPLL CLKOUT and CLKOUTX2 GATE_CTRL
 *	bits share the same register. This flag allows the
 *	omap4_dpllmx*() code to determine which GATE_CTRL bit field
 *	should be used. This is a temporary solution - a better approach
 *	would be to associate clock type-specific data with the clock,
 *	similar to the struct dpll_data approach.
 */
#define ENABLE_REG_32BIT	(1 << 0)	/* Use 32-bit access */
#define CLOCK_IDLE_CONTROL	(1 << 1)
#define CLOCK_NO_IDLE_PARENT	(1 << 2)
#define ENABLE_ON_INIT		(1 << 3)	/* Enable upon framework init */
#define INVERT_ENABLE		(1 << 4)	/* 0 enables, 1 disables */
#define CLOCK_CLKOUTX2		(1 << 5)

/* CM_CLKEN_PLL*.EN* bit values - not all are available for every DPLL */
#define DPLL_LOW_POWER_STOP	0x1
#define DPLL_LOW_POWER_BYPASS	0x5
#define DPLL_LOCKED		0x7

/* DPLL Type and DCO Selection Flags */
#define DPLL_J_TYPE		0x1

/* Static memmap indices */
enum {
	TI_CLKM_CM = 0,
	TI_CLKM_CM2,
	TI_CLKM_PRM,
	TI_CLKM_SCRM,
	TI_CLKM_CTRL,
	TI_CLKM_CTRL_AUX,
	TI_CLKM_PLLSS,
	CLK_MAX_MEMMAPS
};

/**
 * struct ti_clk_ll_ops - low-level ops for clocks
 * @clk_readl: pointer to register read function
 * @clk_writel: pointer to register write function
 * @clk_rmw: pointer to register read-modify-write function
 * @clkdm_clk_enable: pointer to clockdomain enable function
 * @clkdm_clk_disable: pointer to clockdomain disable function
 * @clkdm_lookup: pointer to clockdomain lookup function
 * @cm_wait_module_ready: pointer to CM module wait ready function
 * @cm_split_idlest_reg: pointer to CM module function to split idlest reg
 *
 * Low-level ops are generally used by the basic clock types (clk-gate,
 * clk-mux, clk-divider etc.) to provide support for various low-level
 * hardware interfaces (direct MMIO, regmap etc.), and is initialized
 * by board code. Low-level ops also contain some other platform specific
 * operations not provided directly by clock drivers.
 */
struct ti_clk_ll_ops {
	u32 (*clk_readl)(const struct clk_omap_reg *reg);
	void (*clk_writel)(u32 val, const struct clk_omap_reg *reg);
	void (*clk_rmw)(u32 val, u32 mask, const struct clk_omap_reg *reg);
	int (*clkdm_clk_enable)(struct clockdomain *clkdm, struct clk *clk);
	int (*clkdm_clk_disable)(struct clockdomain *clkdm,
				 struct clk *clk);
	struct clockdomain * (*clkdm_lookup)(const char *name);
	int (*cm_wait_module_ready)(u8 part, s16 prcm_mod, u16 idlest_reg,
				    u8 idlest_shift);
	int (*cm_split_idlest_reg)(struct clk_omap_reg *idlest_reg,
				   s16 *prcm_inst, u8 *idlest_reg_id);
};

#define to_clk_hw_omap(_hw) container_of(_hw, struct clk_hw_omap, hw)

bool omap2_clk_is_hw_omap(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
int omap2_clk_enable_autoidle_all(void);
int omap2_clk_allow_idle(struct clk *clk);
int omap2_clk_deny_idle(struct clk *clk);
unsigned long omap2_dpllcore_recalc(struct clk_hw *hw,
				    unsigned long parent_rate);
int omap2_reprogram_dpllcore(struct clk_hw *clk, unsigned long rate,
			     unsigned long parent_rate);
void omap2xxx_clkt_dpllcore_init(struct clk_hw *hw);
void omap2xxx_clkt_vps_init(void);
unsigned long omap2_get_dpll_rate(struct clk_hw_omap *clk);

void ti_dt_clk_init_retry_clks(void);
void ti_dt_clockdomains_setup(void);
int ti_clk_setup_ll_ops(struct ti_clk_ll_ops *ops);

struct regmap;

int omap2_clk_provider_init(struct device_node *parent, int index,
			    struct regmap *syscon, void __iomem *mem);
void omap2_clk_legacy_provider_init(int index, void __iomem *mem);

/* Per-SoC DT clock init entry points. */
int omap3430_dt_clk_init(void);
int omap3630_dt_clk_init(void);
int am35xx_dt_clk_init(void);
int dm814x_dt_clk_init(void);
int dm816x_dt_clk_init(void);
int omap4xxx_dt_clk_init(void);
int omap5xxx_dt_clk_init(void);
int dra7xx_dt_clk_init(void);
int am33xx_dt_clk_init(void);
int am43xx_dt_clk_init(void);
int omap2420_dt_clk_init(void);
int omap2430_dt_clk_init(void);

/* SoC-specific clock feature description, see TI_CLK_* flags below. */
struct ti_clk_features {
	u32 flags;
	long fint_min;
	long fint_max;
	long fint_band1_max;
	long fint_band2_min;
	u8 dpll_bypass_vals;
	u8 cm_idlest_val;
};

#define TI_CLK_DPLL_HAS_FREQSEL			BIT(0)
#define TI_CLK_DPLL4_DENY_REPROGRAM		BIT(1)
#define TI_CLK_DISABLE_CLKDM_CONTROL		BIT(2)
#define TI_CLK_ERRATA_I810			BIT(3)
#define TI_CLK_CLKCTRL_COMPAT			BIT(4)
#define TI_CLK_DEVICE_TYPE_GP			BIT(5)

void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
bool ti_clk_is_in_standby(struct clk *clk);

int omap3_noncore_dpll_save_context(struct clk_hw *hw);
void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
int omap3_core_dpll_save_context(struct clk_hw *hw);
void omap3_core_dpll_restore_context(struct clk_hw *hw);

extern const struct clk_hw_omap_ops clkhwops_omap2xxx_dpll;

#ifdef CONFIG_ATAGS
int omap3430_clk_legacy_init(void);
int omap3430es1_clk_legacy_init(void);
int omap36xx_clk_legacy_init(void);
int am35xx_clk_legacy_init(void);
#else
/* Legacy (non-DT) init is unavailable without ATAGS support. */
static inline int omap3430_clk_legacy_init(void) { return -ENXIO; }
static inline int omap3430es1_clk_legacy_init(void) { return -ENXIO; }
static inline int omap36xx_clk_legacy_init(void) { return -ENXIO; }
static inline int am35xx_clk_legacy_init(void) { return -ENXIO; }
#endif
#endif /* __LINUX_CLK_TI_H__ */
// SPDX-License-Identifier: GPL-2.0
/******************************************************************************
 *
 * Copyright(c) 2007 - 2012 Realtek Corporation. All rights reserved.
 *
 ******************************************************************************/
/*
 * rtl8723bs WLAN utility helpers: rate-set construction, dynamic-mechanism
 * (DM) control, channel bookkeeping and hardware CAM (key) management.
 */
#include <drv_types.h>
#include <hal_com_h2c.h>

/* Well-known vendor OUIs used for peer/vendor-IE identification. */
static unsigned char ARTHEROS_OUI1[] = {0x00, 0x03, 0x7f};
static unsigned char ARTHEROS_OUI2[] = {0x00, 0x13, 0x74};
static unsigned char BROADCOM_OUI1[] = {0x00, 0x10, 0x18};
static unsigned char BROADCOM_OUI2[] = {0x00, 0x0a, 0xf7};
static unsigned char BROADCOM_OUI3[] = {0x00, 0x05, 0xb5};
static unsigned char CISCO_OUI[] = {0x00, 0x40, 0x96};
static unsigned char MARVELL_OUI[] = {0x00, 0x50, 0x43};
static unsigned char RALINK_OUI[] = {0x00, 0x0c, 0x43};
static unsigned char REALTEK_OUI[] = {0x00, 0xe0, 0x4c};
static unsigned char AIRGOCAP_OUI[] = {0x00, 0x0a, 0xf5};

/* Cipher-suite selectors for TKIP under RSN (00-0f-ac:2) and WPA (00-50-f2:2). */
static unsigned char RSN_TKIP_CIPHER[4] = {0x00, 0x0f, 0xac, 0x02};
static unsigned char WPA_TKIP_CIPHER[4] = {0x00, 0x50, 0xf2, 0x02};

/* Beacon-wait timeout bounds in TUs; see decide_wait_for_beacon_timeout(). */
/* define WAIT_FOR_BCN_TO_MIN (3000) */
#define WAIT_FOR_BCN_TO_MIN (6000)
#define WAIT_FOR_BCN_TO_MAX (20000)

#define DISCONNECT_BY_CHK_BCN_FAIL_OBSERV_PERIOD_IN_MS 1000
#define DISCONNECT_BY_CHK_BCN_FAIL_THRESHOLD 3

/* CCK basic rates (1/2/5.5/11 Mbps), basic-rate bit set. */
static u8 rtw_basic_rate_cck[4] = {
	IEEE80211_CCK_RATE_1MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_2MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_5MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_CCK_RATE_11MB | IEEE80211_BASIC_RATE_MASK
};

/* OFDM basic rates (6/12/24 Mbps), basic-rate bit set. */
static u8 rtw_basic_rate_ofdm[3] = {
	IEEE80211_OFDM_RATE_6MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_12MB | IEEE80211_BASIC_RATE_MASK,
	IEEE80211_OFDM_RATE_24MB | IEEE80211_BASIC_RATE_MASK
};

/*
 * Map a station's wireless mode (and, for B/BG+N, its bandwidth) to a
 * RATEID_IDX_* rate-adaptation index. Unknown modes fall back to the
 * 40 MHz 2SS BGN index.
 */
u8 networktype_to_raid_ex(struct adapter *adapter, struct sta_info *psta)
{
	u8 raid;

	switch (psta->wireless_mode) {
	case WIRELESS_11B:
		raid = RATEID_IDX_B;
		break;
	case WIRELESS_11G:
		raid = RATEID_IDX_G;
		break;
	case WIRELESS_11BG:
		raid = RATEID_IDX_BG;
		break;
	case WIRELESS_11_24N:
	case WIRELESS_11G_24N:
		raid = RATEID_IDX_GN_N1SS;
		break;
	case WIRELESS_11B_24N:
	case WIRELESS_11BG_24N:
		if (psta->bw_mode == CHANNEL_WIDTH_20) {
			raid = RATEID_IDX_BGN_20M_1SS_BN;
		} else {
			raid = RATEID_IDX_BGN_40M_1SS;
		}
		break;
	default:
		raid = RATEID_IDX_BGN_40M_2SS;
		break;
	}
	return raid;
}

/*
 * Convert a rate-table value (0..11, high bit ignored) to its IEEE 802.11
 * rate code; returns 0 for out-of-range values.
 */
unsigned char ratetbl_val_2wifirate(unsigned char rate);
unsigned char ratetbl_val_2wifirate(unsigned char rate)
{
	switch (rate & 0x7f) {
	case 0:
		return IEEE80211_CCK_RATE_1MB;
	case 1:
		return IEEE80211_CCK_RATE_2MB;
	case 2:
		return IEEE80211_CCK_RATE_5MB;
	case 3:
		return IEEE80211_CCK_RATE_11MB;
	case 4:
		return IEEE80211_OFDM_RATE_6MB;
	case 5:
		return IEEE80211_OFDM_RATE_9MB;
	case 6:
		return IEEE80211_OFDM_RATE_12MB;
	case 7:
		return IEEE80211_OFDM_RATE_18MB;
	case 8:
		return IEEE80211_OFDM_RATE_24MB;
	case 9:
		return IEEE80211_OFDM_RATE_36MB;
	case 10:
		return IEEE80211_OFDM_RATE_48MB;
	case 11:
		return IEEE80211_OFDM_RATE_54MB;
	default:
		return 0;
	}
}

/*
 * Return true when @rate matches one of the adapter's configured basic
 * rates. Entries 0xff/0xfe in basicrate[] are skipped (presumably
 * terminator/placeholder markers, mirroring ratetbl2rateset() below —
 * confirm against table initialization).
 */
int is_basicrate(struct adapter *padapter, unsigned char rate);
int is_basicrate(struct adapter *padapter, unsigned char rate)
{
	int i;
	unsigned char val;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	for (i = 0; i < NumRates; i++) {
		val = pmlmeext->basicrate[i];
		if ((val != 0xff) && (val != 0xfe))
			if (rate == ratetbl_val_2wifirate(val))
				return true;
	}
	return false;
}

/*
 * Build an IEEE 802.11 rate set from the adapter's data-rate table into
 * @rateset and return its length. 0xff terminates the table, 0xfe entries
 * are skipped; basic rates get IEEE80211_BASIC_RATE_MASK set.
 */
unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset);
unsigned int ratetbl2rateset(struct adapter *padapter, unsigned char *rateset)
{
	int i;
	unsigned char rate;
	unsigned int len = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;

	for (i = 0; i < NumRates; i++) {
		rate = pmlmeext->datarate[i];

		switch (rate) {
		case 0xff:
			return len;
		case 0xfe:
			continue;
		default:
			rate = ratetbl_val_2wifirate(rate);

			if (is_basicrate(padapter, rate) == true)
				rate |= IEEE80211_BASIC_RATE_MASK;

			rateset[len] = rate;
			len++;
			break;
		}
	}
	return len;
}

/* Copy the adapter's supported-rate set into @pbssrate / @bssrate_len. */
void get_rate_set(struct adapter *padapter, unsigned char *pbssrate, int *bssrate_len)
{
	unsigned char supportedrates[NumRates];
	memset(supportedrates, 0, NumRates);
	*bssrate_len = ratetbl2rateset(padapter, supportedrates);
	memcpy(pbssrate, supportedrates, *bssrate_len);
}

/* AND each MCS set byte with the per-spatial-stream bytes of @mask. */
void set_mcs_rate_by_mask(u8 *mcs_set, u32 mask)
{
	u8 mcs_rate_1r = (u8)(mask&0xff);
	u8 mcs_rate_2r = (u8)((mask>>8)&0xff);
	u8 mcs_rate_3r = (u8)((mask>>16)&0xff);
	u8 mcs_rate_4r = (u8)((mask>>24)&0xff);

	mcs_set[0] &= mcs_rate_1r;
	mcs_set[1] &= mcs_rate_2r;
	mcs_set[2] &= mcs_rate_3r;
	mcs_set[3] &= mcs_rate_4r;
}

/* Mark the mandatory rates in an OS-format rate table as basic (in place). */
void UpdateBrateTbl(struct adapter *Adapter, u8 *mBratesOS)
{
	u8 i;
	u8 rate;

	/* 1M, 2M, 5.5M, 11M, 6M, 12M, 24M are mandatory. */
	for (i = 0; i < NDIS_802_11_LENGTH_RATES_EX; i++) {
		rate = mBratesOS[i] & 0x7f;
		switch (rate) {
		case IEEE80211_CCK_RATE_1MB:
		case IEEE80211_CCK_RATE_2MB:
		case IEEE80211_CCK_RATE_5MB:
		case IEEE80211_CCK_RATE_11MB:
		case IEEE80211_OFDM_RATE_6MB:
		case IEEE80211_OFDM_RATE_12MB:
		case IEEE80211_OFDM_RATE_24MB:
			mBratesOS[i] |= IEEE80211_BASIC_RATE_MASK;
			break;
		}
	}
}

/* Soft-AP variant: only the CCK rates are forced basic. */
void UpdateBrateTblForSoftAP(u8 *bssrateset, u32 bssratelen)
{
	u8 i;
	u8 rate;

	for (i = 0; i < bssratelen; i++) {
		rate = bssrateset[i] & 0x7f;
		switch (rate) {
		case IEEE80211_CCK_RATE_1MB:
		case IEEE80211_CCK_RATE_2MB:
		case IEEE80211_CCK_RATE_5MB:
		case IEEE80211_CCK_RATE_11MB:
			bssrateset[i] |= IEEE80211_BASIC_RATE_MASK;
			break;
		}
	}
}

/* Ask the HAL to save the current dynamic-mechanism function flags. */
void Save_DM_Func_Flag(struct adapter *padapter)
{
	u8 bSaveFlag = true;

	rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&bSaveFlag));
}

/* Ask the HAL to restore the previously saved DM function flags. */
void Restore_DM_Func_Flag(struct adapter *padapter)
{
	u8 bSaveFlag = false;

	rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_OP, (u8 *)(&bSaveFlag));
}

/* Set or clear the DM functions selected by @mode. */
void Switch_DM_Func(struct adapter *padapter, u32 mode, u8 enable)
{
	if (enable == true)
		rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_SET, (u8 *)(&mode));
	else
		rtw_hal_set_hwreg(padapter, HW_VAR_DM_FUNC_CLR, (u8 *)(&mode));
}

/* Program the media-status (network type) register via the HAL. */
void Set_MSR(struct adapter *padapter, u8 type)
{
	rtw_hal_set_hwreg(padapter, HW_VAR_MEDIA_STATUS, (u8 *)(&type));
}

/* Current operating channel, shared per-device across interfaces. */
inline u8 rtw_get_oper_ch(struct adapter *adapter)
{
	return
adapter_to_dvobj(adapter)->oper_channel;
}

/*
 * Record @ch as the device-wide operating channel; also timestamps the
 * switch (jiffies) when the channel actually changes.
 */
inline void rtw_set_oper_ch(struct adapter *adapter, u8 ch)
{
#ifdef DBG_CH_SWITCH
	const int len = 128;
	char msg[128] = {0};
	int cnt = 0;
	int i = 0;
#endif /* DBG_CH_SWITCH */
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);

	if (dvobj->oper_channel != ch) {
		dvobj->on_oper_ch_time = jiffies;

#ifdef DBG_CH_SWITCH
		cnt += scnprintf(msg+cnt, len-cnt, "switch to ch %3u", ch);

		for (i = 0; i < dvobj->iface_nums; i++) {
			struct adapter *iface = dvobj->padapters[i];

			cnt += scnprintf(msg+cnt, len-cnt, " [%s:", ADPT_ARG(iface));
			if (iface->mlmeextpriv.cur_channel == ch)
				cnt += scnprintf(msg+cnt, len-cnt, "C");
			else
				cnt += scnprintf(msg+cnt, len-cnt, "_");
			if (iface->wdinfo.listen_channel == ch && !rtw_p2p_chk_state(&iface->wdinfo, P2P_STATE_NONE))
				cnt += scnprintf(msg+cnt, len-cnt, "L");
			else
				cnt += scnprintf(msg+cnt, len-cnt, "_");
			cnt += scnprintf(msg+cnt, len-cnt, "]");
		}
#endif /* DBG_CH_SWITCH */
	}

	dvobj->oper_channel = ch;
}

/* Record the device-wide operating bandwidth mode. */
inline void rtw_set_oper_bw(struct adapter *adapter, u8 bw)
{
	adapter_to_dvobj(adapter)->oper_bwmode = bw;
}

/* Record the device-wide primary-channel offset. */
inline void rtw_set_oper_choffset(struct adapter *adapter, u8 offset)
{
	adapter_to_dvobj(adapter)->oper_ch_offset = offset;
}

/*
 * Compute the center channel: for 40 MHz the center is the primary
 * channel +/- 2 depending on the offset; otherwise the channel itself.
 */
u8 rtw_get_center_ch(u8 channel, u8 chnl_bw, u8 chnl_offset)
{
	u8 center_ch = channel;

	if (chnl_bw == CHANNEL_WIDTH_40) {
		if (chnl_offset == HAL_PRIME_CHNL_OFFSET_LOWER)
			center_ch = channel + 2;
		else
			center_ch = channel - 2;
	}

	return center_ch;
}

/*
 * Time (jiffies) we have been on the current channel, or 0 when this
 * interface's channel differs from the device's operating channel.
 */
inline unsigned long rtw_get_on_cur_ch_time(struct adapter *adapter)
{
	if (adapter->mlmeextpriv.cur_channel == adapter_to_dvobj(adapter)->oper_channel)
		return adapter_to_dvobj(adapter)->on_oper_ch_time;
	else
		return 0;
}

/*
 * Switch to @channel under setch_mutex. NOTE(review): a failed
 * (interrupted) mutex_lock_interruptible() silently skips the switch.
 */
void r8723bs_select_channel(struct adapter *padapter, unsigned char channel)
{
	if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
		return;

	/* saved channel info */
	rtw_set_oper_ch(padapter, channel);

	rtw_hal_set_chan(padapter, channel);

	mutex_unlock(&(adapter_to_dvobj(padapter)->setch_mutex));
}

/* Set channel, bandwidth and offset together, then program the HAL. */
void set_channel_bwmode(struct adapter *padapter, unsigned char channel, unsigned char channel_offset, unsigned short bwmode)
{
	u8 center_ch, chnl_offset80 = HAL_PRIME_CHNL_OFFSET_DONT_CARE;

	center_ch = rtw_get_center_ch(channel, bwmode, channel_offset);

	/* set Channel */
	if (mutex_lock_interruptible(&(adapter_to_dvobj(padapter)->setch_mutex)))
		return;

	/* saved channel/bw info */
	rtw_set_oper_ch(padapter, channel);
	rtw_set_oper_bw(padapter, bwmode);
	rtw_set_oper_choffset(padapter, channel_offset);

	rtw_hal_set_chnl_bw(padapter, center_ch, bwmode, channel_offset, chnl_offset80); /* set center channel */

	mutex_unlock(&(adapter_to_dvobj(padapter)->setch_mutex));
}

/* BSSID of @pnetwork (no copy; returns a pointer into the structure). */
inline u8 *get_my_bssid(struct wlan_bssid_ex *pnetwork)
{
	return pnetwork->mac_address;
}

/* Read the little-endian beacon interval field out of the BSS IEs. */
u16 get_beacon_interval(struct wlan_bssid_ex *bss)
{
	__le16 val;

	memcpy((unsigned char *)&val, rtw_get_beacon_interval_from_ie(bss->ies), 2);

	return le16_to_cpu(val);
}

/*
 * Non-zero when associated in station mode. NOTE(review): mixes `true`
 * with the `_FAIL` sentinel as return values; callers treat the result
 * as boolean (_FAIL is falsy in this driver) — confirm before changing.
 */
int is_client_associated_to_ap(struct adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext;
	struct mlme_ext_info *pmlmeinfo;

	if (!padapter)
		return _FAIL;

	pmlmeext = &padapter->mlmeextpriv;
	pmlmeinfo = &(pmlmeext->mlmext_info);

	if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_STATION_STATE))
		return true;
	else
		return _FAIL;
}

/* Non-zero when associated in ad-hoc (IBSS) mode; see note above. */
int is_client_associated_to_ibss(struct adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	if ((pmlmeinfo->state & WIFI_FW_ASSOC_SUCCESS) && ((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE))
		return true;
	else
		return _FAIL;
}

/* true when no firmware station entry is in use; _FAIL otherwise. */
int is_IBSS_empty(struct adapter *padapter)
{
	unsigned int i;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	for (i = IBSS_START_MAC_ID; i < NUM_STA; i++) {
		if (pmlmeinfo->FW_sta_info[i].status == 1)
			return _FAIL;
	}
	return true;
}

/* Clamp 4 * beacon interval into [WAIT_FOR_BCN_TO_MIN, WAIT_FOR_BCN_TO_MAX]. */
unsigned int decide_wait_for_beacon_timeout(unsigned int bcn_interval)
{
	if ((bcn_interval << 2) < WAIT_FOR_BCN_TO_MIN)
		return WAIT_FOR_BCN_TO_MIN;
	else if ((bcn_interval << 2) > WAIT_FOR_BCN_TO_MAX)
		return WAIT_FOR_BCN_TO_MAX;
	else
		return bcn_interval << 2;
}

/* Invalidate every hardware CAM entry and reset the software cache/bitmap. */
void invalidate_cam_all(struct adapter *padapter)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(padapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;

	rtw_hal_set_hwreg(padapter, HW_VAR_CAM_INVALID_ALL, NULL);

	spin_lock_bh(&cam_ctl->lock);
	cam_ctl->bitmap = 0;
	memset(dvobj->cam_cache, 0, sizeof(struct cam_entry_cache)*TOTAL_CAM_ENTRY);
	spin_unlock_bh(&cam_ctl->lock);
}

/*
 * Program one hardware CAM entry: 6 dwords per entry (ctrl+MAC in words
 * 0-1, 16-byte key in words 2-5), written high word first. Entries are
 * 8 words apart in the CAM address space (entry << 3).
 */
void _write_cam(struct adapter *padapter, u8 entry, u16 ctrl, u8 *mac, u8 *key)
{
	unsigned int i, val, addr;
	int j;
	u32 cam_val[2];

	addr = entry << 3;

	for (j = 5; j >= 0; j--) {
		switch (j) {
		case 0:
			val = (ctrl | (mac[0] << 16) | (mac[1] << 24));
			break;
		case 1:
			val = (mac[2] | (mac[3] << 8) | (mac[4] << 16) | (mac[5] << 24));
			break;
		default:
			i = (j - 2) << 2;
			val = (key[i] | (key[i+1] << 8) | (key[i+2] << 16) | (key[i+3] << 24));
			break;
		}

		cam_val[0] = val;
		cam_val[1] = addr + (unsigned int)j;

		rtw_hal_set_hwreg(padapter, HW_VAR_CAM_WRITE, (u8 *)cam_val);
	}
}

/* Overwrite a CAM entry with an all-zero MAC and key. */
void _clear_cam_entry(struct adapter *padapter, u8 entry)
{
	unsigned char null_sta[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	unsigned char null_key[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
				    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};

	_write_cam(padapter, entry, 0, null_sta, null_key);
}

/* Write a CAM entry to hardware and mirror it in the software cache. */
inline void write_cam(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
{
	_write_cam(adapter, id, ctrl, mac, key);
	write_cam_cache(adapter, id, ctrl, mac, key);
}

/* Clear a CAM entry in hardware and in the software cache. */
inline void clear_cam_entry(struct adapter *adapter, u8 id)
{
	_clear_cam_entry(adapter, id);
	clear_cam_cache(adapter, id);
}

/* Update the software CAM cache for @id under the cam_ctl lock. */
void write_cam_cache(struct adapter *adapter, u8 id, u16 ctrl, u8 *mac, u8 *key)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;

	spin_lock_bh(&cam_ctl->lock);

	dvobj->cam_cache[id].ctrl = ctrl;
	memcpy(dvobj->cam_cache[id].mac, mac,
ETH_ALEN);
	memcpy(dvobj->cam_cache[id].key, key, 16);

	spin_unlock_bh(&cam_ctl->lock);
}

/* Zero the software CAM cache entry @id under the cam_ctl lock. */
void clear_cam_cache(struct adapter *adapter, u8 id)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;

	spin_lock_bh(&cam_ctl->lock);

	memset(&(dvobj->cam_cache[id]), 0, sizeof(struct cam_entry_cache));

	spin_unlock_bh(&cam_ctl->lock);
}

/*
 * true when CAM entry @cam_id is allocated and holds a group key
 * (ctrl BIT6). Caller context: bitmap is read without the lock here;
 * callers hold cam_ctl->lock (see rtw_camid_alloc()).
 */
static bool _rtw_camid_is_gk(struct adapter *adapter, u8 cam_id)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
	bool ret = false;

	if (cam_id >= TOTAL_CAM_ENTRY)
		goto exit;

	if (!(cam_ctl->bitmap & BIT(cam_id)))
		goto exit;

	ret = (dvobj->cam_cache[cam_id].ctrl&BIT6)?true:false;

exit:
	return ret;
}

/*
 * Find the first cached CAM entry matching @addr (if non-NULL) and key id
 * @kid (if >= 0); returns the entry index or -1. Lock must be held by
 * the caller (see rtw_camid_search()).
 */
static s16 _rtw_camid_search(struct adapter *adapter, u8 *addr, s16 kid)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	int i;
	s16 cam_id = -1;

	for (i = 0; i < TOTAL_CAM_ENTRY; i++) {
		if (addr && memcmp(dvobj->cam_cache[i].mac, addr, ETH_ALEN))
			continue;
		if (kid >= 0 && kid != (dvobj->cam_cache[i].ctrl&0x03))
			continue;

		cam_id = i;
		break;
	}

	return cam_id;
}

/* Locked wrapper around _rtw_camid_search(). */
s16 rtw_camid_search(struct adapter *adapter, u8 *addr, s16 kid)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
	s16 cam_id = -1;

	spin_lock_bh(&cam_ctl->lock);
	cam_id = _rtw_camid_search(adapter, addr, kid);
	spin_unlock_bh(&cam_ctl->lock);

	return cam_id;
}

/*
 * Allocate a CAM entry for a pairwise (@sta != NULL) or group key.
 * AP/ad-hoc group keys map statically to entries 0-3 by key id; other
 * keys reuse a matching cached entry or take the first free one >= 4.
 * Returns the entry index, or -1 on failure.
 */
s16 rtw_camid_alloc(struct adapter *adapter, struct sta_info *sta, u8 kid)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;
	s16 cam_id = -1;
	struct mlme_ext_info *mlmeinfo;

	spin_lock_bh(&cam_ctl->lock);

	mlmeinfo = &adapter->mlmeextpriv.mlmext_info;

	if ((((mlmeinfo->state&0x03) == WIFI_FW_AP_STATE) ||
	     ((mlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) &&
	    !sta) {
		/* AP/Ad-hoc mode group key: static allocation to default key by key ID */
		if (kid > 3) {
			netdev_dbg(adapter->pnetdev,
				   FUNC_ADPT_FMT " group key with invalid key id:%u\n",
				   FUNC_ADPT_ARG(adapter), kid);
			rtw_warn_on(1);
			goto bitmap_handle;
		}

		cam_id = kid;
	} else {
		int i;
		u8 *addr = sta?sta->hwaddr:NULL;

		if (!sta) {
			if (!(mlmeinfo->state & WIFI_FW_ASSOC_SUCCESS)) {
				/* bypass STA mode group key setting before connected(ex:WEP) because bssid is not ready */
				goto bitmap_handle;
			}

			addr = get_bssid(&adapter->mlmepriv);
		}

		i = _rtw_camid_search(adapter, addr, kid);

		if (i >= 0) {
			/* Fix issue that pairwise and group key have same key id. Pairwise key first, group key can overwrite group only(ex: rekey) */
			if (sta || _rtw_camid_is_gk(adapter, i))
				cam_id = i;
			else
				netdev_dbg(adapter->pnetdev,
					   FUNC_ADPT_FMT " group key id:%u the same key id as pairwise key\n",
					   FUNC_ADPT_ARG(adapter), kid);
			goto bitmap_handle;
		}

		/* entries 0-3 are reserved for default/group keys */
		for (i = 4; i < TOTAL_CAM_ENTRY; i++)
			if (!(cam_ctl->bitmap & BIT(i)))
				break;

		if (i == TOTAL_CAM_ENTRY) {
			if (sta)
				netdev_dbg(adapter->pnetdev,
					   FUNC_ADPT_FMT " pairwise key with %pM id:%u no room\n",
					   FUNC_ADPT_ARG(adapter), MAC_ARG(sta->hwaddr), kid);
			else
				netdev_dbg(adapter->pnetdev,
					   FUNC_ADPT_FMT " group key id:%u no room\n",
					   FUNC_ADPT_ARG(adapter), kid);
			rtw_warn_on(1);
			goto bitmap_handle;
		}

		cam_id = i;
	}

bitmap_handle:
	/* NOTE(review): bound is a literal 32 while the rest of this file
	 * uses TOTAL_CAM_ENTRY — presumably equal (bitmap is 32 bits);
	 * confirm before unifying. */
	if (cam_id >= 0 && cam_id < 32)
		cam_ctl->bitmap |= BIT(cam_id);

	spin_unlock_bh(&cam_ctl->lock);

	return cam_id;
}

/* Release CAM entry @cam_id back to the allocator bitmap. */
void rtw_camid_free(struct adapter *adapter, u8 cam_id)
{
	struct dvobj_priv *dvobj = adapter_to_dvobj(adapter);
	struct cam_ctl_t *cam_ctl = &dvobj->cam_ctl;

	spin_lock_bh(&cam_ctl->lock);

	if (cam_id < TOTAL_CAM_ENTRY)
		cam_ctl->bitmap &= ~(BIT(cam_id));

	spin_unlock_bh(&cam_ctl->lock);
}

/*
 * Claim the first free firmware station slot and return its mac_id.
 * NOTE(review): returns NUM_STA when no slot is free — callers must
 * treat that value as failure.
 */
int allocate_fw_sta_entry(struct adapter *padapter)
{
	unsigned int mac_id;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	for (mac_id = IBSS_START_MAC_ID; mac_id < NUM_STA; mac_id++) {
		if (pmlmeinfo->FW_sta_info[mac_id].status == 0) {
			pmlmeinfo->FW_sta_info[mac_id].status = 1;
			pmlmeinfo->FW_sta_info[mac_id].retry = 0;
			break;
		}
	}

	return mac_id;
}

/* Invalidate all CAM entries and forget all firmware station info. */
void flush_all_cam_entry(struct adapter *padapter)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	invalidate_cam_all(padapter);
	/* clear default key related key search setting */
	rtw_hal_set_hwreg(padapter, HW_VAR_SEC_DK_CFG, (u8 *)false);

	memset((u8 *)(pmlmeinfo->FW_sta_info), 0, sizeof(pmlmeinfo->FW_sta_info));
}

/*
 * Cache the WMM parameter element from a received IE. Returns true when
 * the stored parameters changed (and marks WMM enabled), false when QoS
 * is off or the element is unchanged.
 */
int WMM_param_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
{
	/* struct registry_priv	*pregpriv = &padapter->registrypriv; */
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	if (pmlmepriv->qospriv.qos_option == 0) {
		pmlmeinfo->WMM_enable = 0;
		return false;
	}

	if (!memcmp(&(pmlmeinfo->WMM_param), (pIE->data + 6), sizeof(struct WMM_para_element)))
		return false;
	else
		memcpy(&(pmlmeinfo->WMM_param), (pIE->data + 6), sizeof(struct WMM_para_element));

	pmlmeinfo->WMM_enable = 1;
	return true;
}

/*
 * Bubble-sort the four EDCA parameter words (and the matching AC index
 * array) by ascending CW/AIFS, breaking ties by descending TXOP.
 */
static void sort_wmm_ac_params(u32 *inx, u32 *edca)
{
	u32 i, j, change_inx = false;

	/* entry index: 0->vo, 1->vi, 2->be, 3->bk.
*/
	for (i = 0; i < 4; i++) {
		for (j = i + 1; j < 4; j++) {
			/* compare CW and AIFS */
			if ((edca[j] & 0xFFFF) < (edca[i] & 0xFFFF)) {
				change_inx = true;
			} else if ((edca[j] & 0xFFFF) == (edca[i] & 0xFFFF)) {
				/* compare TXOP */
				if ((edca[j] >> 16) > (edca[i] >> 16))
					change_inx = true;
			}

			if (change_inx) {
				swap(edca[i], edca[j]);
				swap(inx[i], inx[j]);
				change_inx = false;
			}
		}
	}
}

/*
 * Program the per-AC EDCA parameters after association: either default
 * values (WMM disabled) or the AP-supplied WMM parameter set, including
 * the ACM mask and, for wifi_spec mode, the AC drain order.
 */
void WMMOnAssocRsp(struct adapter *padapter)
{
	u8 ACI, ACM, AIFS, ECWMin, ECWMax, aSifsTime;
	u8 acm_mask;
	u16 TXOP;
	u32 acParm, i;
	u32 edca[4], inx[4];
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct registry_priv *pregpriv = &padapter->registrypriv;

	acm_mask = 0;

	/* SIFS is 16us for OFDM/HT on 2.4GHz-with-N, else 10us */
	if (pmlmeext->cur_wireless_mode & WIRELESS_11_24N)
		aSifsTime = 16;
	else
		aSifsTime = 10;

	if (pmlmeinfo->WMM_enable == 0) {
		/* WMM off: program legacy DCF-style defaults for every AC */
		padapter->mlmepriv.acm_mask = 0;

		AIFS = aSifsTime + (2 * pmlmeinfo->slotTime);

		if (pmlmeext->cur_wireless_mode & WIRELESS_11G) {
			ECWMin = 4;
			ECWMax = 10;
		} else if (pmlmeext->cur_wireless_mode & WIRELESS_11B) {
			ECWMin = 5;
			ECWMax = 10;
		} else {
			ECWMin = 4;
			ECWMax = 10;
		}

		TXOP = 0;
		acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
		rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
		rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
		rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));

		/* VO gets a tighter contention window and a TXOP */
		ECWMin = 2;
		ECWMax = 3;
		TXOP = 0x2f;
		acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);
		rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
	} else {
		edca[0] = edca[1] = edca[2] = edca[3] = 0;

		for (i = 0; i < 4; i++) {
			ACI = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 5) & 0x03;
			ACM = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN >> 4) & 0x01;

			/* AIFS = AIFSN * slot time + SIFS - r2t phy delay */
			AIFS = (pmlmeinfo->WMM_param.ac_param[i].ACI_AIFSN & 0x0f) * pmlmeinfo->slotTime + aSifsTime;

			ECWMin = (pmlmeinfo->WMM_param.ac_param[i].CW & 0x0f);
			ECWMax = (pmlmeinfo->WMM_param.ac_param[i].CW & 0xf0) >> 4;
			TXOP = le16_to_cpu(pmlmeinfo->WMM_param.ac_param[i].TXOP_limit);

			acParm = AIFS | (ECWMin << 8) | (ECWMax << 12) | (TXOP << 16);

			switch (ACI) {
			case 0x0:
				rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BE, (u8 *)(&acParm));
				acm_mask |= (ACM ? BIT(1):0);
				edca[XMIT_BE_QUEUE] = acParm;
				break;
			case 0x1:
				rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_BK, (u8 *)(&acParm));
				/* acm_mask |= (ACM? BIT(0):0); */
				edca[XMIT_BK_QUEUE] = acParm;
				break;
			case 0x2:
				rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VI, (u8 *)(&acParm));
				acm_mask |= (ACM ? BIT(2):0);
				edca[XMIT_VI_QUEUE] = acParm;
				break;
			case 0x3:
				rtw_hal_set_hwreg(padapter, HW_VAR_AC_PARAM_VO, (u8 *)(&acParm));
				acm_mask |= (ACM ? BIT(3):0);
				edca[XMIT_VO_QUEUE] = acParm;
				break;
			}
		}

		if (padapter->registrypriv.acm_method == 1)
			rtw_hal_set_hwreg(padapter, HW_VAR_ACM_CTRL, (u8 *)(&acm_mask));
		else
			padapter->mlmepriv.acm_mask = acm_mask;

		inx[0] = 0; inx[1] = 1; inx[2] = 2; inx[3] = 3;

		if (pregpriv->wifi_spec == 1)
			sort_wmm_ac_params(inx, edca);

		for (i = 0; i < 4; i++)
			pxmitpriv->wmm_para_seq[i] = inx[i];
	}
}

/*
 * Re-evaluate bandwidth/offset from a received HT information element
 * and, when it changed, update our state, the AP's station entry and
 * kick the rate-adaptation mask work.
 */
static void bwmode_update_check(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
{
	unsigned char new_bwmode;
	unsigned char new_ch_offset;
	struct HT_info_element *pHT_info;
	struct mlme_priv *pmlmepriv = &(padapter->mlmepriv);
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;
	u8 cbw40_enable = 0;

	if (!pIE)
		return;

	if (phtpriv->ht_option == false)
		return;

	if (pIE->length > sizeof(struct HT_info_element))
		return;

	pHT_info = (struct HT_info_element *)pIE->data;

	/* 40MHz permission comes from different bw_mode nibbles per band */
	if (pmlmeext->cur_channel > 14) {
		if ((pregistrypriv->bw_mode & 0xf0) > 0)
			cbw40_enable = 1;
	} else {
		if ((pregistrypriv->bw_mode & 0x0f) > 0)
			cbw40_enable = 1;
	}

	if ((pHT_info->infos[0] & BIT(2)) && cbw40_enable) {
		new_bwmode = CHANNEL_WIDTH_40;

		switch (pHT_info->infos[0] & 0x3) {
		case 1:
			new_ch_offset = HAL_PRIME_CHNL_OFFSET_LOWER;
			break;
		case 3:
			new_ch_offset = HAL_PRIME_CHNL_OFFSET_UPPER;
			break;
		default:
			new_bwmode = CHANNEL_WIDTH_20;
			new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
			break;
		}
	} else {
		new_bwmode = CHANNEL_WIDTH_20;
		new_ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
	}

	if ((new_bwmode != pmlmeext->cur_bwmode) ||
	    (new_ch_offset != pmlmeext->cur_ch_offset)) {
		pmlmeinfo->bwmode_updated = true;

		pmlmeext->cur_bwmode = new_bwmode;
		pmlmeext->cur_ch_offset = new_ch_offset;

		/* update HT info also */
		HT_info_handler(padapter, pIE);
	} else {
		pmlmeinfo->bwmode_updated = false;
	}

	if (true == pmlmeinfo->bwmode_updated) {
		struct sta_info *psta;
		struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
		struct sta_priv *pstapriv = &padapter->stapriv;

		/* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */

		/* update ap's stainfo */
		psta = rtw_get_stainfo(pstapriv, cur_network->mac_address);
		if (psta) {
			struct ht_priv *phtpriv_sta = &psta->htpriv;

			if (phtpriv_sta->ht_option) {
				/* bwmode */
				psta->bw_mode = pmlmeext->cur_bwmode;
				phtpriv_sta->ch_offset = pmlmeext->cur_ch_offset;
			} else {
				psta->bw_mode = CHANNEL_WIDTH_20;
				phtpriv_sta->ch_offset = HAL_PRIME_CHNL_OFFSET_DONT_CARE;
			}

			rtw_dm_ra_mask_wk_cmd(padapter, (u8 *)psta);
		}
	}
}

/*
 * Merge a received HT capabilities element into our cached capabilities
 * (intersecting rates/flags, combining the A-MPDU parameter byte) and
 * derive the LDPC/STBC configuration.
 */
void HT_caps_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
{
	unsigned int i;
	u8 max_AMPDU_len, min_MPDU_spacing;
	u8 cur_ldpc_cap = 0, cur_stbc_cap = 0;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;

	if (!pIE)
		return;

	if (phtpriv->ht_option == false)
		return;

	pmlmeinfo->HT_caps_enable = 1;

	for (i = 0; i < (pIE->length); i++) {
		if (i != 2) {
			/* Commented by Albert 2010/07/12 */
			/* Got the endian issue here. */
			pmlmeinfo->HT_caps.u.HT_cap[i] &= (pIE->data[i]);
		} else {
			/* byte 2 is the A-MPDU parameter byte: take the
			 * smaller max length and the larger min spacing.
			 * modify from fw by Thomas 2010/11/17 */
			max_AMPDU_len = min(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x3,
					    pIE->data[i] & 0x3);

			min_MPDU_spacing = max(pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c,
					       pIE->data[i] & 0x1c);

			pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para = max_AMPDU_len | min_MPDU_spacing;
		}
	}

	/* update the MCS set */
	for (i = 0; i < 16; i++)
		pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate[i] &= pmlmeext->default_supported_mcs_set[i];

	/* update the MCS rates */
	set_mcs_rate_by_mask(pmlmeinfo->HT_caps.u.HT_cap_element.MCS_rate, MCS_RATE_1R);

	if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		/* Config STBC setting */
		if (TEST_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_TX) &&
		    GET_HT_CAPABILITY_ELE_TX_STBC(pIE->data))
			SET_FLAG(cur_stbc_cap, STBC_HT_ENABLE_TX);

		phtpriv->stbc_cap = cur_stbc_cap;
	} else {
		/* Config LDPC Coding Capability */
		if (TEST_FLAG(phtpriv->ldpc_cap, LDPC_HT_ENABLE_TX) &&
		    GET_HT_CAPABILITY_ELE_LDPC_CAP(pIE->data))
			SET_FLAG(cur_ldpc_cap, (LDPC_HT_ENABLE_TX | LDPC_HT_CAP_TX));

		phtpriv->ldpc_cap = cur_ldpc_cap;

		/* Config STBC setting */
		if (TEST_FLAG(phtpriv->stbc_cap, STBC_HT_ENABLE_TX) &&
		    GET_HT_CAPABILITY_ELE_RX_STBC(pIE->data))
			SET_FLAG(cur_stbc_cap, (STBC_HT_ENABLE_TX | STBC_HT_CAP_TX));

		phtpriv->stbc_cap = cur_stbc_cap;
	}
}

/* Cache a received HT information element (when HT is in use and it fits). */
void HT_info_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;

	if (!pIE)
		return;

	if (phtpriv->ht_option == false)
		return;

	if (pIE->length > sizeof(struct HT_info_element))
		return;

	pmlmeinfo->HT_info_enable = 1;
	memcpy(&(pmlmeinfo->HT_info), pIE->data, pIE->length);
}

/*
 * After association response: enable HT when both caps and info were
 * seen, then program the A-MPDU factor and minimum MPDU spacing.
 */
void HTOnAssocRsp(struct adapter *padapter)
{
	unsigned char max_AMPDU_len;
	unsigned char min_MPDU_spacing;
	/* struct registry_priv *pregpriv = &padapter->registrypriv; */
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) {
		pmlmeinfo->HT_enable = 1;
	} else {
		pmlmeinfo->HT_enable = 0;
		/* set_channel_bwmode(padapter, pmlmeext->cur_channel, pmlmeext->cur_ch_offset, pmlmeext->cur_bwmode); */
		return;
	}

	/* handle A-MPDU parameter field
	 *
	 * AMPDU_para [1:0]:Max AMPDU Len => 0:8k , 1:16k, 2:32k, 3:64k
	 * AMPDU_para [4:2]:Min MPDU Start Spacing
	 */
	max_AMPDU_len = pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x03;

	min_MPDU_spacing = (pmlmeinfo->HT_caps.u.HT_cap_element.AMPDU_para & 0x1c) >> 2;

	rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_MIN_SPACE, (u8 *)(&min_MPDU_spacing));

	rtw_hal_set_hwreg(padapter, HW_VAR_AMPDU_FACTOR, (u8 *)(&max_AMPDU_len));
}

/* Cache a received ERP information element (single byte expected). */
void ERP_IE_handler(struct adapter *padapter, struct ndis_80211_var_ie *pIE)
{
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	if (pIE->length > 1)
		return;

	pmlmeinfo->ERP_enable = 1;
	memcpy(&(pmlmeinfo->ERP_IE), pIE->data, pIE->length);
}

/*
 * Configure the station's virtual-carrier-sense protection (RTS/CTS or
 * CTS-to-self) from the driver setting; in auto mode, only when the ERP
 * element requested protection (Use_Protection bit, ERP_IE BIT(1)).
 */
void VCS_update(struct adapter *padapter, struct sta_info *psta)
{
	struct registry_priv *pregpriv = &padapter->registrypriv;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	switch (pregpriv->vrtl_carrier_sense) { /* 0:off 1:on 2:auto */
	case 0: /* off */
		psta->rtsen = 0;
		psta->cts2self = 0;
		break;
	case 1: /* on */
		if (pregpriv->vcs_type == 1) { /* 1:RTS/CTS 2:CTS to self */
			psta->rtsen = 1;
			psta->cts2self = 0;
		} else {
			psta->rtsen = 0;
			psta->cts2self = 1;
		}
		break;
	case 2: /* auto */
	default:
		if ((pmlmeinfo->ERP_enable) && (pmlmeinfo->ERP_IE & BIT(1))) {
			if (pregpriv->vcs_type == 1) {
				psta->rtsen = 1;
				psta->cts2self = 0;
			} else {
				psta->rtsen = 0;
				psta->cts2self = 1;
			}
		} else {
			psta->rtsen = 0;
			psta->cts2self = 0;
		}
		break;
	}
}

void update_ldpc_stbc_cap(struct sta_info *psta)
{ if (psta->htpriv.ht_option) { if (TEST_FLAG(psta->htpriv.ldpc_cap, LDPC_HT_ENABLE_TX)) psta->ldpc = 1; if (TEST_FLAG(psta->htpriv.stbc_cap, STBC_HT_ENABLE_TX)) psta->stbc = 1; } else { psta->ldpc = 0; psta->stbc = 0; } } int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len) { unsigned int len; unsigned char *p; unsigned short val16, subtype; struct wlan_network *cur_network = &(Adapter->mlmepriv.cur_network); /* u8 wpa_ie[255], rsn_ie[255]; */ u16 wpa_len = 0, rsn_len = 0; u8 encryp_protocol = 0; struct wlan_bssid_ex *bssid; int group_cipher = 0, pairwise_cipher = 0, is_8021x = 0; unsigned char *pbuf; u32 wpa_ielen = 0; u8 *pbssid = GetAddr3Ptr(pframe); struct HT_info_element *pht_info = NULL; struct ieee80211_ht_cap *pht_cap = NULL; u32 bcn_channel; unsigned short ht_cap_info; unsigned char ht_info_infos_0; struct mlme_priv *pmlmepriv = &Adapter->mlmepriv; int ssid_len; if (is_client_associated_to_ap(Adapter) == false) return true; len = packet_len - sizeof(struct ieee80211_hdr_3addr); if (len > MAX_IE_SZ) return _FAIL; if (memcmp(cur_network->network.mac_address, pbssid, 6)) return true; bssid = rtw_zmalloc(sizeof(struct wlan_bssid_ex)); if (!bssid) return true; if ((pmlmepriv->timeBcnInfoChkStart != 0) && (jiffies_to_msecs(jiffies - pmlmepriv->timeBcnInfoChkStart) > DISCONNECT_BY_CHK_BCN_FAIL_OBSERV_PERIOD_IN_MS)) { pmlmepriv->timeBcnInfoChkStart = 0; pmlmepriv->NumOfBcnInfoChkFail = 0; } subtype = GetFrameSubType(pframe) >> 4; if (subtype == WIFI_BEACON) bssid->reserved[0] = 1; bssid->length = sizeof(struct wlan_bssid_ex) - MAX_IE_SZ + len; /* below is to copy the information element */ bssid->ie_length = len; memcpy(bssid->ies, (pframe + sizeof(struct ieee80211_hdr_3addr)), bssid->ie_length); /* check bw and channel offset */ /* parsing HT_CAP_IE */ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_CAPABILITY, &len, bssid->ie_length - _FIXED_IE_LENGTH_); if (p && len > 0) { pht_cap = (struct ieee80211_ht_cap *)(p + 2); 
ht_cap_info = le16_to_cpu(pht_cap->cap_info); } else { ht_cap_info = 0; } /* parsing HT_INFO_IE */ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION, &len, bssid->ie_length - _FIXED_IE_LENGTH_); if (p && len > 0) { pht_info = (struct HT_info_element *)(p + 2); ht_info_infos_0 = pht_info->infos[0]; } else { ht_info_infos_0 = 0; } if (ht_cap_info != cur_network->bcn_info.ht_cap_info || ((ht_info_infos_0&0x03) != (cur_network->bcn_info.ht_info_infos_0&0x03))) { { /* bcn_info_update */ cur_network->bcn_info.ht_cap_info = ht_cap_info; cur_network->bcn_info.ht_info_infos_0 = ht_info_infos_0; /* to do : need to check that whether modify related register of BB or not */ } /* goto _mismatch; */ } /* Checking for channel */ p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_DS_PARAMS, &len, bssid->ie_length - _FIXED_IE_LENGTH_); if (p) { bcn_channel = *(p + 2); } else {/* In 5G, some ap do not have DSSET IE checking HT info for channel */ rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_HT_OPERATION, &len, bssid->ie_length - _FIXED_IE_LENGTH_); if (pht_info) bcn_channel = pht_info->primary_channel; else /* we don't find channel IE, so don't check it */ bcn_channel = Adapter->mlmeextpriv.cur_channel; } if (bcn_channel != Adapter->mlmeextpriv.cur_channel) goto _mismatch; /* checking SSID */ ssid_len = 0; p = rtw_get_ie(bssid->ies + _FIXED_IE_LENGTH_, WLAN_EID_SSID, &len, bssid->ie_length - _FIXED_IE_LENGTH_); if (p) { ssid_len = *(p + 1); if (ssid_len > NDIS_802_11_LENGTH_SSID) ssid_len = 0; } memcpy(bssid->ssid.ssid, (p + 2), ssid_len); bssid->ssid.ssid_length = ssid_len; if (memcmp(bssid->ssid.ssid, cur_network->network.ssid.ssid, 32) || bssid->ssid.ssid_length != cur_network->network.ssid.ssid_length) if (bssid->ssid.ssid[0] != '\0' && bssid->ssid.ssid_length != 0) /* not hidden ssid */ goto _mismatch; /* check encryption info */ val16 = rtw_get_capability((struct wlan_bssid_ex *)bssid); if (val16 & BIT(4)) bssid->privacy = 1; else bssid->privacy 
= 0; if (cur_network->network.privacy != bssid->privacy) goto _mismatch; rtw_get_sec_ie(bssid->ies, bssid->ie_length, NULL, &rsn_len, NULL, &wpa_len); if (rsn_len > 0) encryp_protocol = ENCRYP_PROTOCOL_WPA2; else if (wpa_len > 0) encryp_protocol = ENCRYP_PROTOCOL_WPA; else if (bssid->privacy) encryp_protocol = ENCRYP_PROTOCOL_WEP; if (cur_network->bcn_info.encryp_protocol != encryp_protocol) goto _mismatch; if (encryp_protocol == ENCRYP_PROTOCOL_WPA || encryp_protocol == ENCRYP_PROTOCOL_WPA2) { pbuf = rtw_get_wpa_ie(&bssid->ies[12], &wpa_ielen, bssid->ie_length-12); if (pbuf && (wpa_ielen > 0)) { rtw_parse_wpa_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x); } else { pbuf = rtw_get_wpa2_ie(&bssid->ies[12], &wpa_ielen, bssid->ie_length-12); if (pbuf && (wpa_ielen > 0)) rtw_parse_wpa2_ie(pbuf, wpa_ielen + 2, &group_cipher, &pairwise_cipher, &is_8021x); } if (pairwise_cipher != cur_network->bcn_info.pairwise_cipher || group_cipher != cur_network->bcn_info.group_cipher) goto _mismatch; if (is_8021x != cur_network->bcn_info.is_8021x) goto _mismatch; } kfree(bssid); return _SUCCESS; _mismatch: kfree(bssid); if (pmlmepriv->NumOfBcnInfoChkFail == 0) pmlmepriv->timeBcnInfoChkStart = jiffies; pmlmepriv->NumOfBcnInfoChkFail++; if ((pmlmepriv->timeBcnInfoChkStart != 0) && (jiffies_to_msecs(jiffies - pmlmepriv->timeBcnInfoChkStart) <= DISCONNECT_BY_CHK_BCN_FAIL_OBSERV_PERIOD_IN_MS) && (pmlmepriv->NumOfBcnInfoChkFail >= DISCONNECT_BY_CHK_BCN_FAIL_THRESHOLD)) { pmlmepriv->timeBcnInfoChkStart = 0; pmlmepriv->NumOfBcnInfoChkFail = 0; return _FAIL; } return _SUCCESS; } void update_beacon_info(struct adapter *padapter, u8 *pframe, uint pkt_len, struct sta_info *psta) { unsigned int i; unsigned int len; struct ndis_80211_var_ie *pIE; len = pkt_len - (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN); for (i = 0; i < len;) { pIE = (struct ndis_80211_var_ie *)(pframe + (_BEACON_IE_OFFSET_ + WLAN_HDR_A3_LEN) + i); switch (pIE->element_id) { case WLAN_EID_VENDOR_SPECIFIC: /* to 
update WMM parameter set while receiving beacon */ if (!memcmp(pIE->data, WMM_PARA_OUI, 6) && pIE->length == WLAN_WMM_LEN) /* WMM */ if (WMM_param_handler(padapter, pIE)) report_wmm_edca_update(padapter); break; case WLAN_EID_HT_OPERATION: /* HT info */ /* HT_info_handler(padapter, pIE); */ bwmode_update_check(padapter, pIE); break; case WLAN_EID_ERP_INFO: ERP_IE_handler(padapter, pIE); VCS_update(padapter, psta); break; default: break; } i += (pIE->length + 2); } } unsigned int is_ap_in_tkip(struct adapter *padapter) { u32 i; struct ndis_80211_var_ie *pIE; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); if (rtw_get_capability((struct wlan_bssid_ex *)cur_network) & WLAN_CAPABILITY_PRIVACY) { for (i = sizeof(struct ndis_802_11_fix_ie); i < pmlmeinfo->network.ie_length;) { pIE = (struct ndis_80211_var_ie *)(pmlmeinfo->network.ies + i); switch (pIE->element_id) { case WLAN_EID_VENDOR_SPECIFIC: if ((!memcmp(pIE->data, RTW_WPA_OUI, 4)) && (!memcmp((pIE->data + 12), WPA_TKIP_CIPHER, 4))) return true; break; case WLAN_EID_RSN: if (!memcmp((pIE->data + 8), RSN_TKIP_CIPHER, 4)) return true; break; default: break; } i += (pIE->length + 2); } return false; } else { return false; } } int support_short_GI(struct adapter *padapter, struct HT_caps_element *pHT_caps, u8 bwmode) { unsigned char bit_offset; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); if (!(pmlmeinfo->HT_enable)) return _FAIL; bit_offset = (bwmode & CHANNEL_WIDTH_40) ? 
	/* Continuation of support_short_GI(): select the Short-GI capability
	 * bit to test — bit 6 for 40 MHz operation, bit 5 for 20 MHz
	 * (HT Capabilities Info field layout). */
	6 : 5;
	if (le16_to_cpu(pHT_caps->u.HT_cap_element.HT_caps_info) & (0x1 << bit_offset))
		return _SUCCESS;
	else
		return _FAIL;
}

/*
 * get_highest_rate_idx - return the index of the most significant set bit
 * in @mask (bit 31 scanned first, down to bit 0).
 *
 * Returns 0 when @mask is empty — note this is indistinguishable from a
 * mask whose only set bit is bit 0; callers apparently rely on that being
 * acceptable.
 */
unsigned char get_highest_rate_idx(u32 mask)
{
	int i;
	unsigned char rate_idx = 0;

	for (i = 31; i >= 0; i--) {
		if (mask & BIT(i)) {
			rate_idx = i;
			break;
		}
	}

	return rate_idx;
}

/* Refresh the rate-adaptive mask for @psta via the HAL layer. */
void Update_RA_Entry(struct adapter *padapter, struct sta_info *psta)
{
	rtw_hal_update_ra_mask(psta, 0);
}

/* Thin wrapper: (re)initialize rate adaptation for @psta. */
void set_sta_rate(struct adapter *padapter, struct sta_info *psta)
{
	/* rate adaptive */
	Update_RA_Entry(padapter, psta);
}

/*
 * get_realtek_assoc_AP_vender - refine the Realtek AP vendor id from the
 * Realtek vendor-specific IE.
 *
 * data[4] carries an IE sub-version: 1 => check data[5] capability flags,
 * 2 => check data[6] for Jaguar B/C-cut AP flags.  Defaults to the generic
 * HT_IOT_PEER_REALTEK when the IE is too short or no flag matches.
 *
 * NOTE(review): the first `if (data[4] == 1)` guards only the 92SE check
 * below it (the intervening lines are comments); the SOFTAP check runs
 * regardless of data[4] — presumably intentional, matches upstream layout.
 */
static u32 get_realtek_assoc_AP_vender(struct ndis_80211_var_ie *pIE)
{
	u32 Vender = HT_IOT_PEER_REALTEK;

	if (pIE->length >= 5) {
		if (pIE->data[4] == 1)
			/* if (pIE->data[5] & RT_HT_CAP_USE_LONG_PREAMBLE) */
			/* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_LONG_PREAMBLE; */
			if (pIE->data[5] & RT_HT_CAP_USE_92SE)
				/* bssDesc->BssHT.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE; */
				Vender = HT_IOT_PEER_REALTEK_92SE;

		if (pIE->data[5] & RT_HT_CAP_USE_SOFTAP)
			Vender = HT_IOT_PEER_REALTEK_SOFTAP;

		if (pIE->data[4] == 2) {
			if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_BCUT)
				Vender = HT_IOT_PEER_REALTEK_JAGUAR_BCUTAP;

			if (pIE->data[6] & RT_HT_CAP_USE_JAGUAR_CCUT)
				Vender = HT_IOT_PEER_REALTEK_JAGUAR_CCUTAP;
		}
	}

	return Vender;
}

/*
 * check_assoc_AP - walk the IEs of an association frame and classify the
 * peer AP's chipset vendor by matching well-known OUIs in
 * vendor-specific IEs.  Returns an HT_IOT_PEER_* id.
 */
unsigned char check_assoc_AP(u8 *pframe, uint len)
{
	unsigned int i;
	struct ndis_80211_var_ie *pIE;

	/* IEs start after the fixed fields of the management frame body. */
	for (i = sizeof(struct ndis_802_11_fix_ie); i < len;) {
		pIE = (struct ndis_80211_var_ie *)(pframe + i);

		switch (pIE->element_id) {
		case WLAN_EID_VENDOR_SPECIFIC:
			if ((!memcmp(pIE->data, ARTHEROS_OUI1, 3)) || (!memcmp(pIE->data, ARTHEROS_OUI2, 3)))
				return HT_IOT_PEER_ATHEROS;
			else if ((!memcmp(pIE->data, BROADCOM_OUI1, 3)) ||
				 (!memcmp(pIE->data, BROADCOM_OUI2, 3)) ||
				 (!memcmp(pIE->data, BROADCOM_OUI3, 3)))
				return HT_IOT_PEER_BROADCOM;
			else if (!memcmp(pIE->data, MARVELL_OUI, 3))
				return HT_IOT_PEER_MARVELL;
			else if (!memcmp(pIE->data, RALINK_OUI, 3))
				return HT_IOT_PEER_RALINK;
			else if (!memcmp(pIE->data, CISCO_OUI, 3))
				return HT_IOT_PEER_CISCO;
			/* continued on the next source line: Realtek / Airgo OUIs */
			else if
(!memcmp(pIE->data, REALTEK_OUI, 3)) return get_realtek_assoc_AP_vender(pIE); else if (!memcmp(pIE->data, AIRGOCAP_OUI, 3)) return HT_IOT_PEER_AIRGO; else break; default: break; } i += (pIE->length + 2); } return HT_IOT_PEER_UNKNOWN; } void update_IOT_info(struct adapter *padapter) { struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); switch (pmlmeinfo->assoc_AP_vendor) { case HT_IOT_PEER_MARVELL: pmlmeinfo->turboMode_cts2self = 1; pmlmeinfo->turboMode_rtsen = 0; break; case HT_IOT_PEER_RALINK: pmlmeinfo->turboMode_cts2self = 0; pmlmeinfo->turboMode_rtsen = 1; /* disable high power */ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false); break; case HT_IOT_PEER_REALTEK: /* rtw_write16(padapter, 0x4cc, 0xffff); */ /* rtw_write16(padapter, 0x546, 0x01c0); */ /* disable high power */ Switch_DM_Func(padapter, (~DYNAMIC_BB_DYNAMIC_TXPWR), false); break; default: pmlmeinfo->turboMode_cts2self = 0; pmlmeinfo->turboMode_rtsen = 1; break; } } void update_capinfo(struct adapter *Adapter, u16 updateCap) { struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); bool ShortPreamble; /* Check preamble mode, 2005.01.06, by rcnjko. 
*/ /* Mark to update preamble value forever, 2008.03.18 by lanhsin */ /* if (pMgntInfo->RegPreambleMode == PREAMBLE_AUTO) */ { if (updateCap & cShortPreamble) { /* Short Preamble */ if (pmlmeinfo->preamble_mode != PREAMBLE_SHORT) { /* PREAMBLE_LONG or PREAMBLE_AUTO */ ShortPreamble = true; pmlmeinfo->preamble_mode = PREAMBLE_SHORT; rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble); } } else { /* Long Preamble */ if (pmlmeinfo->preamble_mode != PREAMBLE_LONG) { /* PREAMBLE_SHORT or PREAMBLE_AUTO */ ShortPreamble = false; pmlmeinfo->preamble_mode = PREAMBLE_LONG; rtw_hal_set_hwreg(Adapter, HW_VAR_ACK_PREAMBLE, (u8 *)&ShortPreamble); } } } if (updateCap & cIBSS) { /* Filen: See 802.11-2007 p.91 */ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME; } else { /* Filen: See 802.11-2007 p.90 */ if (pmlmeext->cur_wireless_mode & (WIRELESS_11_24N)) { pmlmeinfo->slotTime = SHORT_SLOT_TIME; } else if (pmlmeext->cur_wireless_mode & (WIRELESS_11G)) { if ((updateCap & cShortSlotTime) /* && (!(pMgntInfo->pHTInfo->RT2RT_HT_Mode & RT_HT_CAP_USE_LONG_PREAMBLE)) */) /* Short Slot Time */ pmlmeinfo->slotTime = SHORT_SLOT_TIME; else /* Long Slot Time */ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME; } else { /* B Mode */ pmlmeinfo->slotTime = NON_SHORT_SLOT_TIME; } } rtw_hal_set_hwreg(Adapter, HW_VAR_SLOT_TIME, &pmlmeinfo->slotTime); } void update_wireless_mode(struct adapter *padapter) { int network_type = 0; u32 SIFS_Timer; struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv; struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network); unsigned char *rate = cur_network->supported_rates; if ((pmlmeinfo->HT_info_enable) && (pmlmeinfo->HT_caps_enable)) pmlmeinfo->HT_enable = 1; if (pmlmeinfo->HT_enable) network_type = WIRELESS_11_24N; if (rtw_is_cckratesonly_included(rate)) network_type |= WIRELESS_11B; else if (rtw_is_cckrates_included(rate)) network_type |= WIRELESS_11BG; else network_type |= WIRELESS_11G; 
pmlmeext->cur_wireless_mode = network_type & padapter->registrypriv.wireless_mode; SIFS_Timer = 0x0a0a0808; /* 0x0808 -> for CCK, 0x0a0a -> for OFDM */ /* change this value if having IOT issues. */ SetHwReg8723BS(padapter, HW_VAR_RESP_SIFS, (u8 *)&SIFS_Timer); SetHwReg8723BS(padapter, HW_VAR_WIRELESS_MODE, (u8 *)&(pmlmeext->cur_wireless_mode)); if (pmlmeext->cur_wireless_mode & WIRELESS_11B) update_mgnt_tx_rate(padapter, IEEE80211_CCK_RATE_1MB); else update_mgnt_tx_rate(padapter, IEEE80211_OFDM_RATE_6MB); } void update_sta_basic_rate(struct sta_info *psta, u8 wireless_mode) { if (is_supported_tx_cck(wireless_mode)) { /* Only B, B/G, and B/G/N AP could use CCK rate */ memcpy(psta->bssrateset, rtw_basic_rate_cck, 4); psta->bssratelen = 4; } else { memcpy(psta->bssrateset, rtw_basic_rate_ofdm, 3); psta->bssratelen = 3; } } int update_sta_support_rate(struct adapter *padapter, u8 *pvar_ie, uint var_ie_len, int cam_idx) { unsigned int ie_len; struct ndis_80211_var_ie *pIE; int supportRateNum = 0; struct mlme_ext_priv *pmlmeext = &(padapter->mlmeextpriv); struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, WLAN_EID_SUPP_RATES, &ie_len, var_ie_len); if (!pIE) return _FAIL; if (ie_len > sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates)) return _FAIL; memcpy(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates, pIE->data, ie_len); supportRateNum = ie_len; pIE = (struct ndis_80211_var_ie *)rtw_get_ie(pvar_ie, WLAN_EID_EXT_SUPP_RATES, &ie_len, var_ie_len); if (pIE && (ie_len <= sizeof(pmlmeinfo->FW_sta_info[cam_idx].SupportedRates) - supportRateNum)) memcpy((pmlmeinfo->FW_sta_info[cam_idx].SupportedRates + supportRateNum), pIE->data, ie_len); return _SUCCESS; } void process_addba_req(struct adapter *padapter, u8 *paddba_req, u8 *addr) { struct sta_info *psta; u16 tid, param; struct recv_reorder_ctrl *preorder_ctrl; struct sta_priv *pstapriv = &padapter->stapriv; struct ADDBA_request *preq = (struct ADDBA_request 
	/* Continuation of process_addba_req(): cast of the raw request buffer. */
	*)paddba_req;
	struct mlme_ext_priv *pmlmeext = &padapter->mlmeextpriv;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	psta = rtw_get_stainfo(pstapriv, addr);
	if (psta) {
		param = le16_to_cpu(preq->BA_para_set);
		/* TID lives in bits [5:2] of the BA parameter set. */
		tid = (param>>2)&0x0f;

		preorder_ctrl = &psta->recvreorder_ctrl[tid];

		/* 0xffff marks the reorder sequence as "not yet initialized". */
		preorder_ctrl->indicate_seq = 0xffff;

		preorder_ctrl->enable = pmlmeinfo->accept_addba_req;
	}
}

/*
 * update_TSF - cache the peer's TSF timer from a received management frame.
 *
 * Reads the two little-endian 32-bit words that immediately follow the
 * 3-address 802.11 header (the beacon Timestamp field) and assembles them
 * into the 64-bit TSFValue (second word = high half, first word = low half).
 */
void update_TSF(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
{
	u8 *pIE;
	__le32 *pbuf;

	pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
	pbuf = (__le32 *)pIE;

	pmlmeext->TSFValue = le32_to_cpu(*(pbuf+1));
	pmlmeext->TSFValue = pmlmeext->TSFValue << 32;
	pmlmeext->TSFValue |= le32_to_cpu(*pbuf);
}

/* Ask the HAL to resynchronize the hardware TSF timer. */
void correct_TSF(struct adapter *padapter, struct mlme_ext_priv *pmlmeext)
{
	rtw_hal_set_hwreg(padapter, HW_VAR_CORRECT_TSF, NULL);
}

/*
 * adaptive_early_32k - collect beacon-arrival-delay statistics and derive
 * the driver's early-beacon / beacon-timeout thresholds.
 *
 * For every beacon the delay (ms) of the timestamp relative to the beacon
 * interval is histogrammed into bcn_delay_cnt[0..8] (bucket 8 = ">= 8 ms").
 * Once more than 100 beacons have been seen (and adaptive TSF is done),
 * the histogram is converted to cumulative percentages: the first bucket
 * where >20%% of beacons have arrived sets DrvBcnEarly, >80%% sets
 * DrvBcnTimeOut, then the counters reset for the next observation window.
 */
void adaptive_early_32k(struct mlme_ext_priv *pmlmeext, u8 *pframe, uint len)
{
	int i;
	u8 *pIE;
	__le32 *pbuf;
	u64 tsf = 0;
	u32 delay_ms;
	struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);

	pmlmeext->bcn_cnt++;

	/* Assemble the 64-bit beacon timestamp, as in update_TSF(). */
	pIE = pframe + sizeof(struct ieee80211_hdr_3addr);
	pbuf = (__le32 *)pIE;

	tsf = le32_to_cpu(*(pbuf+1));
	tsf = tsf << 32;
	tsf |= le32_to_cpu(*pbuf);

	/* delay = (timestamp mod 1024*100)/1000 (unit: ms) */
	/* delay_ms = do_div(tsf, (pmlmeinfo->bcn_interval*1024))/1000; */
	/* do_div() divides tsf in place and returns the remainder (us). */
	delay_ms = do_div(tsf, (pmlmeinfo->bcn_interval*1024));
	delay_ms = delay_ms/1000;

	if (delay_ms >= 8)
		pmlmeext->bcn_delay_cnt[8]++;
		/* pmlmeext->bcn_delay_ratio[8] = (pmlmeext->bcn_delay_cnt[8] * 100) /pmlmeext->bcn_cnt; */
	else
		pmlmeext->bcn_delay_cnt[delay_ms]++;
		/* pmlmeext->bcn_delay_ratio[delay_ms] = (pmlmeext->bcn_delay_cnt[delay_ms] * 100) /pmlmeext->bcn_cnt; */

	/* dump for adaptive_early_32k */
	if (pmlmeext->bcn_cnt > 100 && (pmlmeext->adaptive_tsf_done == true)) {
		u8 ratio_20_delay, ratio_80_delay;
		u8 DrvBcnEarly, DrvBcnTimeOut;

		ratio_20_delay = 0;
		ratio_80_delay = 0;
		DrvBcnEarly = 0xff;	/* 0xff = "not determined yet" */
		DrvBcnTimeOut = 0xff;

		for (i = 0; i < 9; i++) {
			pmlmeext->bcn_delay_ratio[i] = (pmlmeext->bcn_delay_cnt[i] * 100) / pmlmeext->bcn_cnt;

			/* Both accumulators build the same cumulative
			 * percentage; only the trigger threshold differs. */
			ratio_20_delay += pmlmeext->bcn_delay_ratio[i];
			ratio_80_delay += pmlmeext->bcn_delay_ratio[i];

			if (ratio_20_delay > 20 && DrvBcnEarly == 0xff)
				DrvBcnEarly = i;

			if (ratio_80_delay > 80 && DrvBcnTimeOut == 0xff)
				DrvBcnTimeOut = i;

			/* reset adaptive_early_32k cnt */
			pmlmeext->bcn_delay_cnt[i] = 0;
			pmlmeext->bcn_delay_ratio[i] = 0;
		}

		pmlmeext->DrvBcnEarly = DrvBcnEarly;
		pmlmeext->DrvBcnTimeOut = DrvBcnTimeOut;

		pmlmeext->bcn_cnt = 0;
	}
}

/*
 * rtw_alloc_macid - claim a free hardware mac_id slot for @psta.
 *
 * Broadcast peers get no id; the device's own address maps to the
 * sentinel NUM_STA.  Otherwise the first free slot in dvobj->macid[] is
 * claimed under dvobj->lock; if none is free, mac_id is set to the
 * NUM_STA sentinel ("no slot").
 */
void rtw_alloc_macid(struct adapter *padapter, struct sta_info *psta)
{
	int i;
	struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);

	if (is_broadcast_ether_addr(psta->hwaddr))
		return;

	if (!memcmp(psta->hwaddr, myid(&padapter->eeprompriv), ETH_ALEN)) {
		psta->mac_id = NUM_STA;
		return;
	}

	spin_lock_bh(&pdvobj->lock);
	for (i = 0; i < NUM_STA; i++) {
		if (pdvobj->macid[i] == false) {
			pdvobj->macid[i] = true;
			break;
		}
	}
	spin_unlock_bh(&pdvobj->lock);

	/* i == NUM_STA here means the loop found no free slot. */
	if (i > (NUM_STA - 1))
		psta->mac_id = NUM_STA;
	else
		psta->mac_id = i;
}

/*
 * rtw_release_macid - return @psta's mac_id slot to the free pool.
 *
 * Mirrors rtw_alloc_macid(): broadcast and self addresses are ignored.
 * mac_id 1 is never released (NOTE(review): presumably a reserved slot —
 * confirm against the HAL's mac_id assignments).  On release the slot is
 * cleared under dvobj->lock and mac_id reset to the NUM_STA sentinel.
 */
void rtw_release_macid(struct adapter *padapter, struct sta_info *psta)
{
	struct dvobj_priv *pdvobj = adapter_to_dvobj(padapter);

	if (is_broadcast_ether_addr(psta->hwaddr))
		return;

	if (!memcmp(psta->hwaddr, myid(&padapter->eeprompriv), ETH_ALEN))
		return;

	spin_lock_bh(&pdvobj->lock);
	if (psta->mac_id < NUM_STA && psta->mac_id != 1) {
		if (pdvobj->macid[psta->mac_id] == true) {
			pdvobj->macid[psta->mac_id] = false;
			psta->mac_id = NUM_STA;
		}
	}
	spin_unlock_bh(&pdvobj->lock);
}
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#ifndef _I915_ACTIVE_TYPES_H_
#define _I915_ACTIVE_TYPES_H_

/*
 * Minimal stand-in for the real i915 activity-tracking type: the struct is
 * intentionally empty and the retire-sleeps flag is hard-wired to 0.
 * NOTE(review): presumably a build-time stub replacing
 * drivers/gpu/drm/i915/i915_active_types.h — confirm against the tree this
 * header is compiled in.
 */
struct i915_active {};

#define I915_ACTIVE_RETIRE_SLEEPS 0

#endif /* _I915_ACTIVE_TYPES_H_ */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * PCI Peer 2 Peer DMA support.
 *
 * Copyright (c) 2016-2018, Logan Gunthorpe
 * Copyright (c) 2016-2017, Microsemi Corporation
 * Copyright (c) 2017, Christoph Hellwig
 * Copyright (c) 2018, Eideticom Inc.
 */

#ifndef _LINUX_PCI_P2PDMA_H
#define _LINUX_PCI_P2PDMA_H

#include <linux/pci.h>

struct block_device;
struct scatterlist;

#ifdef CONFIG_PCI_P2PDMA
/* Provider side: expose a BAR region as p2p memory. */
int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar, size_t size,
		u64 offset);
/* Topology query: p2p "distance" between a provider and many clients. */
int pci_p2pdma_distance_many(struct pci_dev *provider, struct device **clients,
			     int num_clients, bool verbose);
bool pci_has_p2pmem(struct pci_dev *pdev);
struct pci_dev *pci_p2pmem_find_many(struct device **clients, int num_clients);
/* Allocation / mapping of p2p memory from a provider device. */
void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size);
void pci_free_p2pmem(struct pci_dev *pdev, void *addr, size_t size);
pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev, void *addr);
struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
					 unsigned int *nents, u32 length);
void pci_p2pmem_free_sgl(struct pci_dev *pdev, struct scatterlist *sgl);
void pci_p2pmem_publish(struct pci_dev *pdev, bool publish);
/* sysfs helpers for "p2pmem" enable attributes. */
int pci_p2pdma_enable_store(const char *page, struct pci_dev **p2p_dev,
			    bool *use_p2pdma);
ssize_t pci_p2pdma_enable_show(char *page, struct pci_dev *p2p_dev,
			       bool use_p2pdma);
#else /* CONFIG_PCI_P2PDMA */
/*
 * Stubs for !CONFIG_PCI_P2PDMA: every query reports "no p2p support"
 * (-EOPNOTSUPP / -1 / false / NULL / 0) so callers degrade gracefully.
 */
static inline int pci_p2pdma_add_resource(struct pci_dev *pdev, int bar,
		size_t size, u64 offset)
{
	return -EOPNOTSUPP;
}
static inline int pci_p2pdma_distance_many(struct pci_dev *provider,
	struct device **clients, int num_clients, bool verbose)
{
	return -1;
}
static inline bool pci_has_p2pmem(struct pci_dev *pdev)
{
	return false;
}
static inline struct pci_dev *pci_p2pmem_find_many(struct device **clients,
						   int num_clients)
{
	return NULL;
}
static inline void *pci_alloc_p2pmem(struct pci_dev *pdev, size_t size)
{
	return NULL;
}
static inline void pci_free_p2pmem(struct pci_dev *pdev, void *addr,
		size_t size)
{
}
static inline pci_bus_addr_t pci_p2pmem_virt_to_bus(struct pci_dev *pdev,
						    void *addr)
{
	return 0;
}
static inline struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
		unsigned int *nents, u32 length)
{
	return NULL;
}
static inline void pci_p2pmem_free_sgl(struct pci_dev *pdev,
		struct scatterlist *sgl)
{
}
static inline void pci_p2pmem_publish(struct pci_dev *pdev, bool publish)
{
}
static inline int pci_p2pdma_enable_store(const char *page,
		struct pci_dev **p2p_dev, bool *use_p2pdma)
{
	*use_p2pdma = false;
	return 0;
}
static inline ssize_t pci_p2pdma_enable_show(char *page,
		struct pci_dev *p2p_dev, bool use_p2pdma)
{
	return sprintf(page, "none\n");
}
#endif /* CONFIG_PCI_P2PDMA */

/* Single-client convenience wrapper around pci_p2pdma_distance_many(). */
static inline int pci_p2pdma_distance(struct pci_dev *provider,
	struct device *client, bool verbose)
{
	return pci_p2pdma_distance_many(provider, &client, 1, verbose);
}

/* Single-client convenience wrapper around pci_p2pmem_find_many(). */
static inline struct pci_dev *pci_p2pmem_find(struct device *client)
{
	return pci_p2pmem_find_many(&client, 1);
}

#endif /* _LINUX_PCI_P2PDMA_H */
/* * This file implement the Wireless Extensions core API. * * Authors : Jean Tourrilhes - HPL - <[email protected]> * Copyright (c) 1997-2007 Jean Tourrilhes, All Rights Reserved. * Copyright 2009 Johannes Berg <[email protected]> * Copyright (C) 2024 Intel Corporation * * (As all part of the Linux kernel, this file is GPL) */ #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/slab.h> #include <linux/wireless.h> #include <linux/uaccess.h> #include <linux/export.h> #include <net/cfg80211.h> #include <net/iw_handler.h> #include <net/netlink.h> #include <net/wext.h> #include <net/net_namespace.h> typedef int (*wext_ioctl_func)(struct net_device *, struct iwreq *, unsigned int, struct iw_request_info *, iw_handler); /* * Meta-data about all the standard Wireless Extension request we * know about. */ static const struct iw_ioctl_description standard_ioctl[] = { [IW_IOCTL_IDX(SIOCSIWCOMMIT)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWNAME)] = { .header_type = IW_HEADER_TYPE_CHAR, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWNWID)] = { .header_type = IW_HEADER_TYPE_PARAM, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWNWID)] = { .header_type = IW_HEADER_TYPE_PARAM, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWFREQ)] = { .header_type = IW_HEADER_TYPE_FREQ, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWFREQ)] = { .header_type = IW_HEADER_TYPE_FREQ, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWMODE)] = { .header_type = IW_HEADER_TYPE_UINT, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWMODE)] = { .header_type = IW_HEADER_TYPE_UINT, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWSENS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWSENS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRANGE)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWRANGE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, 
.max_tokens = sizeof(struct iw_range), .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWPRIV)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWPRIV)] = { /* (handled directly by us) */ .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_priv_args), .max_tokens = 16, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWSTATS)] = { .header_type = IW_HEADER_TYPE_NULL, }, [IW_IOCTL_IDX(SIOCGIWSTATS)] = { /* (handled directly by us) */ .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_statistics), .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr), .max_tokens = IW_MAX_SPY, }, [IW_IOCTL_IDX(SIOCGIWSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality), .max_tokens = IW_MAX_SPY, }, [IW_IOCTL_IDX(SIOCSIWTHRSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_thrspy), .min_tokens = 1, .max_tokens = 1, }, [IW_IOCTL_IDX(SIOCGIWTHRSPY)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct iw_thrspy), .min_tokens = 1, .max_tokens = 1, }, [IW_IOCTL_IDX(SIOCSIWAP)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_IOCTL_IDX(SIOCGIWAP)] = { .header_type = IW_HEADER_TYPE_ADDR, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWMLME)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_mlme), .max_tokens = sizeof(struct iw_mlme), }, [IW_IOCTL_IDX(SIOCGIWAPLIST)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = sizeof(struct sockaddr) + sizeof(struct iw_quality), .max_tokens = IW_MAX_AP, .flags = IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWSCAN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = 0, .max_tokens = sizeof(struct iw_scan_req), }, [IW_IOCTL_IDX(SIOCGIWSCAN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_SCAN_MAX_DATA, .flags 
= IW_DESCR_FLAG_NOMAX, }, [IW_IOCTL_IDX(SIOCSIWESSID)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, .flags = IW_DESCR_FLAG_EVENT, }, [IW_IOCTL_IDX(SIOCGIWESSID)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, .flags = IW_DESCR_FLAG_DUMP, }, [IW_IOCTL_IDX(SIOCSIWNICKN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, }, [IW_IOCTL_IDX(SIOCGIWNICKN)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ESSID_MAX_SIZE, }, [IW_IOCTL_IDX(SIOCSIWRATE)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRATE)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRTS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRTS)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWFRAG)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWFRAG)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWTXPOW)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWTXPOW)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWRETRY)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWRETRY)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWENCODE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ENCODING_TOKEN_MAX, .flags = IW_DESCR_FLAG_EVENT | IW_DESCR_FLAG_RESTRICT, }, [IW_IOCTL_IDX(SIOCGIWENCODE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_ENCODING_TOKEN_MAX, .flags = IW_DESCR_FLAG_DUMP | IW_DESCR_FLAG_RESTRICT, }, [IW_IOCTL_IDX(SIOCSIWPOWER)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWPOWER)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_IOCTL_IDX(SIOCGIWGENIE)] = { .header_type = 
IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_IOCTL_IDX(SIOCSIWAUTH)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCGIWAUTH)] = { .header_type = IW_HEADER_TYPE_PARAM, }, [IW_IOCTL_IDX(SIOCSIWENCODEEXT)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_encode_ext), .max_tokens = sizeof(struct iw_encode_ext) + IW_ENCODING_TOKEN_MAX, }, [IW_IOCTL_IDX(SIOCGIWENCODEEXT)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_encode_ext), .max_tokens = sizeof(struct iw_encode_ext) + IW_ENCODING_TOKEN_MAX, }, [IW_IOCTL_IDX(SIOCSIWPMKSA)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .min_tokens = sizeof(struct iw_pmksa), .max_tokens = sizeof(struct iw_pmksa), }, }; static const unsigned int standard_ioctl_num = ARRAY_SIZE(standard_ioctl); /* * Meta-data about all the additional standard Wireless Extension events * we know about. */ static const struct iw_ioctl_description standard_event[] = { [IW_EVENT_IDX(IWEVTXDROP)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVQUAL)] = { .header_type = IW_HEADER_TYPE_QUAL, }, [IW_EVENT_IDX(IWEVCUSTOM)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_CUSTOM_MAX, }, [IW_EVENT_IDX(IWEVREGISTERED)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVEXPIRED)] = { .header_type = IW_HEADER_TYPE_ADDR, }, [IW_EVENT_IDX(IWEVGENIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVMICHAELMICFAILURE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = sizeof(struct iw_michaelmicfailure), }, [IW_EVENT_IDX(IWEVASSOCREQIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVASSOCRESPIE)] = { .header_type = IW_HEADER_TYPE_POINT, .token_size = 1, .max_tokens = IW_GENERIC_IE_MAX, }, [IW_EVENT_IDX(IWEVPMKIDCAND)] = { 
.header_type	= IW_HEADER_TYPE_POINT,
	  .token_size	= 1,
	  .max_tokens	= sizeof(struct iw_pmkid_cand),
	},
};
static const unsigned int standard_event_num = ARRAY_SIZE(standard_event);

/* Size (in bytes) of various events, indexed by IW_HEADER_TYPE_* */
static const int event_type_size[] = {
	IW_EV_LCP_LEN,			/* IW_HEADER_TYPE_NULL */
	0,
	IW_EV_CHAR_LEN,			/* IW_HEADER_TYPE_CHAR */
	0,
	IW_EV_UINT_LEN,			/* IW_HEADER_TYPE_UINT */
	IW_EV_FREQ_LEN,			/* IW_HEADER_TYPE_FREQ */
	IW_EV_ADDR_LEN,			/* IW_HEADER_TYPE_ADDR */
	0,
	IW_EV_POINT_LEN,		/* Without variable payload */
	IW_EV_PARAM_LEN,		/* IW_HEADER_TYPE_PARAM */
	IW_EV_QUAL_LEN,			/* IW_HEADER_TYPE_QUAL */
};

#ifdef CONFIG_COMPAT
/* Same table, but with the sizes a 32-bit userspace expects */
static const int compat_event_type_size[] = {
	IW_EV_COMPAT_LCP_LEN,		/* IW_HEADER_TYPE_NULL */
	0,
	IW_EV_COMPAT_CHAR_LEN,		/* IW_HEADER_TYPE_CHAR */
	0,
	IW_EV_COMPAT_UINT_LEN,		/* IW_HEADER_TYPE_UINT */
	IW_EV_COMPAT_FREQ_LEN,		/* IW_HEADER_TYPE_FREQ */
	IW_EV_COMPAT_ADDR_LEN,		/* IW_HEADER_TYPE_ADDR */
	0,
	IW_EV_COMPAT_POINT_LEN,		/* Without variable payload */
	IW_EV_COMPAT_PARAM_LEN,		/* IW_HEADER_TYPE_PARAM */
	IW_EV_COMPAT_QUAL_LEN,		/* IW_HEADER_TYPE_QUAL */
};
#endif

/* IW event code */

/*
 * Drain every per-netns queue of pending wireless netlink events and
 * deliver them via rtnetlink (RTNLGRP_LINK).  May sleep (GFP_KERNEL,
 * takes net_rwsem for reading).
 */
void wireless_nlevent_flush(void)
{
	struct sk_buff *skb;
	struct net *net;

	down_read(&net_rwsem);
	for_each_net(net) {
		while ((skb = skb_dequeue(&net->wext_nlevents)))
			rtnl_notify(skb, net, 0, RTNLGRP_LINK, NULL,
				    GFP_KERNEL);
	}
	up_read(&net_rwsem);
}
EXPORT_SYMBOL_GPL(wireless_nlevent_flush);

static int wext_netdev_notifier_call(struct notifier_block *nb,
				     unsigned long state, void *ptr)
{
	/*
	 * When a netdev changes state in any way, flush all pending messages
	 * to avoid them going out in a strange order, e.g. RTM_NEWLINK after
	 * RTM_DELLINK, or with IFF_UP after without IFF_UP during dev_close()
	 * or similar - all of which could otherwise happen due to delays from
	 * schedule_work().
	 */
	wireless_nlevent_flush();
	return NOTIFY_OK;
}

static struct notifier_block wext_netdev_notifier = {
	.notifier_call = wext_netdev_notifier_call,
};

/* Set up the per-netns event queue that wireless_send_event() fills */
static int __net_init wext_pernet_init(struct net *net)
{
	skb_queue_head_init(&net->wext_nlevents);
	return 0;
}

static void __net_exit wext_pernet_exit(struct net *net)
{
	skb_queue_purge(&net->wext_nlevents);
}

static struct pernet_operations wext_pernet_ops = {
	.init = wext_pernet_init,
	.exit = wext_pernet_exit,
};

static int __init wireless_nlevent_init(void)
{
	int err = register_pernet_subsys(&wext_pernet_ops);

	if (err)
		return err;

	err = register_netdevice_notifier(&wext_netdev_notifier);
	if (err)
		/* Roll back the pernet registration on failure */
		unregister_pernet_subsys(&wext_pernet_ops);
	return err;
}

subsys_initcall(wireless_nlevent_init);

/* Process events generated by the wireless layer or the driver. */
static void wireless_nlevent_process(struct work_struct *work)
{
	wireless_nlevent_flush();
}

static DECLARE_WORK(wireless_nlevent_work, wireless_nlevent_process);

/*
 * Put an RTM_NEWLINK header (plus IFLA_IFNAME) for @dev into @skb.
 * Returns the nlmsghdr so the caller can append IFLA_WIRELESS data,
 * or NULL if the skb ran out of room (the partial message is cancelled).
 */
static struct nlmsghdr *rtnetlink_ifinfo_prep(struct net_device *dev,
					      struct sk_buff *skb)
{
	struct ifinfomsg *r;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, 0, 0, RTM_NEWLINK, sizeof(*r), 0);
	if (!nlh)
		return NULL;

	r = nlmsg_data(nlh);
	r->ifi_family = AF_UNSPEC;
	r->__ifi_pad = 0;
	r->ifi_type = dev->type;
	r->ifi_index = dev->ifindex;
	r->ifi_flags = dev_get_flags(dev);
	r->ifi_change = 0;	/* Wireless changes don't affect those flags */

	if (nla_put_string(skb, IFLA_IFNAME, dev->name))
		goto nla_put_failure;

	return nlh;
 nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return NULL;
}

/*
 * Main event dispatcher. Called from other parts and drivers.
 * Send the event on the appropriate channels.
 * May be called from interrupt context.
*/
void wireless_send_event(struct net_device *dev, unsigned int cmd,
			 union iwreq_data *wrqu, const char *extra)
{
	const struct iw_ioctl_description *descr = NULL;
	int extra_len = 0;
	struct iw_event *event;		/* Mallocated whole event */
	int event_len;			/* Its size */
	int hdr_len;			/* Size of the event header */
	int wrqu_off = 0;		/* Offset in wrqu */
	/* Don't "optimise" the following variable, it will crash */
	unsigned int	cmd_index;	/* *MUST* be unsigned */
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct nlattr *nla;
#ifdef CONFIG_COMPAT
	struct __compat_iw_event *compat_event;
	struct compat_iw_point compat_wrqu;
	struct sk_buff *compskb;
	int ptr_len;
#endif

	/*
	 * Nothing in the kernel sends scan events with data, be safe.
	 * This is necessary because we cannot fix up scan event data
	 * for compat, due to being contained in 'extra', but normally
	 * applications are required to retrieve the scan data anyway
	 * and no data is included in the event, this codifies that
	 * practice.
	 */
	if (WARN_ON(cmd == SIOCGIWSCAN && extra))
		extra = NULL;

	/* Get the description of the Event: ioctls and events live in
	 * two separate tables, split at SIOCIWLAST. */
	if (cmd <= SIOCIWLAST) {
		cmd_index = IW_IOCTL_IDX(cmd);
		if (cmd_index < standard_ioctl_num)
			descr = &(standard_ioctl[cmd_index]);
	} else {
		cmd_index = IW_EVENT_IDX(cmd);
		if (cmd_index < standard_event_num)
			descr = &(standard_event[cmd_index]);
	}
	/* Don't accept unknown events */
	if (descr == NULL) {
		/* Note : we don't return an error to the driver, because
		 * the driver would not know what to do about it. It can't
		 * return an error to the user, because the event is not
		 * initiated by a user request.
		 * The best the driver could do is to log an error message.
		 * We will do it ourselves instead...
		 */
		netdev_err(dev, "(WE) : Invalid/Unknown Wireless Event (0x%04X)\n",
			   cmd);
		return;
	}

	/* Check extra parameters and set extra_len */
	if (descr->header_type == IW_HEADER_TYPE_POINT) {
		/* Check if number of token fits within bounds */
		if (wrqu->data.length > descr->max_tokens) {
			netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too big (%d)\n",
				   cmd, wrqu->data.length);
			return;
		}
		if (wrqu->data.length < descr->min_tokens) {
			netdev_err(dev, "(WE) : Wireless Event (cmd=0x%04X) too small (%d)\n",
				   cmd, wrqu->data.length);
			return;
		}
		/* Calculate extra_len - extra is NULL for restricted events */
		if (extra != NULL)
			extra_len = wrqu->data.length * descr->token_size;
		/* Always at an offset in wrqu */
		wrqu_off = IW_EV_POINT_OFF;
	}

	/* Total length of the event */
	hdr_len = event_type_size[descr->header_type];
	event_len = hdr_len + extra_len;

	/*
	 * The problem for 64/32 bit.
	 *
	 * On 64-bit, a regular event is laid out as follows:
	 *      |  0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |
	 *      | event.len | event.cmd |     p a d d i n g     |
	 *      | wrqu data ... (with the correct size)         |
	 *
	 * This padding exists because we manipulate event->u,
	 * and 'event' is not packed.
	 *
	 * An iw_point event is laid out like this instead:
	 *      |  0  |  1  |  2  |  3  |  4  |  5  |  6  |  7  |
	 *      | event.len | event.cmd |     p a d d i n g     |
	 *      | iwpnt.len | iwpnt.flg |     p a d d i n g     |
	 *      | extra data ...
	 *
	 * The second padding exists because struct iw_point is extended,
	 * but this depends on the platform...
	 *
	 * On 32-bit, all the padding shouldn't be there.
	 */
	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!skb)
		return;

	/* Send via the RtNetlink event channel */
	nlh = rtnetlink_ifinfo_prep(dev, skb);
	if (WARN_ON(!nlh)) {
		kfree_skb(skb);
		return;
	}

	/* Add the wireless events in the netlink packet */
	nla = nla_reserve(skb, IFLA_WIRELESS, event_len);
	if (!nla) {
		kfree_skb(skb);
		return;
	}
	event = nla_data(nla);

	/* Fill event - first clear to avoid data leaking */
	memset(event, 0, hdr_len);
	event->len = event_len;
	event->cmd = cmd;
	memcpy(&event->u, ((char *) wrqu) + wrqu_off, hdr_len - IW_EV_LCP_LEN);
	if (extra_len)
		memcpy(((char *) event) + hdr_len, extra, extra_len);

	nlmsg_end(skb, nlh);
#ifdef CONFIG_COMPAT
	/* Build a second copy of the message with compat (32-bit) layout;
	 * it rides along as the frag_list of the native skb. */
	hdr_len = compat_event_type_size[descr->header_type];

	/* ptr_len is remaining size in event header apart from LCP */
	ptr_len = hdr_len - IW_EV_COMPAT_LCP_LEN;
	event_len = hdr_len + extra_len;

	compskb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
	if (!compskb) {
		kfree_skb(skb);
		return;
	}

	/* Send via the RtNetlink event channel */
	nlh = rtnetlink_ifinfo_prep(dev, compskb);
	if (WARN_ON(!nlh)) {
		kfree_skb(skb);
		kfree_skb(compskb);
		return;
	}

	/* Add the wireless events in the netlink packet */
	nla = nla_reserve(compskb, IFLA_WIRELESS, event_len);
	if (!nla) {
		kfree_skb(skb);
		kfree_skb(compskb);
		return;
	}
	compat_event = nla_data(nla);

	compat_event->len = event_len;
	compat_event->cmd = cmd;
	if (descr->header_type == IW_HEADER_TYPE_POINT) {
		compat_wrqu.length = wrqu->data.length;
		compat_wrqu.flags = wrqu->data.flags;
		memcpy(compat_event->ptr_bytes,
		       ((char *)&compat_wrqu) + IW_EV_COMPAT_POINT_OFF,
		       ptr_len);
		if (extra_len)
			memcpy(&compat_event->ptr_bytes[ptr_len],
			       extra, extra_len);
	} else {
		/* extra_len must be zero, so no if (extra) needed */
		memcpy(compat_event->ptr_bytes, wrqu, ptr_len);
	}

	nlmsg_end(compskb, nlh);

	skb_shinfo(skb)->frag_list = compskb;
#endif
	skb_queue_tail(&dev_net(dev)->wext_nlevents, skb);
	schedule_work(&wireless_nlevent_work);
}
EXPORT_SYMBOL(wireless_send_event);

#ifdef CONFIG_CFG80211_WEXT
static
void wireless_warn_cfg80211_wext(void) { char name[sizeof(current->comm)]; pr_warn_once("warning: `%s' uses wireless extensions which will stop working for Wi-Fi 7 hardware; use nl80211\n", get_task_comm(name, current)); } #endif /* IW handlers */ struct iw_statistics *get_wireless_stats(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT if ((dev->wireless_handlers != NULL) && (dev->wireless_handlers->get_wireless_stats != NULL)) return dev->wireless_handlers->get_wireless_stats(dev); #endif #ifdef CONFIG_CFG80211_WEXT if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy && dev->ieee80211_ptr->wiphy->wext && dev->ieee80211_ptr->wiphy->wext->get_wireless_stats) { wireless_warn_cfg80211_wext(); if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO | WIPHY_FLAG_DISABLE_WEXT)) return NULL; return dev->ieee80211_ptr->wiphy->wext->get_wireless_stats(dev); } #endif /* not found */ return NULL; } /* noinline to avoid a bogus warning with -O3 */ static noinline int iw_handler_get_iwstats(struct net_device * dev, struct iw_request_info * info, union iwreq_data * wrqu, char * extra) { /* Get stats from the driver */ struct iw_statistics *stats; stats = get_wireless_stats(dev); if (stats) { /* Copy statistics to extra */ memcpy(extra, stats, sizeof(struct iw_statistics)); wrqu->data.length = sizeof(struct iw_statistics); /* Check if we need to clear the updated flag */ if (wrqu->data.flags != 0) stats->qual.updated &= ~IW_QUAL_ALL_UPDATED; return 0; } else return -EOPNOTSUPP; } static iw_handler get_handler(struct net_device *dev, unsigned int cmd) { /* Don't "optimise" the following variable, it will crash */ unsigned int index; /* *MUST* be unsigned */ const struct iw_handler_def *handlers = NULL; #ifdef CONFIG_CFG80211_WEXT if (dev->ieee80211_ptr && dev->ieee80211_ptr->wiphy) { wireless_warn_cfg80211_wext(); if (dev->ieee80211_ptr->wiphy->flags & (WIPHY_FLAG_SUPPORTS_MLO | WIPHY_FLAG_DISABLE_WEXT)) return NULL; handlers = dev->ieee80211_ptr->wiphy->wext; } #endif 
#ifdef CONFIG_WIRELESS_EXT
	if (dev->wireless_handlers)
		handlers = dev->wireless_handlers;
#endif

	if (!handlers)
		return NULL;

	/* Try as a standard command */
	index = IW_IOCTL_IDX(cmd);
	if (index < handlers->num_standard)
		return handlers->standard[index];

#ifdef CONFIG_WEXT_PRIV
	/* Try as a private command */
	index = cmd - SIOCIWFIRSTPRIV;
	if (index < handlers->num_private)
		return handlers->private[index];
#endif

	/* Not found */
	return NULL;
}

/*
 * Handle a wext ioctl whose argument is a user-space iw_point:
 * validates lengths against @descr, copies the payload in (for SET)
 * or out (for GET), calls @handler, and emits a change event when the
 * descriptor asks for one.  Returns 0 or a negative errno.
 */
static int ioctl_standard_iw_point(struct iw_point *iwp, unsigned int cmd,
				   const struct iw_ioctl_description *descr,
				   iw_handler handler, struct net_device *dev,
				   struct iw_request_info *info)
{
	int err, extra_size, user_length = 0, essid_compat = 0;
	char *extra;

	/* Calculate space needed by arguments. Always allocate
	 * for max space.
	 */
	extra_size = descr->max_tokens * descr->token_size;

	/* Check need for ESSID compatibility for WE < 21: old userspace
	 * included the terminating NUL in the length. */
	switch (cmd) {
	case SIOCSIWESSID:
	case SIOCGIWESSID:
	case SIOCSIWNICKN:
	case SIOCGIWNICKN:
		if (iwp->length == descr->max_tokens + 1)
			essid_compat = 1;
		else if (IW_IS_SET(cmd) && (iwp->length != 0)) {
			char essid[IW_ESSID_MAX_SIZE + 1];
			unsigned int len;
			len = iwp->length * descr->token_size;
			if (len > IW_ESSID_MAX_SIZE)
				return -EFAULT;
			err = copy_from_user(essid, iwp->pointer, len);
			if (err)
				return -EFAULT;
			if (essid[iwp->length - 1] == '\0')
				essid_compat = 1;
		}
		break;
	default:
		break;
	}

	iwp->length -= essid_compat;

	/* Check what user space is giving us */
	if (IW_IS_SET(cmd)) {
		/* Check NULL pointer */
		if (!iwp->pointer && iwp->length != 0)
			return -EFAULT;
		/* Check if number of token fits within bounds */
		if (iwp->length > descr->max_tokens)
			return -E2BIG;
		if (iwp->length < descr->min_tokens)
			return -EINVAL;
	} else {
		/* Check NULL pointer */
		if (!iwp->pointer)
			return -EFAULT;
		/* Save user space buffer size for checking */
		user_length = iwp->length;

		/* Don't check if user_length > max to allow forward
		 * compatibility. The test user_length < min is
		 * implied by the test at the end.
		 */

		/* Support for very large requests */
		if ((descr->flags & IW_DESCR_FLAG_NOMAX) &&
		    (user_length > descr->max_tokens)) {
			/* Allow userspace to GET more than max so
			 * we can support any size GET requests.
			 * There is still a limit : -ENOMEM.
			 */
			extra_size = user_length * descr->token_size;

			/* Note : user_length is originally a __u16,
			 * and token_size is controlled by us,
			 * so extra_size won't get negative and
			 * won't overflow...
			 */
		}
	}

	/* Sanity-check to ensure we never end up _allocating_ zero
	 * bytes of data for extra.
	 */
	if (extra_size <= 0)
		return -EFAULT;

	/* kzalloc() ensures NULL-termination for essid_compat. */
	extra = kzalloc(extra_size, GFP_KERNEL);
	if (!extra)
		return -ENOMEM;

	/* If it is a SET, get all the extra data in here */
	if (IW_IS_SET(cmd) && (iwp->length != 0)) {
		if (copy_from_user(extra, iwp->pointer,
				   iwp->length *
				   descr->token_size)) {
			err = -EFAULT;
			goto out;
		}

		if (cmd == SIOCSIWENCODEEXT) {
			struct iw_encode_ext *ee = (void *) extra;

			/* key_len lives inside the copied struct; make sure
			 * the claimed key actually fits in what was copied */
			if (iwp->length < sizeof(*ee) + ee->key_len) {
				err = -EFAULT;
				goto out;
			}
		}
	}

	if (IW_IS_GET(cmd) && !(descr->flags & IW_DESCR_FLAG_NOMAX)) {
		/*
		 * If this is a GET, but not NOMAX, it means that the extra
		 * data is not bounded by userspace, but by max_tokens. Thus
		 * set the length to max_tokens. This matches the extra data
		 * allocation.
		 * The driver should fill it with the number of tokens it
		 * provided, and it may check iwp->length rather than having
		 * knowledge of max_tokens. If the driver doesn't change the
		 * iwp->length, this ioctl just copies back max_token tokens
		 * filled with zeroes. Hopefully the driver isn't claiming
		 * them to be valid data.
		 */
		iwp->length = descr->max_tokens;
	}

	err = handler(dev, info, (union iwreq_data *) iwp, extra);

	iwp->length += essid_compat;

	/* If we have something to return to the user */
	if (!err && IW_IS_GET(cmd)) {
		/* Check if there is enough buffer up there */
		if (user_length < iwp->length) {
			err = -E2BIG;
			goto out;
		}

		if (copy_to_user(iwp->pointer, extra,
				 iwp->length *
				 descr->token_size)) {
			err = -EFAULT;
			goto out;
		}
	}

	/* Generate an event to notify listeners of the change */
	if ((descr->flags & IW_DESCR_FLAG_EVENT) &&
	    ((err == 0) || (err == -EIWCOMMIT))) {
		union iwreq_data *data = (union iwreq_data *) iwp;

		if (descr->flags & IW_DESCR_FLAG_RESTRICT)
			/* If the event is restricted, don't
			 * export the payload.
			 */
			wireless_send_event(dev, cmd, data, NULL);
		else
			wireless_send_event(dev, cmd, data, extra);
	}

out:
	kfree(extra);
	return err;
}

/*
 * Call the commit handler in the driver
 * (if exist and if conditions are right)
 *
 * Note : our current commit strategy is currently pretty dumb,
 * but we will be able to improve on that...
 * The goal is to try to aggregate as many changes as possible
 * before doing the commit. Drivers that will define a commit handler
 * are usually those that need a reset after changing parameters, so
 * we want to minimise the number of reset.
 * A cool idea is to use a timer : at each "set" command, we re-set the
 * timer, when the timer eventually fires, we call the driver.
 * Hopefully, more on that later.
 *
 * Also, I'm waiting to see how many people will complain about the
 * netif_running(dev) test. I'm open on that one...
* Hopefully, the driver will remember to do a commit in "open()" ;-) */ int call_commit_handler(struct net_device *dev) { #ifdef CONFIG_WIRELESS_EXT if (netif_running(dev) && dev->wireless_handlers && dev->wireless_handlers->standard[0]) /* Call the commit handler on the driver */ return dev->wireless_handlers->standard[0](dev, NULL, NULL, NULL); else return 0; /* Command completed successfully */ #else /* cfg80211 has no commit */ return 0; #endif } /* * Main IOCTl dispatcher. * Check the type of IOCTL and call the appropriate wrapper... */ static int wireless_process_ioctl(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { struct net_device *dev; iw_handler handler; /* Permissions are already checked in dev_ioctl() before calling us. * The copy_to/from_user() of ifr is also dealt with in there */ /* Make sure the device exist */ if ((dev = __dev_get_by_name(net, iwr->ifr_name)) == NULL) return -ENODEV; /* A bunch of special cases, then the generic case... * Note that 'cmd' is already filtered in dev_ioctl() with * (cmd >= SIOCIWFIRST && cmd <= SIOCIWLAST) */ if (cmd == SIOCGIWSTATS) return standard(dev, iwr, cmd, info, &iw_handler_get_iwstats); #ifdef CONFIG_WEXT_PRIV if (cmd == SIOCGIWPRIV && dev->wireless_handlers) return standard(dev, iwr, cmd, info, iw_handler_get_private); #endif /* Basic check */ if (!netif_device_present(dev)) return -ENODEV; /* New driver API : try to find the handler */ handler = get_handler(dev, cmd); if (handler) { /* Standard and private are not the same */ if (cmd < SIOCIWFIRSTPRIV) return standard(dev, iwr, cmd, info, handler); else if (private) return private(dev, iwr, cmd, info, handler); } return -EOPNOTSUPP; } /* If command is `set a parameter', or `get the encoding parameters', * check if the user has the right to do it. 
*/ static int wext_permission_check(unsigned int cmd) { if ((IW_IS_SET(cmd) || cmd == SIOCGIWENCODE || cmd == SIOCGIWENCODEEXT) && !capable(CAP_NET_ADMIN)) return -EPERM; return 0; } /* entry point from dev ioctl */ static int wext_ioctl_dispatch(struct net *net, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, wext_ioctl_func standard, wext_ioctl_func private) { int ret = wext_permission_check(cmd); if (ret) return ret; dev_load(net, iwr->ifr_name); rtnl_lock(); ret = wireless_process_ioctl(net, iwr, cmd, info, standard, private); rtnl_unlock(); return ret; } /* * Wrapper to call a standard Wireless Extension handler. * We do various checks and also take care of moving data between * user space and kernel space. */ static int ioctl_standard_call(struct net_device * dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_ioctl_description * descr; int ret = -EINVAL; /* Get the description of the IOCTL */ if (IW_IOCTL_IDX(cmd) >= standard_ioctl_num) return -EOPNOTSUPP; descr = &(standard_ioctl[IW_IOCTL_IDX(cmd)]); /* Check if we have a pointer to user space data or not */ if (descr->header_type != IW_HEADER_TYPE_POINT) { /* No extra arguments. 
Trivial to handle */ ret = handler(dev, info, &(iwr->u), NULL); /* Generate an event to notify listeners of the change */ if ((descr->flags & IW_DESCR_FLAG_EVENT) && ((ret == 0) || (ret == -EIWCOMMIT))) wireless_send_event(dev, cmd, &(iwr->u), NULL); } else { ret = ioctl_standard_iw_point(&iwr->u.data, cmd, descr, handler, dev, info); } /* Call commit handler if needed and defined */ if (ret == -EIWCOMMIT) ret = call_commit_handler(dev); /* Here, we will generate the appropriate event if needed */ return ret; } int wext_handle_ioctl(struct net *net, unsigned int cmd, void __user *arg) { struct iw_request_info info = { .cmd = cmd, .flags = 0 }; struct iwreq iwr; int ret; if (copy_from_user(&iwr, arg, sizeof(iwr))) return -EFAULT; iwr.ifr_name[sizeof(iwr.ifr_name) - 1] = 0; ret = wext_ioctl_dispatch(net, &iwr, cmd, &info, ioctl_standard_call, ioctl_private_call); if (ret >= 0 && IW_IS_GET(cmd) && copy_to_user(arg, &iwr, sizeof(struct iwreq))) return -EFAULT; return ret; } #ifdef CONFIG_COMPAT static int compat_standard_call(struct net_device *dev, struct iwreq *iwr, unsigned int cmd, struct iw_request_info *info, iw_handler handler) { const struct iw_ioctl_description *descr; struct compat_iw_point *iwp_compat; struct iw_point iwp; int err; descr = standard_ioctl + IW_IOCTL_IDX(cmd); if (descr->header_type != IW_HEADER_TYPE_POINT) return ioctl_standard_call(dev, iwr, cmd, info, handler); iwp_compat = (struct compat_iw_point *) &iwr->u.data; iwp.pointer = compat_ptr(iwp_compat->pointer); iwp.length = iwp_compat->length; iwp.flags = iwp_compat->flags; err = ioctl_standard_iw_point(&iwp, cmd, descr, handler, dev, info); iwp_compat->pointer = ptr_to_compat(iwp.pointer); iwp_compat->length = iwp.length; iwp_compat->flags = iwp.flags; return err; } int compat_wext_handle_ioctl(struct net *net, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; struct iw_request_info info; struct iwreq iwr; char *colon; int ret; if (copy_from_user(&iwr, argp, 
sizeof(struct iwreq)))
		return -EFAULT;

	/* NUL-terminate and strip any alias suffix ("eth0:1" -> "eth0") */
	iwr.ifr_name[IFNAMSIZ-1] = 0;
	colon = strchr(iwr.ifr_name, ':');
	if (colon)
		*colon = 0;

	info.cmd = cmd;
	info.flags = IW_REQUEST_FLAG_COMPAT;

	ret = wext_ioctl_dispatch(net, &iwr, cmd, &info,
				  compat_standard_call,
				  compat_private_call);
	if (ret >= 0 &&
	    IW_IS_GET(cmd) &&
	    copy_to_user(argp, &iwr, sizeof(struct iwreq)))
		return -EFAULT;

	return ret;
}
#endif

/*
 * Append a fixed-size event to an event stream, adjusting the length
 * for compat requests.  Returns the new stream position; if the event
 * does not fit before @ends, the stream is returned unchanged.
 */
char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
			   char *ends, struct iw_event *iwe, int event_len)
{
	int lcp_len = iwe_stream_lcp_len(info);

	event_len = iwe_stream_event_len_adjust(info, event_len);

	/* Check if it's possible */
	if (likely((stream + event_len) < ends)) {
		iwe->len = event_len;
		/* Beware of alignment issues on 64 bits */
		memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
		memcpy(stream + lcp_len, &iwe->u,
		       event_len - lcp_len);
		stream += event_len;
	}

	return stream;
}
EXPORT_SYMBOL(iwe_stream_add_event);

/*
 * Append an iw_point event (header + @extra payload of
 * iwe->u.data.length bytes) to an event stream.  Returns the new
 * stream position, unchanged if it would not fit before @ends.
 */
char *iwe_stream_add_point(struct iw_request_info *info, char *stream,
			   char *ends, struct iw_event *iwe, char *extra)
{
	int event_len = iwe_stream_point_len(info) + iwe->u.data.length;
	int point_len = iwe_stream_point_len(info);
	int lcp_len   = iwe_stream_lcp_len(info);

	/* Check if it's possible */
	if (likely((stream + event_len) < ends)) {
		iwe->len = event_len;
		memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
		memcpy(stream + lcp_len,
		       ((char *) &iwe->u) + IW_EV_POINT_OFF,
		       IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
		if (iwe->u.data.length && extra)
			memcpy(stream + point_len, extra,
			       iwe->u.data.length);
		stream += event_len;
	}

	return stream;
}
EXPORT_SYMBOL(iwe_stream_add_point);

/*
 * Append one value to an event whose LCP header is already at @event;
 * @value points past the values added so far.  The event's total
 * length in the LCP header is patched after each addition.  Returns
 * the new value position, unchanged if it would not fit before @ends.
 */
char *iwe_stream_add_value(struct iw_request_info *info, char *event,
			   char *value, char *ends, struct iw_event *iwe,
			   int event_len)
{
	int lcp_len = iwe_stream_lcp_len(info);

	/* Don't duplicate LCP */
	event_len -= IW_EV_LCP_LEN;

	/* Check if it's possible */
	if (likely((value + event_len) < ends)) {
		/* Add new value */
		memcpy(value, &iwe->u, event_len);
		value += event_len;
		/* Patch LCP */
		iwe->len = value - event;
		memcpy(event, (char *) iwe, lcp_len);
	}

	return value;
}
EXPORT_SYMBOL(iwe_stream_add_value);
// SPDX-License-Identifier: GPL-2.0 #include <linux/types.h> #include <linux/string.h> #include <limits.h> #include <stdlib.h> #include <internal/lib.h> // page_size #include "../../../util/machine.h" #include "../../../util/map.h" #include "../../../util/symbol.h" #include <linux/ctype.h> #include <symbol/kallsyms.h> #if defined(__x86_64__) struct extra_kernel_map_info { int cnt; int max_cnt; struct extra_kernel_map *maps; bool get_entry_trampolines; u64 entry_trampoline; }; static int add_extra_kernel_map(struct extra_kernel_map_info *mi, u64 start, u64 end, u64 pgoff, const char *name) { if (mi->cnt >= mi->max_cnt) { void *buf; size_t sz; mi->max_cnt = mi->max_cnt ? mi->max_cnt * 2 : 32; sz = sizeof(struct extra_kernel_map) * mi->max_cnt; buf = realloc(mi->maps, sz); if (!buf) return -1; mi->maps = buf; } mi->maps[mi->cnt].start = start; mi->maps[mi->cnt].end = end; mi->maps[mi->cnt].pgoff = pgoff; strlcpy(mi->maps[mi->cnt].name, name, KMAP_NAME_LEN); mi->cnt += 1; return 0; } static int find_extra_kernel_maps(void *arg, const char *name, char type, u64 start) { struct extra_kernel_map_info *mi = arg; if (!mi->entry_trampoline && kallsyms2elf_binding(type) == STB_GLOBAL && !strcmp(name, "_entry_trampoline")) { mi->entry_trampoline = start; return 0; } if (is_entry_trampoline(name)) { u64 end = start + page_size; return add_extra_kernel_map(mi, start, end, 0, name); } return 0; } int machine__create_extra_kernel_maps(struct machine *machine, struct dso *kernel) { struct extra_kernel_map_info mi = { .cnt = 0, }; char filename[PATH_MAX]; int ret; int i; machine__get_kallsyms_filename(machine, filename, PATH_MAX); if (symbol__restricted_filename(filename, "/proc/kallsyms")) return 0; ret = kallsyms__parse(filename, &mi, find_extra_kernel_maps); if (ret) goto out_free; if (!mi.entry_trampoline) goto out_free; for (i = 0; i < mi.cnt; i++) { struct extra_kernel_map *xm = &mi.maps[i]; xm->pgoff = mi.entry_trampoline; ret = machine__create_extra_kernel_map(machine, 
kernel, xm); if (ret) goto out_free; } machine->trampolines_mapped = mi.cnt; out_free: free(mi.maps); return ret; } #endif
/*
 * QorIQ Elo3 DMA device tree stub [ controller @ offset 0x100300 ]
 *
 * Copyright 2013 Freescale Semiconductor Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Elo3 DMA controller: two status registers plus eight channels mapped
 * through 'ranges' at CCSR offset 0x100100. */
dma0: dma@100300 {
	#address-cells = <1>;
	#size-cells = <1>;
	compatible = "fsl,elo3-dma";
	reg = <0x100300 0x4>,
	      <0x100600 0x4>;
	ranges = <0x0 0x100100 0x500>;
	dma-channel@0 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x0 0x80>;
		interrupts = <28 2 0 0>;
	};
	dma-channel@80 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x80 0x80>;
		interrupts = <29 2 0 0>;
	};
	dma-channel@100 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x100 0x80>;
		interrupts = <30 2 0 0>;
	};
	dma-channel@180 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x180 0x80>;
		interrupts = <31 2 0 0>;
	};
	dma-channel@300 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x300 0x80>;
		interrupts = <76 2 0 0>;
	};
	dma-channel@380 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x380 0x80>;
		interrupts = <77 2 0 0>;
	};
	dma-channel@400 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x400 0x80>;
		interrupts = <78 2 0 0>;
	};
	dma-channel@480 {
		compatible = "fsl,eloplus-dma-channel";
		reg = <0x480 0x80>;
		interrupts = <79 2 0 0>;
	};
};
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2018-2023 Oracle. All Rights Reserved. * Author: Darrick J. Wong <[email protected]> */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_trans_resv.h" #include "xfs_mount.h" #include "xfs_defer.h" #include "xfs_btree.h" #include "xfs_btree_staging.h" #include "xfs_bit.h" #include "xfs_log_format.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_ialloc.h" #include "xfs_ialloc_btree.h" #include "xfs_icache.h" #include "xfs_rmap.h" #include "xfs_rmap_btree.h" #include "xfs_log.h" #include "xfs_trans_priv.h" #include "xfs_error.h" #include "xfs_health.h" #include "xfs_ag.h" #include "scrub/xfs_scrub.h" #include "scrub/scrub.h" #include "scrub/common.h" #include "scrub/btree.h" #include "scrub/trace.h" #include "scrub/repair.h" #include "scrub/bitmap.h" #include "scrub/agb_bitmap.h" #include "scrub/xfile.h" #include "scrub/xfarray.h" #include "scrub/newbt.h" #include "scrub/reap.h" /* * Inode Btree Repair * ================== * * A quick refresher of inode btrees on a v5 filesystem: * * - Inode records are read into memory in units of 'inode clusters'. However * many inodes fit in a cluster buffer is the smallest number of inodes that * can be allocated or freed. Clusters are never smaller than one fs block * though they can span multiple blocks. The size (in fs blocks) is * computed with xfs_icluster_size_fsb(). The fs block alignment of a * cluster is computed with xfs_ialloc_cluster_alignment(). * * - Each inode btree record can describe a single 'inode chunk'. The chunk * size is defined to be 64 inodes. If sparse inodes are enabled, every * inobt record must be aligned to the chunk size; if not, every record must * be aligned to the start of a cluster. 
 * It is possible to construct an XFS
 * geometry where one inobt record maps to multiple inode clusters; it is
 * also possible to construct a geometry where multiple inobt records map to
 * different parts of one inode cluster.
 *
 * - If sparse inodes are not enabled, the smallest unit of allocation for
 *   inode records is enough to contain one inode chunk's worth of inodes.
 *
 * - If sparse inodes are enabled, the holemask field will be active.  Each
 *   bit of the holemask represents 4 potential inodes; if set, the
 *   corresponding space does *not* contain inodes and must be left alone.
 *   Clusters cannot be smaller than 4 inodes.  The smallest unit of
 *   allocation of inode records is one inode cluster.
 *
 * So what's the rebuild algorithm?
 *
 * Iterate the reverse mapping records looking for OWN_INODES and OWN_INOBT
 * records.  The OWN_INOBT records are the old inode btree blocks and will be
 * cleared out after we've rebuilt the tree.  Each possible inode cluster
 * within an OWN_INODES record will be read in; for each possible inobt record
 * associated with that cluster, compute the freemask calculated from the
 * i_mode data in the inode chunk.  For sparse inodes the holemask will be
 * calculated by creating the properly aligned inobt record and punching out
 * any chunk that's missing.  Inode allocations and frees grab the AGI first,
 * so repair protects itself from concurrent access by locking the AGI.
 *
 * Once we've reconstructed all the inode records, we can create new inode
 * btree roots and reload the btrees.  We rebuild both inode trees at the
 * same time because they have the same rmap owner and it would be more
 * complex to figure out if the other tree isn't in need of a rebuild and
 * which OWN_INOBT blocks it owns.  We have all the data we need to build
 * both, so dump everything and start over.
 *
 * We use the prefix 'xrep_ibt' because we rebuild both inode btrees at once.
 */

/* Context for an in-progress rebuild of both inode btrees in one AG. */
struct xrep_ibt {
	/* Record under construction. */
	struct xfs_inobt_rec_incore	rie;

	/* new inobt information */
	struct xrep_newbt		new_inobt;

	/* new finobt information */
	struct xrep_newbt		new_finobt;

	/* Old inode btree blocks we found in the rmap. */
	struct xagb_bitmap		old_iallocbt_blocks;

	/* Reconstructed inode records. */
	struct xfarray			*inode_records;

	/* Owning scrub context. */
	struct xfs_scrub		*sc;

	/* Number of inodes assigned disk space. */
	unsigned int			icount;

	/* Number of inodes in use. */
	unsigned int			iused;

	/* Number of finobt records needed. */
	unsigned int			finobt_recs;

	/* get_records()'s position in the inode record array. */
	xfarray_idx_t			array_cur;
};

/*
 * Is this inode in use?  If the inode is in memory we can tell from i_mode,
 * otherwise we have to check di_mode in the on-disk buffer.  We only care
 * that the high (i.e. non-permission) bits of _mode are zero.  This should
 * be safe because repair keeps all AG headers locked until the end, and
 * process trying to perform an inode allocation/free must lock the AGI.
 *
 * @cluster_ag_base is the inode offset of the cluster within the AG.
 * @cluster_bp is the cluster buffer.
 * @cluster_index is the inode offset within the inode cluster.
 */
STATIC int
xrep_ibt_check_ifree(
	struct xrep_ibt		*ri,
	xfs_agino_t		cluster_ag_base,
	struct xfs_buf		*cluster_bp,
	unsigned int		cluster_index,
	bool			*inuse)
{
	struct xfs_scrub	*sc = ri->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_dinode	*dip;
	xfs_agino_t		agino;
	unsigned int		cluster_buf_base;
	unsigned int		offset;
	int			error;

	agino = cluster_ag_base + cluster_index;

	/* Inode uncached or half assembled, read disk buffer */
	cluster_buf_base = XFS_INO_TO_OFFSET(mp, cluster_ag_base);
	offset = (cluster_buf_base + cluster_index) * mp->m_sb.sb_inodesize;
	/* Reject an offset that would run off the end of the cluster buffer. */
	if (offset >= BBTOB(cluster_bp->b_length))
		return -EFSCORRUPTED;
	dip = xfs_buf_offset(cluster_bp, offset);
	if (be16_to_cpu(dip->di_magic) != XFS_DINODE_MAGIC)
		return -EFSCORRUPTED;
	/* v3 inodes carry their own inumber; it must match where we found it. */
	if (dip->di_version >= 3 &&
	    be64_to_cpu(dip->di_ino) != xfs_agino_to_ino(ri->sc->sa.pag, agino))
		return -EFSCORRUPTED;

	/* Will the in-core inode tell us if it's in use? */
	error = xchk_inode_is_allocated(sc, agino, inuse);
	if (!error)
		return 0;

	/* Fall back to the on-disk mode: nonzero di_mode means in use. */
	*inuse = dip->di_mode != 0;
	return 0;
}

/* Stash the accumulated inobt record for rebuilding. */
STATIC int
xrep_ibt_stash(
	struct xrep_ibt		*ri)
{
	int			error = 0;

	if (xchk_should_terminate(ri->sc, &error))
		return error;

	ri->rie.ir_freecount = xfs_inobt_rec_freecount(&ri->rie);
	if (xfs_inobt_check_irec(ri->sc->sa.pag, &ri->rie) != NULL)
		return -EFSCORRUPTED;

	/* Records with free inodes will also need a finobt record. */
	if (ri->rie.ir_freecount > 0)
		ri->finobt_recs++;

	trace_xrep_ibt_found(ri->sc->sa.pag, &ri->rie);

	error = xfarray_append(ri->inode_records, &ri->rie);
	if (error)
		return error;

	/* Mark the accumulator empty so the next cluster starts fresh. */
	ri->rie.ir_startino = NULLAGINO;
	return 0;
}

/*
 * Given an extent of inodes and an inode cluster buffer, calculate the
 * location of the corresponding inobt record (creating it if necessary),
 * then update the parts of the holemask and freemask of that record that
 * correspond to the inode extent we were given.
 *
 * @cluster_ir_startino is the AG inode number of an inobt record that we're
 * proposing to create for this inode cluster.
 * If sparse inodes are enabled,
 * we must round down to a chunk boundary to find the actual sparse record.
 * @cluster_bp is the buffer of the inode cluster.
 * @nr_inodes is the number of inodes to check from the cluster.
 */
STATIC int
xrep_ibt_cluster_record(
	struct xrep_ibt		*ri,
	xfs_agino_t		cluster_ir_startino,
	struct xfs_buf		*cluster_bp,
	unsigned int		nr_inodes)
{
	struct xfs_scrub	*sc = ri->sc;
	struct xfs_mount	*mp = sc->mp;
	xfs_agino_t		ir_startino;
	unsigned int		cluster_base;
	unsigned int		cluster_index;
	int			error = 0;

	ir_startino = cluster_ir_startino;
	if (xfs_has_sparseinodes(mp))
		ir_startino = rounddown(ir_startino, XFS_INODES_PER_CHUNK);
	/* Offset of this cluster within the (possibly rounded-down) record. */
	cluster_base = cluster_ir_startino - ir_startino;

	/*
	 * If the accumulated inobt record doesn't map this cluster, add it to
	 * the list and reset it.
	 */
	if (ri->rie.ir_startino != NULLAGINO &&
	    ri->rie.ir_startino + XFS_INODES_PER_CHUNK <= ir_startino) {
		error = xrep_ibt_stash(ri);
		if (error)
			return error;
	}

	if (ri->rie.ir_startino == NULLAGINO) {
		/* Start a new record: all free, all holes, no inodes yet. */
		ri->rie.ir_startino = ir_startino;
		ri->rie.ir_free = XFS_INOBT_ALL_FREE;
		ri->rie.ir_holemask = 0xFFFF;
		ri->rie.ir_count = 0;
	}

	/* Record the whole cluster. */
	ri->icount += nr_inodes;
	ri->rie.ir_count += nr_inodes;
	ri->rie.ir_holemask &= ~xfs_inobt_maskn(
				cluster_base / XFS_INODES_PER_HOLEMASK_BIT,
				nr_inodes / XFS_INODES_PER_HOLEMASK_BIT);

	/* Which inodes within this cluster are free? */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		bool		inuse = false;

		error = xrep_ibt_check_ifree(ri, cluster_ir_startino,
				cluster_bp, cluster_index, &inuse);
		if (error)
			return error;
		if (!inuse)
			continue;
		ri->iused++;
		ri->rie.ir_free &= ~XFS_INOBT_MASK(cluster_base +
						   cluster_index);
	}
	return 0;
}

/*
 * For each inode cluster covering the physical extent recorded by the rmapbt,
 * we must calculate the properly aligned startino of that cluster, then
 * iterate each cluster to fill in used and filled masks appropriately.
 * We
 * then use the (startino, used, filled) information to construct the
 * appropriate inode records.
 */
STATIC int
xrep_ibt_process_cluster(
	struct xrep_ibt		*ri,
	xfs_agblock_t		cluster_bno)
{
	struct xfs_imap		imap;
	struct xfs_buf		*cluster_bp;
	struct xfs_scrub	*sc = ri->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agino_t		cluster_ag_base;
	xfs_agino_t		irec_index;
	unsigned int		nr_inodes;
	int			error;

	nr_inodes = min_t(unsigned int, igeo->inodes_per_cluster,
			XFS_INODES_PER_CHUNK);

	/*
	 * Grab the inode cluster buffer.  This is safe to do with a broken
	 * inobt because imap_to_bp directly maps the buffer without touching
	 * either inode btree.
	 */
	imap.im_blkno = xfs_agbno_to_daddr(sc->sa.pag, cluster_bno);
	imap.im_len = XFS_FSB_TO_BB(mp, igeo->blocks_per_cluster);
	imap.im_boffset = 0;
	error = xfs_imap_to_bp(mp, sc->tp, &imap, &cluster_bp);
	if (error)
		return error;

	/*
	 * Record the contents of each possible inobt record mapping this
	 * cluster.
	 */
	cluster_ag_base = XFS_AGB_TO_AGINO(mp, cluster_bno);
	for (irec_index = 0;
	     irec_index < igeo->inodes_per_cluster;
	     irec_index += XFS_INODES_PER_CHUNK) {
		error = xrep_ibt_cluster_record(ri,
				cluster_ag_base + irec_index, cluster_bp,
				nr_inodes);
		if (error)
			break;
	}

	/* Release the cluster buffer whether or not recording succeeded. */
	xfs_trans_brelse(sc->tp, cluster_bp);
	return error;
}

/* Check for any obvious conflicts in the inode chunk extent. */
STATIC int
xrep_ibt_check_inode_ext(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_ino_geometry	*igeo = M_IGEO(mp);
	xfs_agino_t		agino;
	enum xbtree_recpacking	outcome;
	int			error;

	/* Inode records must be within the AG. */
	if (!xfs_verify_agbext(sc->sa.pag, agbno, len))
		return -EFSCORRUPTED;

	/* The entire record must align to the inode cluster size. */
	if (!IS_ALIGNED(agbno, igeo->blocks_per_cluster) ||
	    !IS_ALIGNED(agbno + len, igeo->blocks_per_cluster))
		return -EFSCORRUPTED;

	/*
	 * The entire record must also adhere to the inode cluster alignment
	 * size if sparse inodes are not enabled.
	 */
	if (!xfs_has_sparseinodes(mp) &&
	    (!IS_ALIGNED(agbno, igeo->cluster_align) ||
	     !IS_ALIGNED(agbno + len, igeo->cluster_align)))
		return -EFSCORRUPTED;

	/*
	 * On a sparse inode fs, this cluster could be part of a sparse chunk.
	 * Sparse clusters must be aligned to sparse chunk alignment.
	 */
	if (xfs_has_sparseinodes(mp) && mp->m_sb.sb_spino_align &&
	    (!IS_ALIGNED(agbno, mp->m_sb.sb_spino_align) ||
	     !IS_ALIGNED(agbno + len, mp->m_sb.sb_spino_align)))
		return -EFSCORRUPTED;

	/* Make sure the entire range of blocks are valid AG inodes. */
	agino = XFS_AGB_TO_AGINO(mp, agbno);
	if (!xfs_verify_agino(sc->sa.pag, agino))
		return -EFSCORRUPTED;

	agino = XFS_AGB_TO_AGINO(mp, agbno + len) - 1;
	if (!xfs_verify_agino(sc->sa.pag, agino))
		return -EFSCORRUPTED;

	/* Make sure this isn't free space. */
	error = xfs_alloc_has_records(sc->sa.bno_cur, agbno, len, &outcome);
	if (error)
		return error;
	if (outcome != XBTREE_RECPACKING_EMPTY)
		return -EFSCORRUPTED;

	return 0;
}

/* Found a fragment of the old inode btrees; dispose of them later. */
STATIC int
xrep_ibt_record_old_btree_blocks(
	struct xrep_ibt			*ri,
	const struct xfs_rmap_irec	*rec)
{
	if (!xfs_verify_agbext(ri->sc->sa.pag, rec->rm_startblock,
				rec->rm_blockcount))
		return -EFSCORRUPTED;

	return xagb_bitmap_set(&ri->old_iallocbt_blocks, rec->rm_startblock,
			rec->rm_blockcount);
}

/* Record extents that belong to inode cluster blocks.
 */
STATIC int
xrep_ibt_record_inode_blocks(
	struct xrep_ibt			*ri,
	const struct xfs_rmap_irec	*rec)
{
	struct xfs_mount		*mp = ri->sc->mp;
	struct xfs_ino_geometry		*igeo = M_IGEO(mp);
	xfs_agblock_t			cluster_base;
	int				error;

	error = xrep_ibt_check_inode_ext(ri->sc, rec->rm_startblock,
			rec->rm_blockcount);
	if (error)
		return error;

	trace_xrep_ibt_walk_rmap(ri->sc->sa.pag, rec);

	/*
	 * Record the free/hole masks for each inode cluster that could be
	 * mapped by this rmap record.
	 */
	for (cluster_base = 0;
	     cluster_base < rec->rm_blockcount;
	     cluster_base += igeo->blocks_per_cluster) {
		error = xrep_ibt_process_cluster(ri,
				rec->rm_startblock + cluster_base);
		if (error)
			return error;
	}

	return 0;
}

/* Dispatch one rmap record by owner; unrelated owners are ignored. */
STATIC int
xrep_ibt_walk_rmap(
	struct xfs_btree_cur		*cur,
	const struct xfs_rmap_irec	*rec,
	void				*priv)
{
	struct xrep_ibt			*ri = priv;
	int				error = 0;

	if (xchk_should_terminate(ri->sc, &error))
		return error;

	switch (rec->rm_owner) {
	case XFS_RMAP_OWN_INOBT:
		return xrep_ibt_record_old_btree_blocks(ri, rec);
	case XFS_RMAP_OWN_INODES:
		return xrep_ibt_record_inode_blocks(ri, rec);
	}
	return 0;
}

/*
 * Iterate all reverse mappings to find the inodes (OWN_INODES) and the inode
 * btrees (OWN_INOBT).  Figure out if we have enough free space to reconstruct
 * the inode btrees.  The caller must clean up the lists if anything goes
 * wrong.
 */
STATIC int
xrep_ibt_find_inodes(
	struct xrep_ibt		*ri)
{
	struct xfs_scrub	*sc = ri->sc;
	int			error;

	ri->rie.ir_startino = NULLAGINO;

	/* Collect all reverse mappings for inode blocks. */
	xrep_ag_btcur_init(sc, &sc->sa);
	error = xfs_rmap_query_all(sc->sa.rmap_cur, xrep_ibt_walk_rmap, ri);
	xchk_ag_btcur_free(&sc->sa);
	if (error)
		return error;

	/* If we have a record ready to go, add it to the array. */
	if (ri->rie.ir_startino != NULLAGINO)
		return xrep_ibt_stash(ri);

	return 0;
}

/* Update the AGI counters.
 */
STATIC int
xrep_ibt_reset_counters(
	struct xrep_ibt		*ri)
{
	struct xfs_scrub	*sc = ri->sc;
	struct xfs_agi		*agi = sc->sa.agi_bp->b_addr;
	unsigned int		freecount = ri->icount - ri->iused;

	/* Trigger inode count recalculation */
	xfs_force_summary_recalc(sc->mp);

	/*
	 * The AGI header contains extra information related to the inode
	 * btrees, so we must update those fields here.
	 */
	agi->agi_count = cpu_to_be32(ri->icount);
	agi->agi_freecount = cpu_to_be32(freecount);
	xfs_ialloc_log_agi(sc->tp, sc->sa.agi_bp,
			   XFS_AGI_COUNT | XFS_AGI_FREECOUNT);

	/* Reinitialize with the values we just logged. */
	return xrep_reinit_pagi(sc);
}

/* Retrieve finobt data for bulk load. */
STATIC int
xrep_fibt_get_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xfs_inobt_rec_incore	*irec = &cur->bc_rec.i;
	struct xrep_ibt			*ri = priv;
	union xfs_btree_rec		*block_rec;
	unsigned int			loaded;
	int				error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		/* Skip stashed records that have no free inodes. */
		do {
			error = xfarray_load(ri->inode_records,
					ri->array_cur++, irec);
		} while (error == 0 && xfs_inobt_rec_freecount(irec) == 0);
		if (error)
			return error;

		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

/* Retrieve inobt data for bulk load. */
STATIC int
xrep_ibt_get_records(
	struct xfs_btree_cur	*cur,
	unsigned int		idx,
	struct xfs_btree_block	*block,
	unsigned int		nr_wanted,
	void			*priv)
{
	struct xfs_inobt_rec_incore	*irec = &cur->bc_rec.i;
	struct xrep_ibt			*ri = priv;
	union xfs_btree_rec		*block_rec;
	unsigned int			loaded;
	int				error;

	for (loaded = 0; loaded < nr_wanted; loaded++, idx++) {
		error = xfarray_load(ri->inode_records, ri->array_cur++,
				irec);
		if (error)
			return error;

		block_rec = xfs_btree_rec_addr(cur, idx, block);
		cur->bc_ops->init_rec_from_cur(cur, block_rec);
	}

	return loaded;
}

/* Feed one of the new inobt blocks to the bulk loader.
 */
STATIC int
xrep_ibt_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_ibt		*ri = priv;

	return xrep_newbt_claim_block(cur, &ri->new_inobt, ptr);
}

/* Feed one of the new finobt blocks to the bulk loader. */
STATIC int
xrep_fibt_claim_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	void			*priv)
{
	struct xrep_ibt		*ri = priv;

	return xrep_newbt_claim_block(cur, &ri->new_finobt, ptr);
}

/* Make sure the records do not overlap in inumber address space. */
STATIC int
xrep_ibt_check_overlap(
	struct xrep_ibt		*ri)
{
	struct xfs_inobt_rec_incore	irec;
	xfarray_idx_t		cur;
	xfs_agino_t		next_agino = 0;
	int			error = 0;

	foreach_xfarray_idx(ri->inode_records, cur) {
		if (xchk_should_terminate(ri->sc, &error))
			return error;

		error = xfarray_load(ri->inode_records, cur, &irec);
		if (error)
			return error;

		/* Records were collected in ascending agino order. */
		if (irec.ir_startino < next_agino)
			return -EFSCORRUPTED;

		next_agino = irec.ir_startino + XFS_INODES_PER_CHUNK;
	}

	return error;
}

/* Build new inode btrees and dispose of the old one. */
STATIC int
xrep_ibt_build_new_trees(
	struct xrep_ibt		*ri)
{
	struct xfs_scrub	*sc = ri->sc;
	struct xfs_btree_cur	*ino_cur;
	struct xfs_btree_cur	*fino_cur = NULL;
	bool			need_finobt;
	int			error;

	need_finobt = xfs_has_finobt(sc->mp);

	/*
	 * Create new btrees for staging all the inobt records we collected
	 * earlier.  The records were collected in order of increasing agino,
	 * so we do not have to sort them.  Ensure there are no overlapping
	 * records.
	 */
	error = xrep_ibt_check_overlap(ri);
	if (error)
		return error;

	/*
	 * The new inode btrees will not be rooted in the AGI until we've
	 * successfully rebuilt the tree.
	 *
	 * Start by setting up the inobt staging cursor.
	 */
	xrep_newbt_init_ag(&ri->new_inobt, sc, &XFS_RMAP_OINFO_INOBT,
			xfs_agbno_to_fsb(sc->sa.pag, XFS_IBT_BLOCK(sc->mp)),
			XFS_AG_RESV_NONE);
	ri->new_inobt.bload.claim_block = xrep_ibt_claim_block;
	ri->new_inobt.bload.get_records = xrep_ibt_get_records;
	ino_cur = xfs_inobt_init_cursor(sc->sa.pag, NULL, NULL);
	xfs_btree_stage_afakeroot(ino_cur, &ri->new_inobt.afake);
	error = xfs_btree_bload_compute_geometry(ino_cur, &ri->new_inobt.bload,
			xfarray_length(ri->inode_records));
	if (error)
		goto err_inocur;

	/* Set up finobt staging cursor. */
	if (need_finobt) {
		enum xfs_ag_resv_type	resv = XFS_AG_RESV_METADATA;

		if (sc->mp->m_finobt_nores)
			resv = XFS_AG_RESV_NONE;
		xrep_newbt_init_ag(&ri->new_finobt, sc, &XFS_RMAP_OINFO_INOBT,
				xfs_agbno_to_fsb(sc->sa.pag,
					XFS_FIBT_BLOCK(sc->mp)),
				resv);
		ri->new_finobt.bload.claim_block = xrep_fibt_claim_block;
		ri->new_finobt.bload.get_records = xrep_fibt_get_records;
		fino_cur = xfs_finobt_init_cursor(sc->sa.pag, NULL, NULL);
		xfs_btree_stage_afakeroot(fino_cur, &ri->new_finobt.afake);
		error = xfs_btree_bload_compute_geometry(fino_cur,
				&ri->new_finobt.bload, ri->finobt_recs);
		if (error)
			goto err_finocur;
	}

	/* Last chance to abort before we start committing fixes. */
	if (xchk_should_terminate(sc, &error))
		goto err_finocur;

	/* Reserve all the space we need to build the new btrees. */
	error = xrep_newbt_alloc_blocks(&ri->new_inobt,
			ri->new_inobt.bload.nr_blocks);
	if (error)
		goto err_finocur;

	if (need_finobt) {
		error = xrep_newbt_alloc_blocks(&ri->new_finobt,
				ri->new_finobt.bload.nr_blocks);
		if (error)
			goto err_finocur;
	}

	/* Add all inobt records. */
	ri->array_cur = XFARRAY_CURSOR_INIT;
	error = xfs_btree_bload(ino_cur, &ri->new_inobt.bload, ri);
	if (error)
		goto err_finocur;

	/* Add all finobt records. */
	if (need_finobt) {
		ri->array_cur = XFARRAY_CURSOR_INIT;
		error = xfs_btree_bload(fino_cur, &ri->new_finobt.bload, ri);
		if (error)
			goto err_finocur;
	}

	/*
	 * Install the new btrees in the AG header.  After this point the old
	 * btrees are no longer accessible and the new trees are live.
	 */
	xfs_inobt_commit_staged_btree(ino_cur, sc->tp, sc->sa.agi_bp);
	xfs_btree_del_cursor(ino_cur, 0);
	if (fino_cur) {
		xfs_inobt_commit_staged_btree(fino_cur, sc->tp, sc->sa.agi_bp);
		xfs_btree_del_cursor(fino_cur, 0);
	}

	/* Reset the AGI counters now that we've changed the inode roots. */
	error = xrep_ibt_reset_counters(ri);
	if (error)
		goto err_finobt;

	/* Free unused blocks and bitmap. */
	if (need_finobt) {
		error = xrep_newbt_commit(&ri->new_finobt);
		if (error)
			goto err_inobt;
	}
	error = xrep_newbt_commit(&ri->new_inobt);
	if (error)
		return error;

	return xrep_roll_ag_trans(sc);

err_finocur:
	if (need_finobt)
		xfs_btree_del_cursor(fino_cur, error);
err_inocur:
	xfs_btree_del_cursor(ino_cur, error);
err_finobt:
	if (need_finobt)
		xrep_newbt_cancel(&ri->new_finobt);
err_inobt:
	xrep_newbt_cancel(&ri->new_inobt);
	return error;
}

/*
 * Now that we've logged the roots of the new btrees, invalidate all of the
 * old blocks and free them.
 */
STATIC int
xrep_ibt_remove_old_trees(
	struct xrep_ibt		*ri)
{
	struct xfs_scrub	*sc = ri->sc;
	int			error;

	/*
	 * Free the old inode btree blocks if they're not in use.  It's ok to
	 * reap with XFS_AG_RESV_NONE even if the finobt had a per-AG
	 * reservation because we reset the reservation before releasing the
	 * AGI and AGF header buffer locks.
	 */
	error = xrep_reap_agblocks(sc, &ri->old_iallocbt_blocks,
			&XFS_RMAP_OINFO_INOBT, XFS_AG_RESV_NONE);
	if (error)
		return error;

	/*
	 * If the finobt is enabled and has a per-AG reservation, make sure we
	 * reinitialize the per-AG reservations.
	 */
	if (xfs_has_finobt(sc->mp) && !sc->mp->m_finobt_nores)
		sc->flags |= XREP_RESET_PERAG_RESV;

	return 0;
}

/* Repair both inode btrees. */
int
xrep_iallocbt(
	struct xfs_scrub	*sc)
{
	struct xrep_ibt		*ri;
	struct xfs_mount	*mp = sc->mp;
	char			*descr;
	xfs_agino_t		first_agino, last_agino;
	int			error = 0;

	/* We require the rmapbt to rebuild anything. */
	if (!xfs_has_rmapbt(mp))
		return -EOPNOTSUPP;

	ri = kzalloc(sizeof(struct xrep_ibt), XCHK_GFP_FLAGS);
	if (!ri)
		return -ENOMEM;
	ri->sc = sc;

	/* We rebuild both inode btrees. */
	sc->sick_mask = XFS_SICK_AG_INOBT | XFS_SICK_AG_FINOBT;

	/* Set up enough storage to handle an AG with nothing but inodes. */
	xfs_agino_range(mp, pag_agno(sc->sa.pag), &first_agino, &last_agino);
	last_agino /= XFS_INODES_PER_CHUNK;
	descr = xchk_xfile_ag_descr(sc, "inode index records");
	error = xfarray_create(descr, last_agino,
			sizeof(struct xfs_inobt_rec_incore),
			&ri->inode_records);
	kfree(descr);
	if (error)
		goto out_ri;

	/* Collect the inode data and find the old btree blocks. */
	xagb_bitmap_init(&ri->old_iallocbt_blocks);
	error = xrep_ibt_find_inodes(ri);
	if (error)
		goto out_bitmap;

	/* Rebuild the inode indexes. */
	error = xrep_ibt_build_new_trees(ri);
	if (error)
		goto out_bitmap;

	/* Kill the old tree.  (Falls through to cleanup either way.) */
	error = xrep_ibt_remove_old_trees(ri);
	if (error)
		goto out_bitmap;

out_bitmap:
	xagb_bitmap_destroy(&ri->old_iallocbt_blocks);
	xfarray_destroy(ri->inode_records);
out_ri:
	kfree(ri);
	return error;
}

/* Make sure both btrees are ok after we've rebuilt them. */
int
xrep_revalidate_iallocbt(
	struct xfs_scrub	*sc)
{
	__u32			old_type = sc->sm->sm_type;
	int			error;

	/*
	 * We must update sm_type temporarily so that the tree-to-tree cross
	 * reference checks will work in the correct direction, and also so
	 * that tracing will report correctly if there are more errors.
	 */
	sc->sm->sm_type = XFS_SCRUB_TYPE_INOBT;
	error = xchk_iallocbt(sc);
	if (error)
		goto out;

	if (xfs_has_finobt(sc->mp)) {
		sc->sm->sm_type = XFS_SCRUB_TYPE_FINOBT;
		error = xchk_iallocbt(sc);
	}

out:
	sc->sm->sm_type = old_type;
	return error;
}
/* longlong.h -- definitions for mixed size 32/64 bit arithmetic.
 * Note: I added some stuff for use with gnupg
 *
 * Copyright (C) 1991, 1992, 1993, 1994, 1996, 1998,
 *	2000, 2001, 2002, 2003 Free Software Foundation, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU Library General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU Library General Public
 * License for more details.
 *
 * You should have received a copy of the GNU Library General Public License
 * along with this file; see the file COPYING.LIB.  If not, write to
 * the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA 02111-1307, USA. */

#include <linux/count_zeros.h>

/* You have to define the following before including this file:
 *
 * UWtype -- An unsigned type, default type for operations (typically a "word")
 * UHWtype -- An unsigned type, at least half the size of UWtype.
 * UDWtype -- An unsigned type, at least twice as large a UWtype
 * W_TYPE_SIZE -- size in bits of UWtype
 *
 * SItype, USItype -- Signed and unsigned 32 bit types.
 * DItype, UDItype -- Signed and unsigned 64 bit types.
 *
 * On a 32 bit machine UWtype should typically be USItype;
 * on a 64 bit machine, UWtype should typically be UDItype.
 */

/* Quarter-word width and half-word split helpers for the generic fallbacks. */
#define __BITS4 (W_TYPE_SIZE / 4)
#define __ll_B ((UWtype) 1 << (W_TYPE_SIZE / 2))
#define __ll_lowpart(t) ((UWtype) (t) & (__ll_B - 1))
#define __ll_highpart(t) ((UWtype) (t) >> (W_TYPE_SIZE / 2))

/* This is used to make sure no undesirable sharing between different libraries
	that use this file takes place.  */
#ifndef __MPN
#define __MPN(x) __##x
#endif

/* Define auxiliary asm macros.
* * 1) umul_ppmm(high_prod, low_prod, multiplier, multiplicand) multiplies two * UWtype integers MULTIPLIER and MULTIPLICAND, and generates a two UWtype * word product in HIGH_PROD and LOW_PROD. * * 2) __umulsidi3(a,b) multiplies two UWtype integers A and B, and returns a * UDWtype product. This is just a variant of umul_ppmm. * 3) udiv_qrnnd(quotient, remainder, high_numerator, low_numerator, * denominator) divides a UDWtype, composed by the UWtype integers * HIGH_NUMERATOR and LOW_NUMERATOR, by DENOMINATOR and places the quotient * in QUOTIENT and the remainder in REMAINDER. HIGH_NUMERATOR must be less * than DENOMINATOR for correct operation. If, in addition, the most * significant bit of DENOMINATOR must be 1, then the pre-processor symbol * UDIV_NEEDS_NORMALIZATION is defined to 1. * 4) sdiv_qrnnd(quotient, remainder, high_numerator, low_numerator, * denominator). Like udiv_qrnnd but the numbers are signed. The quotient * is rounded towards 0. * * 5) count_leading_zeros(count, x) counts the number of zero-bits from the * msb to the first non-zero bit in the UWtype X. This is the number of * steps X needs to be shifted left to set the msb. Undefined for X == 0, * unless the symbol COUNT_LEADING_ZEROS_0 is defined to some value. * * 6) count_trailing_zeros(count, x) like count_leading_zeros, but counts * from the least significant end. * * 7) add_ssaaaa(high_sum, low_sum, high_addend_1, low_addend_1, * high_addend_2, low_addend_2) adds two UWtype integers, composed by * HIGH_ADDEND_1 and LOW_ADDEND_1, and HIGH_ADDEND_2 and LOW_ADDEND_2 * respectively. The result is placed in HIGH_SUM and LOW_SUM. Overflow * (i.e. carry out) is not stored anywhere, and is lost. * * 8) sub_ddmmss(high_difference, low_difference, high_minuend, low_minuend, * high_subtrahend, low_subtrahend) subtracts two two-word UWtype integers, * composed by HIGH_MINUEND_1 and LOW_MINUEND_1, and HIGH_SUBTRAHEND_2 and * LOW_SUBTRAHEND_2 respectively. 
The result is placed in HIGH_DIFFERENCE * and LOW_DIFFERENCE. Overflow (i.e. carry out) is not stored anywhere, * and is lost. * * If any of these macros are left undefined for a particular CPU, * C macros are used. */ /* The CPUs come in alphabetical order below. * * Please add support for more CPUs here, or improve the current support * for the CPUs below! */ #if defined(__GNUC__) && !defined(NO_ASM) /* We sometimes need to clobber "cc" with gcc2, but that would not be understood by gcc1. Use cpp to avoid major code duplication. */ #if __GNUC__ < 2 #define __CLOBBER_CC #define __AND_CLOBBER_CC #else /* __GNUC__ >= 2 */ #define __CLOBBER_CC : "cc" #define __AND_CLOBBER_CC , "cc" #endif /* __GNUC__ < 2 */ /*************************************** ************** A29K ***************** ***************************************/ #if (defined(__a29k__) || defined(_AM29K)) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("add %1,%4,%5\n" \ "addc %0,%2,%3" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%r" ((USItype)(ah)), \ "rI" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("sub %1,%4,%5\n" \ "subc %0,%2,%3" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(bh)), \ "r" ((USItype)(al)), \ "rI" ((USItype)(bl))) #define umul_ppmm(xh, xl, m0, m1) \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("multiplu %0,%1,%2" \ : "=r" ((USItype)(xl)) \ : "r" (__m0), \ "r" (__m1)); \ __asm__ ("multmu %0,%1,%2" \ : "=r" ((USItype)(xh)) \ : "r" (__m0), \ "r" (__m1)); \ } while (0) #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("dividu %0,%3,%4" \ : "=r" ((USItype)(q)), \ "=q" ((USItype)(r)) \ : "1" ((USItype)(n1)), \ "r" ((USItype)(n0)), \ "r" ((USItype)(d))) #endif /* __a29k__ */ #if defined(__alpha) && W_TYPE_SIZE == 64 #define umul_ppmm(ph, pl, m0, m1) \ do { \ UDItype __m0 = (m0), __m1 = (m1); \ (ph) = __builtin_alpha_umulh(__m0, 
__m1); \ (pl) = __m0 * __m1; \ } while (0) #define UMUL_TIME 46 #ifndef LONGLONG_STANDALONE #define udiv_qrnnd(q, r, n1, n0, d) \ do { UDItype __r; \ (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ (r) = __r; \ } while (0) extern UDItype __udiv_qrnnd(UDItype *, UDItype, UDItype, UDItype); #define UDIV_TIME 220 #endif /* LONGLONG_STANDALONE */ #endif /* __alpha */ /*************************************** ************** ARM ****************** ***************************************/ #if defined(__arm__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("adds %1, %4, %5\n" \ "adc %0, %2, %3" \ : "=r" (sh), \ "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "rI" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subs %1, %4, %5\n" \ "sbc %0, %2, %3" \ : "=r" (sh), \ "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(bh)), \ "r" ((USItype)(al)), \ "rI" ((USItype)(bl))) #if defined __ARM_ARCH_2__ || defined __ARM_ARCH_3__ #define umul_ppmm(xh, xl, a, b) \ __asm__ ("@ Inlined umul_ppmm\n" \ "mov %|r0, %2, lsr #16 @ AAAA\n" \ "mov %|r2, %3, lsr #16 @ BBBB\n" \ "bic %|r1, %2, %|r0, lsl #16 @ aaaa\n" \ "bic %0, %3, %|r2, lsl #16 @ bbbb\n" \ "mul %1, %|r1, %|r2 @ aaaa * BBBB\n" \ "mul %|r2, %|r0, %|r2 @ AAAA * BBBB\n" \ "mul %|r1, %0, %|r1 @ aaaa * bbbb\n" \ "mul %0, %|r0, %0 @ AAAA * bbbb\n" \ "adds %|r0, %1, %0 @ central sum\n" \ "addcs %|r2, %|r2, #65536\n" \ "adds %1, %|r1, %|r0, lsl #16\n" \ "adc %0, %|r2, %|r0, lsr #16" \ : "=&r" (xh), \ "=r" (xl) \ : "r" ((USItype)(a)), \ "r" ((USItype)(b)) \ : "r0", "r1", "r2") #else #define umul_ppmm(xh, xl, a, b) \ __asm__ ("@ Inlined umul_ppmm\n" \ "umull %1, %0, %2, %3" \ : "=&r" (xh), \ "=&r" (xl) \ : "r" ((USItype)(a)), \ "r" ((USItype)(b)) \ : "r0", "r1") #endif #define UMUL_TIME 20 #define UDIV_TIME 100 #endif /* __arm__ */ /*************************************** ************** CLIPPER ************** ***************************************/ 
#if defined(__clipper__) && W_TYPE_SIZE == 32 #define umul_ppmm(w1, w0, u, v) \ ({union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __xx; \ __asm__ ("mulwux %2,%0" \ : "=r" (__xx.__ll) \ : "%0" ((USItype)(u)), \ "r" ((USItype)(v))); \ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) #define smul_ppmm(w1, w0, u, v) \ ({union {DItype __ll; \ struct {SItype __l, __h; } __i; \ } __xx; \ __asm__ ("mulwx %2,%0" \ : "=r" (__xx.__ll) \ : "%0" ((SItype)(u)), \ "r" ((SItype)(v))); \ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) #define __umulsidi3(u, v) \ ({UDItype __w; \ __asm__ ("mulwux %2,%0" \ : "=r" (__w) \ : "%0" ((USItype)(u)), \ "r" ((USItype)(v))); \ __w; }) #endif /* __clipper__ */ /*************************************** ************** GMICRO *************** ***************************************/ #if defined(__gmicro__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("add.w %5,%1\n" \ "addx %3,%0" \ : "=g" ((USItype)(sh)), \ "=&g" ((USItype)(sl)) \ : "%0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("sub.w %5,%1\n" \ "subx %3,%0" \ : "=g" ((USItype)(sh)), \ "=&g" ((USItype)(sl)) \ : "0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define umul_ppmm(ph, pl, m0, m1) \ __asm__ ("mulx %3,%0,%1" \ : "=g" ((USItype)(ph)), \ "=r" ((USItype)(pl)) \ : "%0" ((USItype)(m0)), \ "g" ((USItype)(m1))) #define udiv_qrnnd(q, r, nh, nl, d) \ __asm__ ("divx %4,%0,%1" \ : "=g" ((USItype)(q)), \ "=r" ((USItype)(r)) \ : "1" ((USItype)(nh)), \ "0" ((USItype)(nl)), \ "g" ((USItype)(d))) #endif /*************************************** ************** HPPA ***************** ***************************************/ #if defined(__hppa) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("add %4,%5,%1\n" \ "addc %2,%3,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%rM" ((USItype)(ah)), \ 
"rM" ((USItype)(bh)), \ "%rM" ((USItype)(al)), \ "rM" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("sub %4,%5,%1\n" \ "subb %2,%3,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "rM" ((USItype)(ah)), \ "rM" ((USItype)(bh)), \ "rM" ((USItype)(al)), \ "rM" ((USItype)(bl))) #if 0 && defined(_PA_RISC1_1) /* xmpyu uses floating point register which is not allowed in Linux kernel. */ #define umul_ppmm(wh, wl, u, v) \ do { \ union {UDItype __ll; \ struct {USItype __h, __l; } __i; \ } __xx; \ __asm__ ("xmpyu %1,%2,%0" \ : "=*f" (__xx.__ll) \ : "*f" ((USItype)(u)), \ "*f" ((USItype)(v))); \ (wh) = __xx.__i.__h; \ (wl) = __xx.__i.__l; \ } while (0) #define UMUL_TIME 8 #define UDIV_TIME 60 #else #define UMUL_TIME 40 #define UDIV_TIME 80 #endif #if 0 /* #ifndef LONGLONG_STANDALONE */ #define udiv_qrnnd(q, r, n1, n0, d) \ do { USItype __r; \ (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \ (r) = __r; \ } while (0) extern USItype __udiv_qrnnd(); #endif /* LONGLONG_STANDALONE */ #endif /* hppa */ /*************************************** ************** I370 ***************** ***************************************/ #if (defined(__i370__) || defined(__mvs__)) && W_TYPE_SIZE == 32 #define umul_ppmm(xh, xl, m0, m1) \ do { \ union {UDItype __ll; \ struct {USItype __h, __l; } __i; \ } __xx; \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mr %0,%3" \ : "=r" (__xx.__i.__h), \ "=r" (__xx.__i.__l) \ : "%1" (__m0), \ "r" (__m1)); \ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ (xh) += ((((SItype) __m0 >> 31) & __m1) \ + (((SItype) __m1 >> 31) & __m0)); \ } while (0) #define smul_ppmm(xh, xl, m0, m1) \ do { \ union {DItype __ll; \ struct {USItype __h, __l; } __i; \ } __xx; \ __asm__ ("mr %0,%3" \ : "=r" (__xx.__i.__h), \ "=r" (__xx.__i.__l) \ : "%1" (m0), \ "r" (m1)); \ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ } while (0) #define sdiv_qrnnd(q, r, n1, n0, d) \ do { \ union {DItype __ll; \ struct {USItype __h, __l; } __i; \ } __xx; \ __xx.__i.__h = n1; 
__xx.__i.__l = n0; \ __asm__ ("dr %0,%2" \ : "=r" (__xx.__ll) \ : "0" (__xx.__ll), "r" (d)); \ (q) = __xx.__i.__l; (r) = __xx.__i.__h; \ } while (0) #endif /*************************************** ************** I386 ***************** ***************************************/ #undef __i386__ #if (defined(__i386__) || defined(__i486__)) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("addl %5,%1\n" \ "adcl %3,%0" \ : "=r" (sh), \ "=&r" (sl) \ : "%0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subl %5,%1\n" \ "sbbl %3,%0" \ : "=r" (sh), \ "=&r" (sl) \ : "0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define umul_ppmm(w1, w0, u, v) \ __asm__ ("mull %3" \ : "=a" (w0), \ "=d" (w1) \ : "%0" ((USItype)(u)), \ "rm" ((USItype)(v))) #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("divl %4" \ : "=a" (q), \ "=d" (r) \ : "0" ((USItype)(n0)), \ "1" ((USItype)(n1)), \ "rm" ((USItype)(d))) #ifndef UMUL_TIME #define UMUL_TIME 40 #endif #ifndef UDIV_TIME #define UDIV_TIME 40 #endif #endif /* 80x86 */ /*************************************** ************** I860 ***************** ***************************************/ #if defined(__i860__) && W_TYPE_SIZE == 32 #define rshift_rhlc(r, h, l, c) \ __asm__ ("shr %3,r0,r0\n" \ "shrd %1,%2,%0" \ "=r" (r) : "r" (h), "r" (l), "rn" (c)) #endif /* i860 */ /*************************************** ************** I960 ***************** ***************************************/ #if defined(__i960__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("cmpo 1,0\n" \ "addc %5,%4,%1\n" \ "addc %3,%2,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%dI" ((USItype)(ah)), \ "dI" ((USItype)(bh)), \ "%dI" ((USItype)(al)), \ "dI" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("cmpo 0,0\n" \ "subc %5,%4,%1\n" \ "subc %3,%2,%0" \ : "=r" 
((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "dI" ((USItype)(ah)), \ "dI" ((USItype)(bh)), \ "dI" ((USItype)(al)), \ "dI" ((USItype)(bl))) #define umul_ppmm(w1, w0, u, v) \ ({union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __xx; \ __asm__ ("emul %2,%1,%0" \ : "=d" (__xx.__ll) \ : "%dI" ((USItype)(u)), \ "dI" ((USItype)(v))); \ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) #define __umulsidi3(u, v) \ ({UDItype __w; \ __asm__ ("emul %2,%1,%0" \ : "=d" (__w) \ : "%dI" ((USItype)(u)), \ "dI" ((USItype)(v))); \ __w; }) #define udiv_qrnnd(q, r, nh, nl, d) \ do { \ union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __nn; \ __nn.__i.__h = (nh); __nn.__i.__l = (nl); \ __asm__ ("ediv %d,%n,%0" \ : "=d" (__rq.__ll) \ : "dI" (__nn.__ll), \ "dI" ((USItype)(d))); \ (r) = __rq.__i.__l; (q) = __rq.__i.__h; \ } while (0) #if defined(__i960mx) /* what is the proper symbol to test??? */ #define rshift_rhlc(r, h, l, c) \ do { \ union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __nn; \ __nn.__i.__h = (h); __nn.__i.__l = (l); \ __asm__ ("shre %2,%1,%0" \ : "=d" (r) : "dI" (__nn.__ll), "dI" (c)); \ } #endif /* i960mx */ #endif /* i960 */ /*************************************** ************** 68000 **************** ***************************************/ #if (defined(__mc68000__) || defined(__mc68020__) || defined(__NeXT__) || defined(mc68020)) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("add%.l %5,%1\n" \ "addx%.l %3,%0" \ : "=d" ((USItype)(sh)), \ "=&d" ((USItype)(sl)) \ : "%0" ((USItype)(ah)), \ "d" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("sub%.l %5,%1\n" \ "subx%.l %3,%0" \ : "=d" ((USItype)(sh)), \ "=&d" ((USItype)(sl)) \ : "0" ((USItype)(ah)), \ "d" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "g" ((USItype)(bl))) #if (defined(__mc68020__) || defined(__NeXT__) || defined(mc68020)) #define umul_ppmm(w1, w0, u, v) \ __asm__ ("mulu%.l %3,%1:%0" \ : 
"=d" ((USItype)(w0)), \ "=d" ((USItype)(w1)) \ : "%0" ((USItype)(u)), \ "dmi" ((USItype)(v))) #define UMUL_TIME 45 #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("divu%.l %4,%1:%0" \ : "=d" ((USItype)(q)), \ "=d" ((USItype)(r)) \ : "0" ((USItype)(n0)), \ "1" ((USItype)(n1)), \ "dmi" ((USItype)(d))) #define UDIV_TIME 90 #define sdiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("divs%.l %4,%1:%0" \ : "=d" ((USItype)(q)), \ "=d" ((USItype)(r)) \ : "0" ((USItype)(n0)), \ "1" ((USItype)(n1)), \ "dmi" ((USItype)(d))) #else /* not mc68020 */ #define umul_ppmm(xh, xl, a, b) \ do { USItype __umul_tmp1, __umul_tmp2; \ __asm__ ("| Inlined umul_ppmm\n" \ "move%.l %5,%3\n" \ "move%.l %2,%0\n" \ "move%.w %3,%1\n" \ "swap %3\n" \ "swap %0\n" \ "mulu %2,%1\n" \ "mulu %3,%0\n" \ "mulu %2,%3\n" \ "swap %2\n" \ "mulu %5,%2\n" \ "add%.l %3,%2\n" \ "jcc 1f\n" \ "add%.l %#0x10000,%0\n" \ "1: move%.l %2,%3\n" \ "clr%.w %2\n" \ "swap %2\n" \ "swap %3\n" \ "clr%.w %3\n" \ "add%.l %3,%1\n" \ "addx%.l %2,%0\n" \ "| End inlined umul_ppmm" \ : "=&d" ((USItype)(xh)), "=&d" ((USItype)(xl)), \ "=d" (__umul_tmp1), "=&d" (__umul_tmp2) \ : "%2" ((USItype)(a)), "d" ((USItype)(b))); \ } while (0) #define UMUL_TIME 100 #define UDIV_TIME 400 #endif /* not mc68020 */ #endif /* mc68000 */ /*************************************** ************** 88000 **************** ***************************************/ #if defined(__m88000__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("addu.co %1,%r4,%r5\n" \ "addu.ci %0,%r2,%r3" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%rJ" ((USItype)(ah)), \ "rJ" ((USItype)(bh)), \ "%rJ" ((USItype)(al)), \ "rJ" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subu.co %1,%r4,%r5\n" \ "subu.ci %0,%r2,%r3" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "rJ" ((USItype)(ah)), \ "rJ" ((USItype)(bh)), \ "rJ" ((USItype)(al)), \ "rJ" ((USItype)(bl))) #if defined(__m88110__) #define umul_ppmm(wh, wl, u, v) \ do { \ union 
{UDItype __ll; \ struct {USItype __h, __l; } __i; \ } __x; \ __asm__ ("mulu.d %0,%1,%2" : "=r" (__x.__ll) : "r" (u), "r" (v)); \ (wh) = __x.__i.__h; \ (wl) = __x.__i.__l; \ } while (0) #define udiv_qrnnd(q, r, n1, n0, d) \ ({union {UDItype __ll; \ struct {USItype __h, __l; } __i; \ } __x, __q; \ __x.__i.__h = (n1); __x.__i.__l = (n0); \ __asm__ ("divu.d %0,%1,%2" \ : "=r" (__q.__ll) : "r" (__x.__ll), "r" (d)); \ (r) = (n0) - __q.__l * (d); (q) = __q.__l; }) #define UMUL_TIME 5 #define UDIV_TIME 25 #else #define UMUL_TIME 17 #define UDIV_TIME 150 #endif /* __m88110__ */ #endif /* __m88000__ */ /*************************************** ************** MIPS ***************** ***************************************/ #if defined(__mips__) && W_TYPE_SIZE == 32 #define umul_ppmm(w1, w0, u, v) \ do { \ UDItype __ll = (UDItype)(u) * (v); \ w1 = __ll >> 32; \ w0 = __ll; \ } while (0) #define UMUL_TIME 10 #define UDIV_TIME 100 #endif /* __mips__ */ /*************************************** ************** MIPS/64 ************** ***************************************/ #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64 #if defined(__mips_isa_rev) && __mips_isa_rev >= 6 && defined(CONFIG_CC_IS_GCC) /* * GCC ends up emitting a __multi3 intrinsic call for MIPS64r6 with the plain C * code below, so we special case MIPS64r6 until the compiler can do better. 
*/ #define umul_ppmm(w1, w0, u, v) \ do { \ __asm__ ("dmulu %0,%1,%2" \ : "=d" ((UDItype)(w0)) \ : "d" ((UDItype)(u)), \ "d" ((UDItype)(v))); \ __asm__ ("dmuhu %0,%1,%2" \ : "=d" ((UDItype)(w1)) \ : "d" ((UDItype)(u)), \ "d" ((UDItype)(v))); \ } while (0) #else #define umul_ppmm(w1, w0, u, v) \ do { \ typedef unsigned int __ll_UTItype __attribute__((mode(TI))); \ __ll_UTItype __ll = (__ll_UTItype)(u) * (v); \ w1 = __ll >> 64; \ w0 = __ll; \ } while (0) #endif #define UMUL_TIME 20 #define UDIV_TIME 140 #endif /* __mips__ */ /*************************************** ************** 32000 **************** ***************************************/ #if defined(__ns32000__) && W_TYPE_SIZE == 32 #define umul_ppmm(w1, w0, u, v) \ ({union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __xx; \ __asm__ ("meid %2,%0" \ : "=g" (__xx.__ll) \ : "%0" ((USItype)(u)), \ "g" ((USItype)(v))); \ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) #define __umulsidi3(u, v) \ ({UDItype __w; \ __asm__ ("meid %2,%0" \ : "=g" (__w) \ : "%0" ((USItype)(u)), \ "g" ((USItype)(v))); \ __w; }) #define udiv_qrnnd(q, r, n1, n0, d) \ ({union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __xx; \ __xx.__i.__h = (n1); __xx.__i.__l = (n0); \ __asm__ ("deid %2,%0" \ : "=g" (__xx.__ll) \ : "0" (__xx.__ll), \ "g" ((USItype)(d))); \ (r) = __xx.__i.__l; (q) = __xx.__i.__h; }) #endif /* __ns32000__ */ /*************************************** ************** PPC ****************** ***************************************/ #if (defined(_ARCH_PPC) || defined(_IBMR2)) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ do { \ if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{aze|addze} %0,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{a%I4|add%I4c} %1,%3,%4\n\t{ame|addme} %0,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "%r" ((USItype)(ah)), 
\ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ else \ __asm__ ("{a%I5|add%I5c} %1,%4,%5\n\t{ae|adde} %0,%2,%3" \ : "=r" (sh), \ "=&r" (sl) \ : "%r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "%r" ((USItype)(al)), \ "rI" ((USItype)(bl))); \ } while (0) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ do { \ if (__builtin_constant_p(ah) && (ah) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfze|subfze} %0,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(ah) && (ah) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{sfme|subfme} %0,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{ame|addme} %0,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else if (__builtin_constant_p(bh) && (bh) == ~(USItype) 0) \ __asm__ ("{sf%I3|subf%I3c} %1,%4,%3\n\t{aze|addze} %0,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "r" ((USItype)(ah)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ else \ __asm__ ("{sf%I4|subf%I4c} %1,%5,%4\n\t{sfe|subfe} %0,%3,%2" \ : "=r" (sh), \ "=&r" (sl) \ : "r" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "rI" ((USItype)(al)), \ "r" ((USItype)(bl))); \ } while (0) #if defined(_ARCH_PPC) #define umul_ppmm(ph, pl, m0, m1) \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mulhwu %0,%1,%2" \ : "=r" (ph) \ : "%r" (__m0), \ "r" (__m1)); \ (pl) = __m0 * __m1; \ } while (0) #define UMUL_TIME 15 #define smul_ppmm(ph, pl, m0, m1) \ do { \ SItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mulhw %0,%1,%2" \ : "=r" ((SItype) ph) \ : "%r" (__m0), \ "r" (__m1)); \ (pl) = __m0 * __m1; \ } while (0) #define SMUL_TIME 14 #define UDIV_TIME 120 #else #define umul_ppmm(xh, xl, m0, m1) \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("mul %0,%2,%3" \ : "=r" ((USItype)(xh)), \ "=q" 
((USItype)(xl)) \ : "r" (__m0), \ "r" (__m1)); \ (xh) += ((((SItype) __m0 >> 31) & __m1) \ + (((SItype) __m1 >> 31) & __m0)); \ } while (0) #define UMUL_TIME 8 #define smul_ppmm(xh, xl, m0, m1) \ __asm__ ("mul %0,%2,%3" \ : "=r" ((SItype)(xh)), \ "=q" ((SItype)(xl)) \ : "r" (m0), \ "r" (m1)) #define SMUL_TIME 4 #define sdiv_qrnnd(q, r, nh, nl, d) \ __asm__ ("div %0,%2,%4" \ : "=r" ((SItype)(q)), "=q" ((SItype)(r)) \ : "r" ((SItype)(nh)), "1" ((SItype)(nl)), "r" ((SItype)(d))) #define UDIV_TIME 100 #endif #endif /* Power architecture variants. */ /*************************************** ************** PYR ****************** ***************************************/ #if defined(__pyr__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("addw %5,%1\n" \ "addwc %3,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subw %5,%1\n" \ "subwb %3,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "g" ((USItype)(bl))) /* This insn works on Pyramids with AP, XP, or MI CPUs, but not with SP. 
*/ #define umul_ppmm(w1, w0, u, v) \ ({union {UDItype __ll; \ struct {USItype __h, __l; } __i; \ } __xx; \ __asm__ ("movw %1,%R0\n" \ "uemul %2,%0" \ : "=&r" (__xx.__ll) \ : "g" ((USItype) (u)), \ "g" ((USItype)(v))); \ (w1) = __xx.__i.__h; (w0) = __xx.__i.__l; }) #endif /* __pyr__ */ /*************************************** ************** RT/ROMP ************** ***************************************/ #if defined(__ibm032__) /* RT/ROMP */ && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("a %1,%5\n" \ "ae %0,%3" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%0" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ "r" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("s %1,%5\n" \ "se %0,%3" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "0" ((USItype)(ah)), \ "r" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "r" ((USItype)(bl))) #define umul_ppmm(ph, pl, m0, m1) \ do { \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ( \ "s r2,r2\n" \ "mts r10,%2\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "m r2,%3\n" \ "cas %0,r2,r0\n" \ "mfs r10,%1" \ : "=r" ((USItype)(ph)), \ "=r" ((USItype)(pl)) \ : "%r" (__m0), \ "r" (__m1) \ : "r2"); \ (ph) += ((((SItype) __m0 >> 31) & __m1) \ + (((SItype) __m1 >> 31) & __m0)); \ } while (0) #define UMUL_TIME 20 #define UDIV_TIME 200 #endif /* RT/ROMP */ /*************************************** ************** SH2 ****************** ***************************************/ #if (defined(__sh2__) || defined(__sh3__) || defined(__SH4__)) \ && W_TYPE_SIZE == 32 #define umul_ppmm(w1, w0, u, v) \ __asm__ ( \ "dmulu.l %2,%3\n" \ "sts macl,%1\n" \ "sts mach,%0" \ : "=r" ((USItype)(w1)), \ "=r" ((USItype)(w0)) \ : "r" ((USItype)(u)), \ "r" ((USItype)(v)) \ : "macl", "mach") #define UMUL_TIME 5 #endif 
/*************************************** ************** SPARC **************** ***************************************/ #if defined(__sparc__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("addcc %r4,%5,%1\n" \ "addx %r2,%3,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "%rJ" ((USItype)(ah)), \ "rI" ((USItype)(bh)), \ "%rJ" ((USItype)(al)), \ "rI" ((USItype)(bl)) \ __CLOBBER_CC) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subcc %r4,%5,%1\n" \ "subx %r2,%3,%0" \ : "=r" ((USItype)(sh)), \ "=&r" ((USItype)(sl)) \ : "rJ" ((USItype)(ah)), \ "rI" ((USItype)(bh)), \ "rJ" ((USItype)(al)), \ "rI" ((USItype)(bl)) \ __CLOBBER_CC) #if defined(__sparc_v8__) /* Don't match immediate range because, 1) it is not often useful, 2) the 'I' flag thinks of the range as a 13 bit signed interval, while we want to match a 13 bit interval, sign extended to 32 bits, but INTERPRETED AS UNSIGNED. */ #define umul_ppmm(w1, w0, u, v) \ __asm__ ("umul %2,%3,%1;rd %%y,%0" \ : "=r" ((USItype)(w1)), \ "=r" ((USItype)(w0)) \ : "r" ((USItype)(u)), \ "r" ((USItype)(v))) #define UMUL_TIME 5 #ifndef SUPERSPARC /* SuperSPARC's udiv only handles 53 bit dividends */ #define udiv_qrnnd(q, r, n1, n0, d) \ do { \ USItype __q; \ __asm__ ("mov %1,%%y;nop;nop;nop;udiv %2,%3,%0" \ : "=r" ((USItype)(__q)) \ : "r" ((USItype)(n1)), \ "r" ((USItype)(n0)), \ "r" ((USItype)(d))); \ (r) = (n0) - __q * (d); \ (q) = __q; \ } while (0) #define UDIV_TIME 25 #endif /* SUPERSPARC */ #else /* ! __sparc_v8__ */ #if defined(__sparclite__) /* This has hardware multiply but not divide. It also has two additional instructions scan (ffs from high bit) and divscc. */ #define umul_ppmm(w1, w0, u, v) \ __asm__ ("umul %2,%3,%1;rd %%y,%0" \ : "=r" ((USItype)(w1)), \ "=r" ((USItype)(w0)) \ : "r" ((USItype)(u)), \ "r" ((USItype)(v))) #define UMUL_TIME 5 #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("! Inlined udiv_qrnnd\n" \ "wr %%g0,%2,%%y ! 
Not a delayed write for sparclite\n" \ "tst %%g0\n" \ "divscc %3,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%%g1\n" \ "divscc %%g1,%4,%0\n" \ "rd %%y,%1\n" \ "bl,a 1f\n" \ "add %1,%4,%1\n" \ "1: ! End of inline udiv_qrnnd" \ : "=r" ((USItype)(q)), \ "=r" ((USItype)(r)) \ : "r" ((USItype)(n1)), \ "r" ((USItype)(n0)), \ "rI" ((USItype)(d)) \ : "%g1" __AND_CLOBBER_CC) #define UDIV_TIME 37 #endif /* __sparclite__ */ #endif /* __sparc_v8__ */ /* Default to sparc v7 versions of umul_ppmm and udiv_qrnnd. */ #ifndef umul_ppmm #define umul_ppmm(w1, w0, u, v) \ __asm__ ("! Inlined umul_ppmm\n" \ "wr %%g0,%2,%%y ! SPARC has 0-3 delay insn after a wr\n" \ "sra %3,31,%%g2 ! Don't move this insn\n" \ "and %2,%%g2,%%g2 ! Don't move this insn\n" \ "andcc %%g0,0,%%g1 ! 
Don't move this insn\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,%3,%%g1\n" \ "mulscc %%g1,0,%%g1\n" \ "add %%g1,%%g2,%0\n" \ "rd %%y,%1" \ : "=r" ((USItype)(w1)), \ "=r" ((USItype)(w0)) \ : "%rI" ((USItype)(u)), \ "r" ((USItype)(v)) \ : "%g1", "%g2" __AND_CLOBBER_CC) #define UMUL_TIME 39 /* 39 instructions */ /* It's quite necessary to add this much assembler for the sparc. The default udiv_qrnnd (in C) is more than 10 times slower! */ #define udiv_qrnnd(q, r, n1, n0, d) \ __asm__ ("! Inlined udiv_qrnnd\n\t" \ "mov 32,%%g1\n\t" \ "subcc %1,%2,%%g0\n\t" \ "1: bcs 5f\n\t" \ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \ "sub %1,%2,%1 ! this kills msb of n\n\t" \ "addx %1,%1,%1 ! so this can't give carry\n\t" \ "subcc %%g1,1,%%g1\n\t" \ "2: bne 1b\n\t" \ "subcc %1,%2,%%g0\n\t" \ "bcs 3f\n\t" \ "addxcc %0,%0,%0 ! shift n1n0 and a q-bit in lsb\n\t" \ "b 3f\n\t" \ "sub %1,%2,%1 ! this kills msb of n\n\t" \ "4: sub %1,%2,%1\n\t" \ "5: addxcc %1,%1,%1\n\t" \ "bcc 2b\n\t" \ "subcc %%g1,1,%%g1\n\t" \ "! Got carry from n. Subtract next step to cancel this carry.\n\t" \ "bne 4b\n\t" \ "addcc %0,%0,%0 ! shift n1n0 and a 0-bit in lsb\n\t" \ "sub %1,%2,%1\n\t" \ "3: xnor %0,0,%0\n\t" \ "! 
End of inline udiv_qrnnd\n" \ : "=&r" ((USItype)(q)), \ "=&r" ((USItype)(r)) \ : "r" ((USItype)(d)), \ "1" ((USItype)(n1)), \ "0" ((USItype)(n0)) : "%g1", "cc") #define UDIV_TIME (3+7*32) /* 7 instructions/iteration. 32 iterations. */ #endif #endif /* __sparc__ */ /*************************************** ************** VAX ****************** ***************************************/ #if defined(__vax__) && W_TYPE_SIZE == 32 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("addl2 %5,%1\n" \ "adwc %3,%0" \ : "=g" ((USItype)(sh)), \ "=&g" ((USItype)(sl)) \ : "%0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "%1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ ("subl2 %5,%1\n" \ "sbwc %3,%0" \ : "=g" ((USItype)(sh)), \ "=&g" ((USItype)(sl)) \ : "0" ((USItype)(ah)), \ "g" ((USItype)(bh)), \ "1" ((USItype)(al)), \ "g" ((USItype)(bl))) #define umul_ppmm(xh, xl, m0, m1) \ do { \ union {UDItype __ll; \ struct {USItype __l, __h; } __i; \ } __xx; \ USItype __m0 = (m0), __m1 = (m1); \ __asm__ ("emul %1,%2,$0,%0" \ : "=g" (__xx.__ll) \ : "g" (__m0), \ "g" (__m1)); \ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ (xh) += ((((SItype) __m0 >> 31) & __m1) \ + (((SItype) __m1 >> 31) & __m0)); \ } while (0) #define sdiv_qrnnd(q, r, n1, n0, d) \ do { \ union {DItype __ll; \ struct {SItype __l, __h; } __i; \ } __xx; \ __xx.__i.__h = n1; __xx.__i.__l = n0; \ __asm__ ("ediv %3,%2,%0,%1" \ : "=g" (q), "=g" (r) \ : "g" (__xx.__ll), "g" (d)); \ } while (0) #endif /* __vax__ */ /*************************************** ************** Z8000 **************** ***************************************/ #if defined(__z8000__) && W_TYPE_SIZE == 16 #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ __asm__ ("add %H1,%H5\n\tadc %H0,%H3" \ : "=r" ((unsigned int)(sh)), \ "=&r" ((unsigned int)(sl)) \ : "%0" ((unsigned int)(ah)), \ "r" ((unsigned int)(bh)), \ "%1" ((unsigned int)(al)), \ "rQR" ((unsigned int)(bl))) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ __asm__ 
("sub %H1,%H5\n\tsbc %H0,%H3" \ : "=r" ((unsigned int)(sh)), \ "=&r" ((unsigned int)(sl)) \ : "0" ((unsigned int)(ah)), \ "r" ((unsigned int)(bh)), \ "1" ((unsigned int)(al)), \ "rQR" ((unsigned int)(bl))) #define umul_ppmm(xh, xl, m0, m1) \ do { \ union {long int __ll; \ struct {unsigned int __h, __l; } __i; \ } __xx; \ unsigned int __m0 = (m0), __m1 = (m1); \ __asm__ ("mult %S0,%H3" \ : "=r" (__xx.__i.__h), \ "=r" (__xx.__i.__l) \ : "%1" (__m0), \ "rQR" (__m1)); \ (xh) = __xx.__i.__h; (xl) = __xx.__i.__l; \ (xh) += ((((signed int) __m0 >> 15) & __m1) \ + (((signed int) __m1 >> 15) & __m0)); \ } while (0) #endif /* __z8000__ */ #endif /* __GNUC__ */ /*************************************** *********** Generic Versions ******** ***************************************/ #if !defined(umul_ppmm) && defined(__umulsidi3) #define umul_ppmm(ph, pl, m0, m1) \ { \ UDWtype __ll = __umulsidi3(m0, m1); \ ph = (UWtype) (__ll >> W_TYPE_SIZE); \ pl = (UWtype) __ll; \ } #endif #if !defined(__umulsidi3) #define __umulsidi3(u, v) \ ({UWtype __hi, __lo; \ umul_ppmm(__hi, __lo, u, v); \ ((UDWtype) __hi << W_TYPE_SIZE) | __lo; }) #endif /* If this machine has no inline assembler, use C macros. 
*/ #if !defined(add_ssaaaa) #define add_ssaaaa(sh, sl, ah, al, bh, bl) \ do { \ UWtype __x; \ __x = (al) + (bl); \ (sh) = (ah) + (bh) + (__x < (al)); \ (sl) = __x; \ } while (0) #endif #if !defined(sub_ddmmss) #define sub_ddmmss(sh, sl, ah, al, bh, bl) \ do { \ UWtype __x; \ __x = (al) - (bl); \ (sh) = (ah) - (bh) - (__x > (al)); \ (sl) = __x; \ } while (0) #endif #if !defined(umul_ppmm) #define umul_ppmm(w1, w0, u, v) \ do { \ UWtype __x0, __x1, __x2, __x3; \ UHWtype __ul, __vl, __uh, __vh; \ UWtype __u = (u), __v = (v); \ \ __ul = __ll_lowpart(__u); \ __uh = __ll_highpart(__u); \ __vl = __ll_lowpart(__v); \ __vh = __ll_highpart(__v); \ \ __x0 = (UWtype) __ul * __vl; \ __x1 = (UWtype) __ul * __vh; \ __x2 = (UWtype) __uh * __vl; \ __x3 = (UWtype) __uh * __vh; \ \ __x1 += __ll_highpart(__x0);/* this can't give carry */ \ __x1 += __x2; /* but this indeed can */ \ if (__x1 < __x2) /* did we get it? */ \ __x3 += __ll_B; /* yes, add it in the proper pos. */ \ \ (w1) = __x3 + __ll_highpart(__x1); \ (w0) = (__ll_lowpart(__x1) << W_TYPE_SIZE/2) + __ll_lowpart(__x0); \ } while (0) #endif #if !defined(umul_ppmm) #define smul_ppmm(w1, w0, u, v) \ do { \ UWtype __w1; \ UWtype __m0 = (u), __m1 = (v); \ umul_ppmm(__w1, w0, __m0, __m1); \ (w1) = __w1 - (-(__m0 >> (W_TYPE_SIZE - 1)) & __m1) \ - (-(__m1 >> (W_TYPE_SIZE - 1)) & __m0); \ } while (0) #endif /* Define this unconditionally, so it can be used for debugging. */ #define __udiv_qrnnd_c(q, r, n1, n0, d) \ do { \ UWtype __d1, __d0, __q1, __q0, __r1, __r0, __m; \ __d1 = __ll_highpart(d); \ __d0 = __ll_lowpart(d); \ \ __r1 = (n1) % __d1; \ __q1 = (n1) / __d1; \ __m = (UWtype) __q1 * __d0; \ __r1 = __r1 * __ll_B | __ll_highpart(n0); \ if (__r1 < __m) { \ __q1--, __r1 += (d); \ if (__r1 >= (d)) /* i.e. 
we didn't get carry when adding to __r1 */ \ if (__r1 < __m) \ __q1--, __r1 += (d); \ } \ __r1 -= __m; \ \ __r0 = __r1 % __d1; \ __q0 = __r1 / __d1; \ __m = (UWtype) __q0 * __d0; \ __r0 = __r0 * __ll_B | __ll_lowpart(n0); \ if (__r0 < __m) { \ __q0--, __r0 += (d); \ if (__r0 >= (d)) \ if (__r0 < __m) \ __q0--, __r0 += (d); \ } \ __r0 -= __m; \ \ (q) = (UWtype) __q1 * __ll_B | __q0; \ (r) = __r0; \ } while (0) /* If the processor has no udiv_qrnnd but sdiv_qrnnd, go through __udiv_w_sdiv (defined in libgcc or elsewhere). */ #if !defined(udiv_qrnnd) && defined(sdiv_qrnnd) #define udiv_qrnnd(q, r, nh, nl, d) \ do { \ UWtype __r; \ (q) = __MPN(udiv_w_sdiv) (&__r, nh, nl, d); \ (r) = __r; \ } while (0) #endif /* If udiv_qrnnd was not defined for this processor, use __udiv_qrnnd_c. */ #if !defined(udiv_qrnnd) #define UDIV_NEEDS_NORMALIZATION 1 #define udiv_qrnnd __udiv_qrnnd_c #endif #ifndef UDIV_NEEDS_NORMALIZATION #define UDIV_NEEDS_NORMALIZATION 0 #endif
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2023 Intel Corporation
 */

#ifndef _XE_TTM_SYS_MGR_H_
#define _XE_TTM_SYS_MGR_H_

/* Opaque forward declaration; only a pointer is needed here. */
struct xe_device;

/*
 * Initialize the TTM system-memory manager for @xe.
 * Presumably returns 0 on success or a negative errno on failure
 * (kernel init convention) — confirm against the definition in
 * xe_ttm_sys_mgr.c.
 */
int xe_ttm_sys_mgr_init(struct xe_device *xe);

#endif
/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
/*
 * PTP 1588 clock support - user space interface
 *
 * Copyright (C) 2010 OMICRON electronics GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#ifndef _PTP_CLOCK_H_
#define _PTP_CLOCK_H_

#include <linux/ioctl.h>
#include <linux/types.h>

/*
 * Bits of the ptp_extts_request.flags field:
 */
#define PTP_ENABLE_FEATURE (1<<0)
#define PTP_RISING_EDGE    (1<<1)
#define PTP_FALLING_EDGE   (1<<2)
#define PTP_STRICT_FLAGS   (1<<3)
#define PTP_EXT_OFFSET     (1<<4)
#define PTP_EXTTS_EDGES    (PTP_RISING_EDGE | PTP_FALLING_EDGE)

/*
 * flag fields valid for the new PTP_EXTTS_REQUEST2 ioctl.
 */
#define PTP_EXTTS_VALID_FLAGS	(PTP_ENABLE_FEATURE |	\
				 PTP_RISING_EDGE |	\
				 PTP_FALLING_EDGE |	\
				 PTP_STRICT_FLAGS |	\
				 PTP_EXT_OFFSET)

/*
 * flag fields valid for the original PTP_EXTTS_REQUEST ioctl.
 * DO NOT ADD NEW FLAGS HERE.
 */
#define PTP_EXTTS_V1_VALID_FLAGS	(PTP_ENABLE_FEATURE |	\
					 PTP_RISING_EDGE |	\
					 PTP_FALLING_EDGE)

/*
 * flag fields valid for the ptp_extts_event report.
 */
#define PTP_EXTTS_EVENT_VALID	(PTP_ENABLE_FEATURE)

/*
 * Bits of the ptp_perout_request.flags field:
 */
#define PTP_PEROUT_ONE_SHOT		(1<<0)
#define PTP_PEROUT_DUTY_CYCLE		(1<<1)
#define PTP_PEROUT_PHASE		(1<<2)

/*
 * flag fields valid for the new PTP_PEROUT_REQUEST2 ioctl.
 */
#define PTP_PEROUT_VALID_FLAGS		(PTP_PEROUT_ONE_SHOT | \
					 PTP_PEROUT_DUTY_CYCLE | \
					 PTP_PEROUT_PHASE)

/*
 * No flags are valid for the original PTP_PEROUT_REQUEST ioctl
 */
#define PTP_PEROUT_V1_VALID_FLAGS	(0)

/*
 * struct ptp_clock_time - represents a time value
 *
 * The sign of the seconds field applies to the whole value. The
 * nanoseconds field is always unsigned. The reserved field is
 * included for sub-nanosecond resolution, should the demand for
 * this ever appear.
 *
 */
struct ptp_clock_time {
	__s64 sec;  /* seconds */
	__u32 nsec; /* nanoseconds */
	__u32 reserved;
};

struct ptp_clock_caps {
	int max_adj;   /* Maximum frequency adjustment in parts per billion. */
	int n_alarm;   /* Number of programmable alarms. */
	int n_ext_ts;  /* Number of external time stamp channels. */
	int n_per_out; /* Number of programmable periodic signals. */
	int pps;       /* Whether the clock supports a PPS callback. */
	int n_pins;    /* Number of input/output pins. */
	/* Whether the clock supports precise system-device cross timestamps */
	int cross_timestamping;
	/* Whether the clock supports adjust phase */
	int adjust_phase;
	int max_phase_adj; /* Maximum phase adjustment in nanoseconds. */
	int rsv[11];       /* Reserved for future use. */
};

struct ptp_extts_request {
	unsigned int index;  /* Which channel to configure. */
	unsigned int flags;  /* Bit field for PTP_xxx flags. */
	unsigned int rsv[2]; /* Reserved for future use. */
};

struct ptp_perout_request {
	union {
		/*
		 * Absolute start time.
		 * Valid only if (flags & PTP_PEROUT_PHASE) is unset.
		 */
		struct ptp_clock_time start;
		/*
		 * Phase offset. The signal should start toggling at an
		 * unspecified integer multiple of the period, plus this value.
		 * The start time should be "as soon as possible".
		 * Valid only if (flags & PTP_PEROUT_PHASE) is set.
		 */
		struct ptp_clock_time phase;
	};
	struct ptp_clock_time period; /* Desired period, zero means disable. */
	unsigned int index;           /* Which channel to configure. */
	unsigned int flags;
	union {
		/*
		 * The "on" time of the signal.
		 * Must be lower than the period.
		 * Valid only if (flags & PTP_PEROUT_DUTY_CYCLE) is set.
		 */
		struct ptp_clock_time on;
		/* Reserved for future use. */
		unsigned int rsv[4];
	};
};

#define PTP_MAX_SAMPLES 25 /* Maximum allowed offset measurement samples. */

struct ptp_sys_offset {
	unsigned int n_samples; /* Desired number of measurements. */
	unsigned int rsv[3];    /* Reserved for future use. */
	/*
	 * Array of interleaved system/phc time stamps. The kernel
	 * will provide 2*n_samples + 1 time stamps, with the last
	 * one as a system time stamp.
	 */
	struct ptp_clock_time ts[2 * PTP_MAX_SAMPLES + 1];
};

/*
 * ptp_sys_offset_extended - data structure for IOCTL operation
 *			     PTP_SYS_OFFSET_EXTENDED
 *
 * @n_samples:	Desired number of measurements.
 * @clockid:	clockid of a clock-base used for pre/post timestamps.
 * @rsv:	Reserved for future use.
 * @ts:		Array of samples in the form [pre-TS, PHC, post-TS]. The
 *		kernel provides @n_samples.
 *
 * Starting from kernel 6.12 and onwards, the first word of the reserved-field
 * is used for @clockid. That's backward compatible since previous kernel
 * expect all three reserved words (@rsv[3]) to be 0 while the clockid (first
 * word in the new structure) for CLOCK_REALTIME is '0'.
 */
struct ptp_sys_offset_extended {
	unsigned int n_samples;
	__kernel_clockid_t clockid;
	unsigned int rsv[2];
	struct ptp_clock_time ts[PTP_MAX_SAMPLES][3];
};

struct ptp_sys_offset_precise {
	struct ptp_clock_time device;
	struct ptp_clock_time sys_realtime;
	struct ptp_clock_time sys_monoraw;
	unsigned int rsv[4]; /* Reserved for future use. */
};

enum ptp_pin_function {
	PTP_PF_NONE,
	PTP_PF_EXTTS,
	PTP_PF_PEROUT,
	PTP_PF_PHYSYNC,
};

struct ptp_pin_desc {
	/*
	 * Hardware specific human readable pin name. This field is
	 * set by the kernel during the PTP_PIN_GETFUNC ioctl and is
	 * ignored for the PTP_PIN_SETFUNC ioctl.
	 */
	char name[64];
	/*
	 * Pin index in the range of zero to ptp_clock_caps.n_pins - 1.
	 */
	unsigned int index;
	/*
	 * Which of the PTP_PF_xxx functions to use on this pin.
	 */
	unsigned int func;
	/*
	 * The specific channel to use for this function.
	 * This corresponds to the 'index' field of the
	 * PTP_EXTTS_REQUEST and PTP_PEROUT_REQUEST ioctls.
	 */
	unsigned int chan;
	/*
	 * Reserved for future use.
	 */
	unsigned int rsv[5];
};

#define PTP_CLK_MAGIC '='

/* Original ioctls (1..9) keep their legacy flag validation; the "2" variants
 * (10..18) validate flags strictly against the PTP_*_VALID_FLAGS masks above. */
#define PTP_CLOCK_GETCAPS  _IOR(PTP_CLK_MAGIC, 1, struct ptp_clock_caps)
#define PTP_EXTTS_REQUEST  _IOW(PTP_CLK_MAGIC, 2, struct ptp_extts_request)
#define PTP_PEROUT_REQUEST _IOW(PTP_CLK_MAGIC, 3, struct ptp_perout_request)
#define PTP_ENABLE_PPS     _IOW(PTP_CLK_MAGIC, 4, int)
#define PTP_SYS_OFFSET     _IOW(PTP_CLK_MAGIC, 5, struct ptp_sys_offset)
#define PTP_PIN_GETFUNC    _IOWR(PTP_CLK_MAGIC, 6, struct ptp_pin_desc)
#define PTP_PIN_SETFUNC    _IOW(PTP_CLK_MAGIC, 7, struct ptp_pin_desc)
#define PTP_SYS_OFFSET_PRECISE \
	_IOWR(PTP_CLK_MAGIC, 8, struct ptp_sys_offset_precise)
#define PTP_SYS_OFFSET_EXTENDED \
	_IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)

#define PTP_CLOCK_GETCAPS2  _IOR(PTP_CLK_MAGIC, 10, struct ptp_clock_caps)
#define PTP_EXTTS_REQUEST2  _IOW(PTP_CLK_MAGIC, 11, struct ptp_extts_request)
#define PTP_PEROUT_REQUEST2 _IOW(PTP_CLK_MAGIC, 12, struct ptp_perout_request)
#define PTP_ENABLE_PPS2     _IOW(PTP_CLK_MAGIC, 13, int)
#define PTP_SYS_OFFSET2     _IOW(PTP_CLK_MAGIC, 14, struct ptp_sys_offset)
#define PTP_PIN_GETFUNC2    _IOWR(PTP_CLK_MAGIC, 15, struct ptp_pin_desc)
#define PTP_PIN_SETFUNC2    _IOW(PTP_CLK_MAGIC, 16, struct ptp_pin_desc)
#define PTP_SYS_OFFSET_PRECISE2 \
	_IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise)
#define PTP_SYS_OFFSET_EXTENDED2 \
	_IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended)

#define PTP_MASK_CLEAR_ALL  _IO(PTP_CLK_MAGIC, 19)
#define PTP_MASK_EN_SINGLE  _IOW(PTP_CLK_MAGIC, 20, unsigned int)

struct ptp_extts_event {
	struct ptp_clock_time t; /* Time event occurred. */
	unsigned int index;      /* Which channel produced the event. */
	unsigned int flags;      /* Event type. */
	unsigned int rsv[2];     /* Reserved for future use. */
};

#endif
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author:
 *	Peter Pan <[email protected]>
 *	Boris Brezillon <[email protected]>
 */

/*
 * SPI memory interface: describes SPI NOR/NAND-style operations
 * (cmd/addr/dummy/data phases) and the controller hooks needed to execute
 * them, including optional direct-mapping support.
 */
#ifndef __LINUX_SPI_MEM_H
#define __LINUX_SPI_MEM_H

#include <linux/spi/spi.h>

/* Initializers for the cmd/addr/dummy/data sub-structs of struct spi_mem_op. */
#define SPI_MEM_OP_CMD(__opcode, __buswidth)			\
	{							\
		.buswidth = __buswidth,				\
		.opcode = __opcode,				\
		.nbytes = 1,					\
	}

#define SPI_MEM_OP_ADDR(__nbytes, __val, __buswidth)		\
	{							\
		.nbytes = __nbytes,				\
		.val = __val,					\
		.buswidth = __buswidth,				\
	}

#define SPI_MEM_OP_NO_ADDR	{ }

#define SPI_MEM_OP_DUMMY(__nbytes, __buswidth)			\
	{							\
		.nbytes = __nbytes,				\
		.buswidth = __buswidth,				\
	}

#define SPI_MEM_OP_NO_DUMMY	{ }

#define SPI_MEM_OP_DATA_IN(__nbytes, __buf, __buswidth)		\
	{							\
		.dir = SPI_MEM_DATA_IN,				\
		.nbytes = __nbytes,				\
		.buf.in = __buf,				\
		.buswidth = __buswidth,				\
	}

#define SPI_MEM_OP_DATA_OUT(__nbytes, __buf, __buswidth)	\
	{							\
		.dir = SPI_MEM_DATA_OUT,			\
		.nbytes = __nbytes,				\
		.buf.out = __buf,				\
		.buswidth = __buswidth,				\
	}

#define SPI_MEM_OP_NO_DATA	{ }

/**
 * enum spi_mem_data_dir - describes the direction of a SPI memory data
 *			   transfer from the controller perspective
 * @SPI_MEM_NO_DATA: no data transferred
 * @SPI_MEM_DATA_IN: data coming from the SPI memory
 * @SPI_MEM_DATA_OUT: data sent to the SPI memory
 */
enum spi_mem_data_dir {
	SPI_MEM_NO_DATA,
	SPI_MEM_DATA_IN,
	SPI_MEM_DATA_OUT,
};

/**
 * struct spi_mem_op - describes a SPI memory operation
 * @cmd.nbytes: number of opcode bytes (only 1 or 2 are valid). The opcode is
 *		sent MSB-first.
 * @cmd.buswidth: number of IO lines used to transmit the command
 * @cmd.opcode: operation opcode
 * @cmd.dtr: whether the command opcode should be sent in DTR mode or not
 * @addr.nbytes: number of address bytes to send. Can be zero if the operation
 *		 does not need to send an address
 * @addr.buswidth: number of IO lines used to transmit the address cycles
 * @addr.dtr: whether the address should be sent in DTR mode or not
 * @addr.val: address value. This value is always sent MSB first on the bus.
 *	      Note that only @addr.nbytes are taken into account in this
 *	      address value, so users should make sure the value fits in the
 *	      assigned number of bytes.
 * @dummy.nbytes: number of dummy bytes to send after an opcode or address. Can
 *		  be zero if the operation does not require dummy bytes
 * @dummy.buswidth: number of IO lanes used to transmit the dummy bytes
 * @dummy.dtr: whether the dummy bytes should be sent in DTR mode or not
 * @data.buswidth: number of IO lanes used to send/receive the data
 * @data.dtr: whether the data should be sent in DTR mode or not
 * @data.ecc: whether error correction is required or not
 * @data.swap16: whether the byte order of 16-bit words is swapped when read
 *		 or written in Octal DTR mode compared to STR mode.
 * @data.dir: direction of the transfer
 * @data.nbytes: number of data bytes to send/receive. Can be zero if the
 *		 operation does not involve transferring data
 * @data.buf.in: input buffer (must be DMA-able)
 * @data.buf.out: output buffer (must be DMA-able)
 */
struct spi_mem_op {
	struct {
		u8 nbytes;
		u8 buswidth;
		u8 dtr : 1;
		u8 __pad : 7;
		u16 opcode;
	} cmd;

	struct {
		u8 nbytes;
		u8 buswidth;
		u8 dtr : 1;
		u8 __pad : 7;
		u64 val;
	} addr;

	struct {
		u8 nbytes;
		u8 buswidth;
		u8 dtr : 1;
		u8 __pad : 7;
	} dummy;

	struct {
		u8 buswidth;
		u8 dtr : 1;
		u8 ecc : 1;
		u8 swap16 : 1;
		u8 __pad : 5;
		enum spi_mem_data_dir dir;
		unsigned int nbytes;
		union {
			void *in;
			const void *out;
		} buf;
	} data;
};

#define SPI_MEM_OP(__cmd, __addr, __dummy, __data)		\
	{							\
		.cmd = __cmd,					\
		.addr = __addr,					\
		.dummy = __dummy,				\
		.data = __data,					\
	}

/**
 * struct spi_mem_dirmap_info - Direct mapping information
 * @op_tmpl: operation template that should be used by the direct mapping when
 *	     the memory device is accessed
 * @offset: absolute offset this direct mapping is pointing to
 * @length: length in byte of this direct mapping
 *
 * These information are used by the controller specific implementation to know
 * the portion of memory that is directly mapped and the spi_mem_op that should
 * be used to access the device.
 * A direct mapping is only valid for one direction (read or write) and this
 * direction is directly encoded in the ->op_tmpl.data.dir field.
 */
struct spi_mem_dirmap_info {
	struct spi_mem_op op_tmpl;
	u64 offset;
	u64 length;
};

/**
 * struct spi_mem_dirmap_desc - Direct mapping descriptor
 * @mem: the SPI memory device this direct mapping is attached to
 * @info: information passed at direct mapping creation time
 * @nodirmap: set to 1 if the SPI controller does not implement
 *	      ->mem_ops->dirmap_create() or when this function returned an
 *	      error. If @nodirmap is true, all spi_mem_dirmap_{read,write}()
 *	      calls will use spi_mem_exec_op() to access the memory. This is a
 *	      degraded mode that allows spi_mem drivers to use the same code
 *	      no matter whether the controller supports direct mapping or not
 * @priv: field pointing to controller specific data
 *
 * Common part of a direct mapping descriptor. This object is created by
 * spi_mem_dirmap_create() and controller implementation of ->create_dirmap()
 * can create/attach direct mapping resources to the descriptor in the ->priv
 * field.
 */
struct spi_mem_dirmap_desc {
	struct spi_mem *mem;
	struct spi_mem_dirmap_info info;
	unsigned int nodirmap;
	void *priv;
};

/**
 * struct spi_mem - describes a SPI memory device
 * @spi: the underlying SPI device
 * @drvpriv: spi_mem_driver private data
 * @name: name of the SPI memory device
 *
 * Extra information that describe the SPI memory device and may be needed by
 * the controller to properly handle this device should be placed here.
 *
 * One example would be the device size since some controller expose their SPI
 * mem devices through a io-mapped region.
 */
struct spi_mem {
	struct spi_device *spi;
	void *drvpriv;
	const char *name;
};

/**
 * spi_mem_set_drvdata() - attach driver private data to a SPI mem
 *			   device
 * @mem: memory device
 * @data: data to attach to the memory device
 */
static inline void spi_mem_set_drvdata(struct spi_mem *mem, void *data)
{
	mem->drvpriv = data;
}

/**
 * spi_mem_get_drvdata() - get driver private data attached to a SPI mem
 *			   device
 * @mem: memory device
 *
 * Return: the data attached to the mem device.
 */
static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
{
	return mem->drvpriv;
}

/**
 * struct spi_controller_mem_ops - SPI memory operations
 * @adjust_op_size: shrink the data xfer of an operation to match controller's
 *		    limitations (can be alignment or max RX/TX size
 *		    limitations)
 * @supports_op: check if an operation is supported by the controller
 * @exec_op: execute a SPI memory operation
 *	     not all driver provides supports_op(), so it can return -EOPNOTSUPP
 *	     if the op is not supported by the driver/controller
 * @get_name: get a custom name for the SPI mem device from the controller.
 *	      This might be needed if the controller driver has been ported
 *	      to use the SPI mem layer and a custom name is used to keep
 *	      mtdparts compatible.
 *	      Note that if the implementation of this function allocates memory
 *	      dynamically, then it should do so with devm_xxx(), as we don't
 *	      have a ->free_name() function.
 * @dirmap_create: create a direct mapping descriptor that can later be used to
 *		   access the memory device. This method is optional
 * @dirmap_destroy: destroy a memory descriptor previous created by
 *		    ->dirmap_create()
 * @dirmap_read: read data from the memory device using the direct mapping
 *		 created by ->dirmap_create(). The function can return less
 *		 data than requested (for example when the request is crossing
 *		 the currently mapped area), and the caller of
 *		 spi_mem_dirmap_read() is responsible for calling it again in
 *		 this case.
 * @dirmap_write: write data to the memory device using the direct mapping
 *		  created by ->dirmap_create(). The function can return less
 *		  data than requested (for example when the request is crossing
 *		  the currently mapped area), and the caller of
 *		  spi_mem_dirmap_write() is responsible for calling it again in
 *		  this case.
 * @poll_status: poll memory device status until (status & mask) == match or
 *		 when the timeout has expired. It fills the data buffer with
 *		 the last status value.
 *
 * This interface should be implemented by SPI controllers providing an
 * high-level interface to execute SPI memory operation, which is usually the
 * case for QSPI controllers.
 *
 * Note on ->dirmap_{read,write}(): drivers should avoid accessing the direct
 * mapping from the CPU because doing that can stall the CPU waiting for the
 * SPI mem transaction to finish, and this will make real-time maintainers
 * unhappy and might make your system less reactive. Instead, drivers should
 * use DMA to access this direct mapping.
 */
struct spi_controller_mem_ops {
	int (*adjust_op_size)(struct spi_mem *mem, struct spi_mem_op *op);
	bool (*supports_op)(struct spi_mem *mem,
			    const struct spi_mem_op *op);
	int (*exec_op)(struct spi_mem *mem,
		       const struct spi_mem_op *op);
	const char *(*get_name)(struct spi_mem *mem);
	int (*dirmap_create)(struct spi_mem_dirmap_desc *desc);
	void (*dirmap_destroy)(struct spi_mem_dirmap_desc *desc);
	ssize_t (*dirmap_read)(struct spi_mem_dirmap_desc *desc,
			       u64 offs, size_t len, void *buf);
	ssize_t (*dirmap_write)(struct spi_mem_dirmap_desc *desc,
				u64 offs, size_t len, const void *buf);
	int (*poll_status)(struct spi_mem *mem,
			   const struct spi_mem_op *op,
			   u16 mask, u16 match,
			   unsigned long initial_delay_us,
			   unsigned long polling_rate_us,
			   unsigned long timeout_ms);
};

/**
 * struct spi_controller_mem_caps - SPI memory controller capabilities
 * @dtr: Supports DTR operations
 * @ecc: Supports operations with error correction
 * @swap16: Supports swapping bytes on a 16 bit boundary when configured in
 *	    Octal DTR
 */
struct spi_controller_mem_caps {
	bool dtr;
	bool ecc;
	bool swap16;
};

#define spi_mem_controller_is_capable(ctlr, cap)	\
	((ctlr)->mem_caps && (ctlr)->mem_caps->cap)

/**
 * struct spi_mem_driver - SPI memory driver
 * @spidrv: inherit from a SPI driver
 * @probe: probe a SPI memory. Usually where detection/initialization takes
 *	   place
 * @remove: remove a SPI memory
 * @shutdown: take appropriate action when the system is shutdown
 *
 * This is just a thin wrapper around a spi_driver. The core takes care of
 * allocating the spi_mem object and forwarding the probe/remove/shutdown
 * request to the spi_mem_driver. The reason we use this wrapper is because
 * we might have to stuff more information into the spi_mem struct to let
 * SPI controllers know more about the SPI memory they interact with, and
 * having this intermediate layer allows us to do that without adding more
 * useless fields to the spi_device object.
 */
struct spi_mem_driver {
	struct spi_driver spidrv;
	int (*probe)(struct spi_mem *mem);
	int (*remove)(struct spi_mem *mem);
	void (*shutdown)(struct spi_mem *mem);
};

#if IS_ENABLED(CONFIG_SPI_MEM)
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sg);

void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sg);

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op);
#else
/* Stubs for !CONFIG_SPI_MEM so callers compile without the SPI mem core. */
static inline int
spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				   const struct spi_mem_op *op,
				   struct sg_table *sg)
{
	return -ENOTSUPP;
}

static inline void
spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
				     const struct spi_mem_op *op,
				     struct sg_table *sg)
{
}

static inline
bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	return false;
}
#endif /* CONFIG_SPI_MEM */

int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op);

bool spi_mem_supports_op(struct spi_mem *mem,
			 const struct spi_mem_op *op);

int spi_mem_exec_op(struct spi_mem *mem,
		    const struct spi_mem_op *op);

const char *spi_mem_get_name(struct spi_mem *mem);

struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info);
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc);
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf);
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf);
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info);
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc);

/*
 * NOTE(review): @timeout_ms here is u16 while the ->poll_status() hook above
 * takes unsigned long; the core presumably narrows the value — confirm
 * against the implementation if wide timeouts are needed.
 */
int spi_mem_poll_status(struct spi_mem *mem,
			const struct spi_mem_op *op,
			u16 mask, u16 match,
			unsigned long initial_delay_us,
			unsigned long polling_delay_us,
			u16 timeout_ms);

int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
				       struct module *owner);

void spi_mem_driver_unregister(struct spi_mem_driver *drv);

#define spi_mem_driver_register(__drv)				\
	spi_mem_driver_register_with_owner(__drv, THIS_MODULE)

#define module_spi_mem_driver(__drv)				\
	module_driver(__drv, spi_mem_driver_register,		\
		      spi_mem_driver_unregister)

#endif /* __LINUX_SPI_MEM_H */
// SPDX-License-Identifier: GPL-2.0+ OR MIT
//
// Copyright 2016 Freescale Semiconductor, Inc.

/* i.MX6 QuadPlus: extends the i.MX6Q devicetree with the extra OCRAM banks,
 * PRE/PRG display prefetch engines, and QuadPlus-specific compatibles. */
#include "imx6q.dtsi"

/ {
	soc {
		/* Two additional 128 KiB on-chip RAM banks used as PRE tile
		 * memory (referenced below via fsl,iram). */
		ocram2: sram@940000 {
			compatible = "mmio-sram";
			reg = <0x00940000 0x20000>;
			ranges = <0 0x00940000 0x20000>;
			#address-cells = <1>;
			#size-cells = <1>;
			clocks = <&clks IMX6QDL_CLK_OCRAM>;
		};

		ocram3: sram@960000 {
			compatible = "mmio-sram";
			reg = <0x00960000 0x20000>;
			ranges = <0 0x00960000 0x20000>;
			#address-cells = <1>;
			#size-cells = <1>;
			clocks = <&clks IMX6QDL_CLK_OCRAM>;
		};

		bus@2100000 {
			/* Prefetch Resolve Engines: pre1/pre2 use ocram2,
			 * pre3/pre4 use ocram3. */
			pre1: pre@21c8000 {
				compatible = "fsl,imx6qp-pre";
				reg = <0x021c8000 0x1000>;
				interrupts = <GIC_SPI 90 IRQ_TYPE_EDGE_RISING>;
				clocks = <&clks IMX6QDL_CLK_PRE0>;
				clock-names = "axi";
				fsl,iram = <&ocram2>;
			};

			pre2: pre@21c9000 {
				compatible = "fsl,imx6qp-pre";
				reg = <0x021c9000 0x1000>;
				interrupts = <GIC_SPI 97 IRQ_TYPE_EDGE_RISING>;
				clocks = <&clks IMX6QDL_CLK_PRE1>;
				clock-names = "axi";
				fsl,iram = <&ocram2>;
			};

			pre3: pre@21ca000 {
				compatible = "fsl,imx6qp-pre";
				reg = <0x021ca000 0x1000>;
				interrupts = <GIC_SPI 98 IRQ_TYPE_EDGE_RISING>;
				clocks = <&clks IMX6QDL_CLK_PRE2>;
				clock-names = "axi";
				fsl,iram = <&ocram3>;
			};

			pre4: pre@21cb000 {
				compatible = "fsl,imx6qp-pre";
				reg = <0x021cb000 0x1000>;
				interrupts = <GIC_SPI 99 IRQ_TYPE_EDGE_RISING>;
				clocks = <&clks IMX6QDL_CLK_PRE3>;
				clock-names = "axi";
				fsl,iram = <&ocram3>;
			};

			/* Prefetch Resolve Gaskets; pre2/pre3 are listed for
			 * both PRGs (shared/muxable), pre1 is dedicated to
			 * prg1 and pre4 to prg2. */
			prg1: prg@21cc000 {
				compatible = "fsl,imx6qp-prg";
				reg = <0x021cc000 0x1000>;
				clocks = <&clks IMX6QDL_CLK_PRG0_APB>,
					 <&clks IMX6QDL_CLK_PRG0_AXI>;
				clock-names = "ipg", "axi";
				fsl,pres = <&pre1>, <&pre2>, <&pre3>;
			};

			prg2: prg@21cd000 {
				compatible = "fsl,imx6qp-prg";
				reg = <0x021cd000 0x1000>;
				clocks = <&clks IMX6QDL_CLK_PRG1_APB>,
					 <&clks IMX6QDL_CLK_PRG1_AXI>;
				clock-names = "ipg", "axi";
				fsl,pres = <&pre4>, <&pre2>, <&pre3>;
			};
		};
	};
};

/* QuadPlus moves the FEC interrupts relative to i.MX6Q. */
&fec {
	interrupts = <0 118 IRQ_TYPE_LEVEL_HIGH>,
		     <0 119 IRQ_TYPE_LEVEL_HIGH>;
};

&gpc {
	compatible = "fsl,imx6qp-gpc", "fsl,imx6q-gpc";
};

&ipu1 {
	compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
	fsl,prg = <&prg1>;
};

&ipu2 {
	compatible = "fsl,imx6qp-ipu", "fsl,imx6q-ipu";
	fsl,prg = <&prg2>;
};

/* QuadPlus LDB gains separate DI mux selects for both IPUs. */
&ldb {
	clocks = <&clks IMX6QDL_CLK_LDB_DI0_SEL>, <&clks IMX6QDL_CLK_LDB_DI1_SEL>,
		 <&clks IMX6QDL_CLK_IPU1_DI0_SEL>, <&clks IMX6QDL_CLK_IPU1_DI1_SEL>,
		 <&clks IMX6QDL_CLK_IPU2_DI0_SEL>, <&clks IMX6QDL_CLK_IPU2_DI1_SEL>,
		 <&clks IMX6QDL_CLK_LDB_DI0_PODF>, <&clks IMX6QDL_CLK_LDB_DI1_PODF>;
	clock-names = "di0_pll", "di1_pll",
		      "di0_sel", "di1_sel", "di2_sel", "di3_sel",
		      "di0", "di1";
};

&mmdc0 {
	compatible = "fsl,imx6qp-mmdc", "fsl,imx6q-mmdc";
};

&pcie {
	compatible = "fsl,imx6qp-pcie";
};
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: AMD
 *
 */

#ifndef __DAL_HW_SHARED_H__
#define __DAL_HW_SHARED_H__

#include "os_types.h"
#include "fixed31_32.h"
#include "dc_hw_types.h"

/******************************************************************************
 * Data types shared between different Virtual HW blocks
 ******************************************************************************/

#define MAX_AUDIOS 7

/**
 * @MAX_PIPES:
 *
 * Every ASIC support a fixed number of pipes; MAX_PIPES defines a large number
 * to be used inside loops and for determining array sizes.
 */
#define MAX_PIPES 6
#define MAX_PHANTOM_PIPES (MAX_PIPES / 2)
#define MAX_LINKS (MAX_PIPES * 2 + 2)
#define MAX_DIG_LINK_ENCODERS 7
#define MAX_DWB_PIPES	1
#define MAX_HPO_DP2_ENCODERS	4
#define MAX_HPO_DP2_LINK_ENCODERS	4

/* One segment of a piecewise gamma curve: start offset + segment count. */
struct gamma_curve {
	uint32_t offset;
	uint32_t segments_num;
};

/* A curve control point in fixed 31.32 math plus its custom-float encodings
 * (the *_reg / custom_float_* values are the HW-register representations). */
struct curve_points {
	struct fixed31_32 x;
	struct fixed31_32 y;
	struct fixed31_32 offset;
	struct fixed31_32 slope;

	uint32_t custom_float_x;
	uint32_t custom_float_y;
	uint32_t custom_float_offset;
	uint32_t custom_float_slope;
};

struct curve_points3 {
	struct curve_points red;
	struct curve_points green;
	struct curve_points blue;
};

/* Per-point PWL result: fixed-point values plus the register-encoded forms. */
struct pwl_result_data {
	struct fixed31_32 red;
	struct fixed31_32 green;
	struct fixed31_32 blue;

	struct fixed31_32 delta_red;
	struct fixed31_32 delta_green;
	struct fixed31_32 delta_blue;

	uint32_t red_reg;
	uint32_t green_reg;
	uint32_t blue_reg;

	uint32_t delta_red_reg;
	uint32_t delta_green_reg;
	uint32_t delta_blue_reg;
};

struct dc_rgb {
	uint32_t red;
	uint32_t green;
	uint32_t blue;
};

/* 17^3 = 4913 = 1229 + 3*1228 entries split across four LUT banks. */
struct tetrahedral_17x17x17 {
	struct dc_rgb lut0[1229];
	struct dc_rgb lut1[1228];
	struct dc_rgb lut2[1228];
	struct dc_rgb lut3[1228];
};

/* 9^3 = 729 = 183 + 3*182 entries split across four LUT banks. */
struct tetrahedral_9x9x9 {
	struct dc_rgb lut0[183];
	struct dc_rgb lut1[182];
	struct dc_rgb lut2[182];
	struct dc_rgb lut3[182];
};

struct tetrahedral_params {
	union {
		struct tetrahedral_17x17x17 tetrahedral_17;
		struct tetrahedral_9x9x9 tetrahedral_9;
	};
	bool use_tetrahedral_9;
	bool use_12bits;
};

/* arr_curve_points - regamma regions/segments specification
 * arr_points - beginning and end point specified separately (only one on DCE)
 * corner_points - beginning and end point for all 3 colors (DCN)
 * rgb_resulted - final curve
 */
struct pwl_params {
	struct gamma_curve arr_curve_points[34];
	union {
		struct curve_points arr_points[2];
		struct curve_points3 corner_points[2];
	};
	struct pwl_result_data rgb_resulted[256 + 3];
	uint32_t hw_points_num;
};

/* move to dpp
 * while we are moving functionality out of opp to dpp to align
 * HW programming to HW IP, we define these struct in hw_shared
 * so we can still compile while refactoring
 */

enum lb_pixel_depth {
	/* do not change the values because it is used as bit vector */
	LB_PIXEL_DEPTH_18BPP = 1,
	LB_PIXEL_DEPTH_24BPP = 2,
	LB_PIXEL_DEPTH_30BPP = 4,
	LB_PIXEL_DEPTH_36BPP = 8
};

enum graphics_csc_adjust_type {
	GRAPHICS_CSC_ADJUST_TYPE_BYPASS = 0,
	GRAPHICS_CSC_ADJUST_TYPE_HW, /* without adjustments */
	GRAPHICS_CSC_ADJUST_TYPE_SW  /*use adjustments */
};

enum ipp_degamma_mode {
	IPP_DEGAMMA_MODE_BYPASS,
	IPP_DEGAMMA_MODE_HW_sRGB,
	IPP_DEGAMMA_MODE_HW_xvYCC,
	IPP_DEGAMMA_MODE_USER_PWL
};

enum gamcor_mode {
	GAMCOR_MODE_BYPASS,
	GAMCOR_MODE_RESERVED_1,
	GAMCOR_MODE_USER_PWL,
	GAMCOR_MODE_RESERVED_3
};

enum ipp_output_format {
	IPP_OUTPUT_FORMAT_12_BIT_FIX,
	IPP_OUTPUT_FORMAT_16_BIT_BYPASS,
	IPP_OUTPUT_FORMAT_FLOAT
};

enum expansion_mode {
	EXPANSION_MODE_DYNAMIC,
	EXPANSION_MODE_ZERO
};

struct default_adjustment {
	enum lb_pixel_depth lb_color_depth;
	enum dc_color_space out_color_space;
	enum dc_color_space in_color_space;
	enum dc_color_depth color_depth;
	enum pixel_format surface_pixel_format;
	enum graphics_csc_adjust_type csc_adjust_type;
	bool force_hw_default;
};

struct out_csc_color_matrix {
	enum dc_color_space color_space;
	uint16_t regval[12];
};

enum gamut_remap_select {
	GAMUT_REMAP_BYPASS = 0,
	GAMUT_REMAP_COEFF,
	GAMUT_REMAP_COMA_COEFF,
	GAMUT_REMAP_COMB_COEFF
};

enum opp_regamma {
	OPP_REGAMMA_BYPASS = 0,
	OPP_REGAMMA_SRGB,
	OPP_REGAMMA_XVYCC,
	OPP_REGAMMA_USER
};

enum optc_dsc_mode {
	OPTC_DSC_DISABLED = 0,
	OPTC_DSC_ENABLED_444 = 1, /* 'RGB 444' or 'Simple YCbCr 4:2:2' (4:2:2 upsampled to 4:4:4) */
	OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED = 2 /* Native 4:2:2 or 4:2:0 */
};

struct dc_bias_and_scale {
	uint32_t scale_red;
	uint32_t bias_red;
	uint32_t scale_green;
	uint32_t bias_green;
	uint32_t scale_blue;
	uint32_t bias_blue;
	bool bias_and_scale_valid;
};

enum test_pattern_dyn_range {
	TEST_PATTERN_DYN_RANGE_VESA = 0,
	TEST_PATTERN_DYN_RANGE_CEA
};

enum test_pattern_mode {
	TEST_PATTERN_MODE_COLORSQUARES_RGB = 0,
	TEST_PATTERN_MODE_COLORSQUARES_YCBCR601,
	TEST_PATTERN_MODE_COLORSQUARES_YCBCR709,
	TEST_PATTERN_MODE_VERTICALBARS,
	TEST_PATTERN_MODE_HORIZONTALBARS,
	TEST_PATTERN_MODE_SINGLERAMP_RGB,
	TEST_PATTERN_MODE_DUALRAMP_RGB,
	TEST_PATTERN_MODE_XR_BIAS_RGB
};

enum test_pattern_color_format {
	TEST_PATTERN_COLOR_FORMAT_BPC_6 = 0,
	TEST_PATTERN_COLOR_FORMAT_BPC_8,
	TEST_PATTERN_COLOR_FORMAT_BPC_10,
	TEST_PATTERN_COLOR_FORMAT_BPC_12
};

enum controller_dp_test_pattern {
	CONTROLLER_DP_TEST_PATTERN_D102 = 0,
	CONTROLLER_DP_TEST_PATTERN_SYMBOLERROR,
	CONTROLLER_DP_TEST_PATTERN_PRBS7,
	CONTROLLER_DP_TEST_PATTERN_COLORSQUARES,
	CONTROLLER_DP_TEST_PATTERN_VERTICALBARS,
	CONTROLLER_DP_TEST_PATTERN_HORIZONTALBARS,
	CONTROLLER_DP_TEST_PATTERN_COLORRAMP,
	CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
	CONTROLLER_DP_TEST_PATTERN_RESERVED_8,
	CONTROLLER_DP_TEST_PATTERN_RESERVED_9,
	CONTROLLER_DP_TEST_PATTERN_RESERVED_A,
	CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA,
	CONTROLLER_DP_TEST_PATTERN_SOLID_COLOR
};

enum controller_dp_color_space {
	CONTROLLER_DP_COLOR_SPACE_RGB,
	CONTROLLER_DP_COLOR_SPACE_YCBCR601,
	CONTROLLER_DP_COLOR_SPACE_YCBCR709,
	CONTROLLER_DP_COLOR_SPACE_UDEFINED
};

enum dc_lut_mode {
	LUT_BYPASS,
	LUT_RAM_A,
	LUT_RAM_B
};

/**
 * speakersToChannels
 *
 * @brief
 *  translate speakers to channels
 *
 *  FL  - Front Left
 *  FR  - Front Right
 *  RL  - Rear Left
 *  RR  - Rear Right
 *  RC  - Rear Center
 *  FC  - Front Center
 *  FLC - Front Left Center
 *  FRC - Front Right Center
 *  RLC - Rear Left Center
 *  RRC - Rear Right Center
 *  LFE - Low Freq Effect
 *
 *               FC
 *          FLC      FRC
 *    FL                    FR
 *
 *                    LFE
 *              ()
 *
 *
 *    RL                    RR
 *          RLC      RRC
 *               RC
 *
 *             ch  8   7   6   5   4   3   2   1
 * 0b00000011      -   -   -   -   -   -   FR  FL
 * 0b00000111      -   -   -   -   -   LFE FR  FL
 * 0b00001011      -   -   -   -   FC  -   FR  FL
 * 0b00001111      -   -   -   -   FC  LFE FR  FL
 * 0b00010011      -   -   -   RC  -   -   FR  FL
 * 0b00010111      -   -   -   RC  -   LFE FR  FL
 * 0b00011011      -   -   -   RC  FC  -   FR  FL
 * 0b00011111      -   -   -   RC  FC  LFE FR  FL
 * 0b00110011      -   -   RR  RL  -   -   FR  FL
 * 0b00110111      -   -   RR  RL  -   LFE FR  FL
 * 0b00111011      -   -   RR  RL  FC  -   FR  FL
 * 0b00111111      -   -   RR  RL  FC  LFE FR  FL
 * 0b01110011      -   RC  RR  RL  -   -   FR  FL
 * 0b01110111      -   RC  RR  RL  -   LFE FR  FL
 * 0b01111011      -   RC  RR  RL  FC  -   FR  FL
 * 0b01111111      -   RC  RR  RL  FC  LFE FR  FL
 * 0b11110011      RRC RLC RR  RL  -   -   FR  FL
 * 0b11110111      RRC RLC RR  RL  -   LFE FR  FL
 * 0b11111011      RRC RLC RR  RL  FC  -   FR  FL
 * 0b11111111      RRC RLC RR  RL  FC  LFE FR  FL
 * 0b11000011      FRC FLC -   -   -   -   FR  FL
 * 0b11000111      FRC FLC -   -   -   LFE FR  FL
 * 0b11001011      FRC FLC -   -   FC  -   FR  FL
 * 0b11001111      FRC FLC -   -   FC  LFE FR  FL
 * 0b11010011      FRC FLC -   RC  -   -   FR  FL
 * 0b11010111      FRC FLC -   RC  -   LFE FR  FL
 * 0b11011011      FRC FLC -   RC  FC  -   FR  FL
 * 0b11011111      FRC FLC -   RC  FC  LFE FR  FL
 * 0b11110011      FRC FLC RR  RL  -   -   FR  FL
 * 0b11110111      FRC FLC RR  RL  -   LFE FR  FL
 * 0b11111011      FRC FLC RR  RL  FC  -   FR  FL
 * 0b11111111      FRC FLC RR  RL  FC  LFE FR  FL
 *
 * NOTE(review): several bit patterns (e.g. 0b11110011) appear twice above
 * with different speaker layouts (rear-center pairs vs front-center pairs);
 * presumably the interpretation depends on the CEA speaker-allocation
 * context — confirm against the CEA-861 audio InfoFrame tables.
 *
 * @param
 * speakers - speaker information as it comes from CEA audio block
 */
/* translate speakers to channels */

union audio_cea_channels {
	/* NOTE(review): @all is uint8_t while the bit-field members are
	 * declared uint32_t; there are exactly eight 1-bit fields, so only
	 * the low 8 bits are meaningful. */
	uint8_t all;
	struct audio_cea_channels_bits {
		uint32_t FL:1;
		uint32_t FR:1;
		uint32_t LFE:1;
		uint32_t FC:1;
		uint32_t RL_RC:1;
		uint32_t RR:1;
		uint32_t RC_RLC_FLC:1;
		uint32_t RRC_FRC:1;
	} channels;
};

#endif /* __DAL_HW_SHARED_H__ */
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright 2019 NXP
 */

#include <dt-bindings/clock/imx8mn-clock.h>
#include <dt-bindings/power/imx8mn-power.h>
#include <dt-bindings/reset/imx8mq-reset.h>
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/thermal/thermal.h>

#include "imx8mn-pinfunc.h"

/ {
	interrupt-parent = <&gic>;
	#address-cells = <2>;
	#size-cells = <2>;

	aliases {
		ethernet0 = &fec1;
		gpio0 = &gpio1;
		gpio1 = &gpio2;
		gpio2 = &gpio3;
		gpio3 = &gpio4;
		gpio4 = &gpio5;
		i2c0 = &i2c1;
		i2c1 = &i2c2;
		i2c2 = &i2c3;
		i2c3 = &i2c4;
		mmc0 = &usdhc1;
		mmc1 = &usdhc2;
		mmc2 = &usdhc3;
		serial0 = &uart1;
		serial1 = &uart2;
		serial2 = &uart3;
		serial3 = &uart4;
		spi0 = &ecspi1;
		spi1 = &ecspi2;
		spi2 = &ecspi3;
	};

	cpus {
		#address-cells = <1>;
		#size-cells = <0>;

		idle-states {
			entry-method = "psci";

			/* CPU-suspend idle state entered via PSCI firmware call */
			cpu_pd_wait: cpu-pd-wait {
				compatible = "arm,idle-state";
				arm,psci-suspend-param = <0x0010033>;
				local-timer-stop;
				entry-latency-us = <1000>;
				exit-latency-us = <700>;
				min-residency-us = <2700>;
			};
		};

		A53_0: cpu@0 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0x0>;
			clock-latency = <61036>;
			clocks = <&clk IMX8MN_CLK_ARM>;
			enable-method = "psci";
			i-cache-size = <0x8000>;
			i-cache-line-size = <64>;
			i-cache-sets = <256>;
			d-cache-size = <0x8000>;
			d-cache-line-size = <64>;
			d-cache-sets = <128>;
			next-level-cache = <&A53_L2>;
			operating-points-v2 = <&a53_opp_table>;
			/* Boot CPU carries the speed-grade fuse cell used by cpufreq */
			nvmem-cells = <&cpu_speed_grade>;
			nvmem-cell-names = "speed_grade";
			cpu-idle-states = <&cpu_pd_wait>;
			#cooling-cells = <2>;
		};

		A53_1: cpu@1 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0x1>;
			clock-latency = <61036>;
			clocks = <&clk IMX8MN_CLK_ARM>;
			enable-method = "psci";
			i-cache-size = <0x8000>;
			i-cache-line-size = <64>;
			i-cache-sets = <256>;
			d-cache-size = <0x8000>;
			d-cache-line-size = <64>;
			d-cache-sets = <128>;
			next-level-cache = <&A53_L2>;
			operating-points-v2 = <&a53_opp_table>;
			cpu-idle-states =
<&cpu_pd_wait>;
			#cooling-cells = <2>;
		};

		A53_2: cpu@2 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0x2>;
			clock-latency = <61036>;
			clocks = <&clk IMX8MN_CLK_ARM>;
			enable-method = "psci";
			i-cache-size = <0x8000>;
			i-cache-line-size = <64>;
			i-cache-sets = <256>;
			d-cache-size = <0x8000>;
			d-cache-line-size = <64>;
			d-cache-sets = <128>;
			next-level-cache = <&A53_L2>;
			operating-points-v2 = <&a53_opp_table>;
			cpu-idle-states = <&cpu_pd_wait>;
			#cooling-cells = <2>;
		};

		A53_3: cpu@3 {
			device_type = "cpu";
			compatible = "arm,cortex-a53";
			reg = <0x3>;
			clock-latency = <61036>;
			clocks = <&clk IMX8MN_CLK_ARM>;
			enable-method = "psci";
			i-cache-size = <0x8000>;
			i-cache-line-size = <64>;
			i-cache-sets = <256>;
			d-cache-size = <0x8000>;
			d-cache-line-size = <64>;
			d-cache-sets = <128>;
			next-level-cache = <&A53_L2>;
			operating-points-v2 = <&a53_opp_table>;
			cpu-idle-states = <&cpu_pd_wait>;
			#cooling-cells = <2>;
		};

		/* Unified L2 shared by the four A53 cores above */
		A53_L2: l2-cache0 {
			compatible = "cache";
			cache-level = <2>;
			cache-unified;
			cache-size = <0x80000>;
			cache-line-size = <64>;
			cache-sets = <512>;
		};
	};

	/*
	 * OPPs shared by all four cores (opp-shared). opp-supported-hw
	 * masks presumably gate each OPP on fused speed-grade/market bits
	 * (see the "speed_grade" nvmem cell) — confirm against the
	 * imx8mn fusemap before editing.
	 */
	a53_opp_table: opp-table {
		compatible = "operating-points-v2";
		opp-shared;

		opp-1200000000 {
			opp-hz = /bits/ 64 <1200000000>;
			opp-microvolt = <850000>;
			opp-supported-hw = <0xb00>, <0x7>;
			clock-latency-ns = <150000>;
			opp-suspend;
		};

		opp-1400000000 {
			opp-hz = /bits/ 64 <1400000000>;
			opp-microvolt = <950000>;
			opp-supported-hw = <0x300>, <0x7>;
			clock-latency-ns = <150000>;
			opp-suspend;
		};

		opp-1500000000 {
			opp-hz = /bits/ 64 <1500000000>;
			opp-microvolt = <1000000>;
			opp-supported-hw = <0x100>, <0x3>;
			clock-latency-ns = <150000>;
			opp-suspend;
		};
	};

	/* Board-level fixed clock inputs to the CCM */
	osc_32k: clock-osc-32k {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <32768>;
		clock-output-names = "osc_32k";
	};

	osc_24m: clock-osc-24m {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <24000000>;
		clock-output-names = "osc_24m";
	};

	clk_ext1: clock-ext1 {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <133000000>;
clock-output-names = "clk_ext1";
	};

	clk_ext2: clock-ext2 {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <133000000>;
		clock-output-names = "clk_ext2";
	};

	clk_ext3: clock-ext3 {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <133000000>;
		clock-output-names = "clk_ext3";
	};

	clk_ext4: clock-ext4 {
		compatible = "fixed-clock";
		#clock-cells = <0>;
		clock-frequency = <133000000>;
		clock-output-names = "clk_ext4";
	};

	pmu {
		compatible = "arm,cortex-a53-pmu";
		interrupts = <GIC_PPI 7
			     (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_HIGH)>;
	};

	psci {
		compatible = "arm,psci-1.0";
		method = "smc";
	};

	thermal-zones {
		cpu-thermal {
			polling-delay-passive = <250>;
			polling-delay = <2000>;
			thermal-sensors = <&tmu>;

			trips {
				/* Passive trip: start throttling the A53 cluster */
				cpu_alert0: trip0 {
					temperature = <85000>;
					hysteresis = <2000>;
					type = "passive";
				};

				/* Critical trip: shut the system down */
				cpu_crit0: trip1 {
					temperature = <95000>;
					hysteresis = <2000>;
					type = "critical";
				};
			};

			cooling-maps {
				map0 {
					trip = <&cpu_alert0>;
					cooling-device =
						<&A53_0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
						<&A53_1 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
						<&A53_2 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>,
						<&A53_3 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
				};
			};
		};
	};

	timer {
		compatible = "arm,armv8-timer";
		interrupts = <GIC_PPI 13 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 14 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 11 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>,
			     <GIC_PPI 10 (GIC_CPU_MASK_SIMPLE(4) | IRQ_TYPE_LEVEL_LOW)>;
		clock-frequency = <8000000>;
		arm,no-tick-in-suspend;
	};

	soc: soc@0 {
		compatible = "fsl,imx8mn-soc", "simple-bus";
		#address-cells = <1>;
		#size-cells = <1>;
		ranges = <0x0 0x0 0x0 0x3e000000>;
		dma-ranges = <0x40000000 0x0 0x40000000 0xc0000000>;
		nvmem-cells = <&imx8mn_uid>;
		nvmem-cell-names = "soc_unique_id";

		aips1: bus@30000000 {
			compatible = "fsl,aips-bus", "simple-bus";
			reg = <0x30000000 0x400000>;
			#address-cells = <1>;
			#size-cells = <1>;
			ranges;

			spba2: spba-bus@30000000 {
				compatible = "fsl,spba-bus", "simple-bus";
				#address-cells = <1>;
#size-cells = <1>; reg = <0x30000000 0x100000>; ranges; sai2: sai@30020000 { compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; reg = <0x30020000 0x10000>; #sound-dai-cells = <0>; interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SAI2_IPG>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_SAI2_ROOT>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_DUMMY>; clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&sdma2 2 2 0>, <&sdma2 3 2 0>; dma-names = "rx", "tx"; status = "disabled"; }; sai3: sai@30030000 { compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; reg = <0x30030000 0x10000>; #sound-dai-cells = <0>; interrupts = <GIC_SPI 50 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SAI3_IPG>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_SAI3_ROOT>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_DUMMY>; clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&sdma2 4 2 0>, <&sdma2 5 2 0>; dma-names = "rx", "tx"; status = "disabled"; }; sai5: sai@30050000 { compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; reg = <0x30050000 0x10000>; #sound-dai-cells = <0>; interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SAI5_IPG>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_SAI5_ROOT>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_DUMMY>; clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&sdma2 8 2 0>, <&sdma2 9 2 0>; dma-names = "rx", "tx"; fsl,shared-interrupt; fsl,dataline = <0 0xf 0xf>; status = "disabled"; }; sai6: sai@30060000 { compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; reg = <0x30060000 0x10000>; #sound-dai-cells = <0>; interrupts = <GIC_SPI 90 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SAI6_IPG>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_SAI6_ROOT>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_DUMMY>; clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&sdma2 10 2 0>, <&sdma2 11 2 0>; dma-names = "rx", "tx"; status = "disabled"; }; micfil: audio-controller@30080000 { compatible = "fsl,imx8mm-micfil"; reg = 
<0x30080000 0x10000>; interrupts = <GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 44 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 45 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_PDM_IPG>, <&clk IMX8MN_CLK_PDM_ROOT>, <&clk IMX8MN_AUDIO_PLL1_OUT>, <&clk IMX8MN_AUDIO_PLL2_OUT>, <&clk IMX8MN_CLK_EXT3>; clock-names = "ipg_clk", "ipg_clk_app", "pll8k", "pll11k", "clkext3"; dmas = <&sdma2 24 25 0x80000000>; dma-names = "rx"; #sound-dai-cells = <0>; status = "disabled"; }; spdif1: spdif@30090000 { compatible = "fsl,imx35-spdif"; reg = <0x30090000 0x10000>; interrupts = <GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_AUDIO_AHB>, /* core */ <&clk IMX8MN_CLK_24M>, /* rxtx0 */ <&clk IMX8MN_CLK_SPDIF1>, /* rxtx1 */ <&clk IMX8MN_CLK_DUMMY>, /* rxtx2 */ <&clk IMX8MN_CLK_DUMMY>, /* rxtx3 */ <&clk IMX8MN_CLK_DUMMY>, /* rxtx4 */ <&clk IMX8MN_CLK_AUDIO_AHB>, /* rxtx5 */ <&clk IMX8MN_CLK_DUMMY>, /* rxtx6 */ <&clk IMX8MN_CLK_DUMMY>, /* rxtx7 */ <&clk IMX8MN_CLK_DUMMY>; /* spba */ clock-names = "core", "rxtx0", "rxtx1", "rxtx2", "rxtx3", "rxtx4", "rxtx5", "rxtx6", "rxtx7", "spba"; dmas = <&sdma2 28 18 0>, <&sdma2 29 18 0>; dma-names = "rx", "tx"; status = "disabled"; }; sai7: sai@300b0000 { compatible = "fsl,imx8mn-sai", "fsl,imx8mq-sai"; reg = <0x300b0000 0x10000>; #sound-dai-cells = <0>; interrupts = <GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SAI7_IPG>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_SAI7_ROOT>, <&clk IMX8MN_CLK_DUMMY>, <&clk IMX8MN_CLK_DUMMY>; clock-names = "bus", "mclk0", "mclk1", "mclk2", "mclk3"; dmas = <&sdma2 12 2 0>, <&sdma2 13 2 0>; dma-names = "rx", "tx"; status = "disabled"; }; easrc: easrc@300c0000 { compatible = "fsl,imx8mn-easrc"; reg = <0x300c0000 0x10000>; interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_ASRC_ROOT>; clock-names = "mem"; dmas = <&sdma2 16 23 0> , <&sdma2 17 23 0>, <&sdma2 18 23 0> , <&sdma2 19 23 0>, <&sdma2 20 23 0> , <&sdma2 21 23 0>, <&sdma2 22 23 0> , <&sdma2 23 23 
0>; dma-names = "ctx0_rx", "ctx0_tx", "ctx1_rx", "ctx1_tx", "ctx2_rx", "ctx2_tx", "ctx3_rx", "ctx3_tx"; firmware-name = "imx/easrc/easrc-imx8mn.bin"; fsl,asrc-rate = <8000>; fsl,asrc-format = <2>; status = "disabled"; }; }; gpio1: gpio@30200000 { compatible = "fsl,imx8mn-gpio", "fsl,imx35-gpio"; reg = <0x30200000 0x10000>; interrupts = <GIC_SPI 64 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 65 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_GPIO1_ROOT>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; gpio-ranges = <&iomuxc 0 10 30>; }; gpio2: gpio@30210000 { compatible = "fsl,imx8mn-gpio", "fsl,imx35-gpio"; reg = <0x30210000 0x10000>; interrupts = <GIC_SPI 66 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 67 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_GPIO2_ROOT>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; gpio-ranges = <&iomuxc 0 40 21>; }; gpio3: gpio@30220000 { compatible = "fsl,imx8mn-gpio", "fsl,imx35-gpio"; reg = <0x30220000 0x10000>; interrupts = <GIC_SPI 68 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_GPIO3_ROOT>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; gpio-ranges = <&iomuxc 0 61 26>; }; gpio4: gpio@30230000 { compatible = "fsl,imx8mn-gpio", "fsl,imx35-gpio"; reg = <0x30230000 0x10000>; interrupts = <GIC_SPI 70 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 71 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_GPIO4_ROOT>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; gpio-ranges = <&iomuxc 21 108 11>; }; gpio5: gpio@30240000 { compatible = "fsl,imx8mn-gpio", "fsl,imx35-gpio"; reg = <0x30240000 0x10000>; interrupts = <GIC_SPI 72 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 73 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_GPIO5_ROOT>; gpio-controller; #gpio-cells = <2>; interrupt-controller; #interrupt-cells = <2>; gpio-ranges = <&iomuxc 0 119 30>; }; tmu: tmu@30260000 { compatible = "fsl,imx8mn-tmu", "fsl,imx8mm-tmu"; reg = 
<0x30260000 0x10000>; clocks = <&clk IMX8MN_CLK_TMU_ROOT>; nvmem-cells = <&tmu_calib>; nvmem-cell-names = "calib"; #thermal-sensor-cells = <0>; }; wdog1: watchdog@30280000 { compatible = "fsl,imx8mn-wdt", "fsl,imx21-wdt"; reg = <0x30280000 0x10000>; interrupts = <GIC_SPI 78 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_WDOG1_ROOT>; status = "disabled"; }; wdog2: watchdog@30290000 { compatible = "fsl,imx8mn-wdt", "fsl,imx21-wdt"; reg = <0x30290000 0x10000>; interrupts = <GIC_SPI 79 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_WDOG2_ROOT>; status = "disabled"; }; wdog3: watchdog@302a0000 { compatible = "fsl,imx8mn-wdt", "fsl,imx21-wdt"; reg = <0x302a0000 0x10000>; interrupts = <GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_WDOG3_ROOT>; status = "disabled"; }; sdma3: dma-controller@302b0000 { compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma"; reg = <0x302b0000 0x10000>; interrupts = <GIC_SPI 34 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SDMA3_ROOT>, <&clk IMX8MN_CLK_SDMA3_ROOT>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; }; sdma2: dma-controller@302c0000 { compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma"; reg = <0x302c0000 0x10000>; interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SDMA2_ROOT>, <&clk IMX8MN_CLK_SDMA2_ROOT>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; }; iomuxc: pinctrl@30330000 { compatible = "fsl,imx8mn-iomuxc"; reg = <0x30330000 0x10000>; }; gpr: syscon@30340000 { compatible = "fsl,imx8mn-iomuxc-gpr", "syscon"; reg = <0x30340000 0x10000>; }; ocotp: efuse@30350000 { compatible = "fsl,imx8mn-ocotp", "fsl,imx8mm-ocotp", "syscon"; reg = <0x30350000 0x10000>; clocks = <&clk IMX8MN_CLK_OCOTP_ROOT>; #address-cells = <1>; #size-cells = <1>; /* * The register address below maps to the MX8M * Fusemap Description Table entries this way. 
* Assuming * reg = <ADDR SIZE>; * then * Fuse Address = (ADDR * 4) + 0x400 * Note that if SIZE is greater than 4, then * each subsequent fuse is located at offset * +0x10 in Fusemap Description Table (e.g. * reg = <0x4 0x8> describes fuses 0x410 and * 0x420). */ imx8mn_uid: unique-id@4 { /* 0x410-0x420 */ reg = <0x4 0x8>; }; cpu_speed_grade: speed-grade@10 { /* 0x440 */ reg = <0x10 4>; }; tmu_calib: calib@3c { /* 0x4f0 */ reg = <0x3c 4>; }; fec_mac_address: mac-address@90 { /* 0x640 */ reg = <0x90 6>; }; }; anatop: clock-controller@30360000 { compatible = "fsl,imx8mn-anatop", "fsl,imx8mm-anatop"; reg = <0x30360000 0x10000>; #clock-cells = <1>; }; snvs: snvs@30370000 { compatible = "fsl,sec-v4.0-mon","syscon", "simple-mfd"; reg = <0x30370000 0x10000>; snvs_rtc: snvs-rtc-lp { compatible = "fsl,sec-v4.0-mon-rtc-lp"; regmap = <&snvs>; offset = <0x34>; interrupts = <GIC_SPI 19 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 20 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SNVS_ROOT>; clock-names = "snvs-rtc"; }; snvs_pwrkey: snvs-powerkey { compatible = "fsl,sec-v4.0-pwrkey"; regmap = <&snvs>; interrupts = <GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SNVS_ROOT>; clock-names = "snvs-pwrkey"; linux,keycode = <KEY_POWER>; wakeup-source; status = "disabled"; }; }; clk: clock-controller@30380000 { compatible = "fsl,imx8mn-ccm"; reg = <0x30380000 0x10000>; interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>; #clock-cells = <1>; clocks = <&osc_32k>, <&osc_24m>, <&clk_ext1>, <&clk_ext2>, <&clk_ext3>, <&clk_ext4>; clock-names = "osc_32k", "osc_24m", "clk_ext1", "clk_ext2", "clk_ext3", "clk_ext4"; assigned-clocks = <&clk IMX8MN_CLK_A53_SRC>, <&clk IMX8MN_CLK_A53_CORE>, <&clk IMX8MN_CLK_NOC>, <&clk IMX8MN_CLK_AUDIO_AHB>, <&clk IMX8MN_CLK_IPG_AUDIO_ROOT>, <&clk IMX8MN_SYS_PLL3>, <&clk IMX8MN_AUDIO_PLL1>, <&clk IMX8MN_AUDIO_PLL2>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_800M>, <&clk IMX8MN_ARM_PLL_OUT>, <&clk IMX8MN_SYS_PLL3_OUT>, <&clk 
IMX8MN_SYS_PLL1_800M>; assigned-clock-rates = <0>, <0>, <0>, <400000000>, <400000000>, <600000000>, <393216000>, <361267200>; }; src: reset-controller@30390000 { compatible = "fsl,imx8mn-src", "fsl,imx8mq-src", "syscon"; reg = <0x30390000 0x10000>; interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>; #reset-cells = <1>; }; gpc: gpc@303a0000 { compatible = "fsl,imx8mn-gpc"; reg = <0x303a0000 0x10000>; interrupt-parent = <&gic>; interrupts = <GIC_SPI 87 IRQ_TYPE_LEVEL_HIGH>; pgc { #address-cells = <1>; #size-cells = <0>; pgc_hsiomix: power-domain@0 { #power-domain-cells = <0>; reg = <IMX8MN_POWER_DOMAIN_HSIOMIX>; clocks = <&clk IMX8MN_CLK_USB_BUS>; }; pgc_otg1: power-domain@1 { #power-domain-cells = <0>; reg = <IMX8MN_POWER_DOMAIN_OTG1>; }; pgc_gpumix: power-domain@2 { #power-domain-cells = <0>; reg = <IMX8MN_POWER_DOMAIN_GPUMIX>; clocks = <&clk IMX8MN_CLK_GPU_CORE_ROOT>, <&clk IMX8MN_CLK_GPU_SHADER>, <&clk IMX8MN_CLK_GPU_BUS_ROOT>, <&clk IMX8MN_CLK_GPU_AHB>; }; pgc_dispmix: power-domain@3 { #power-domain-cells = <0>; reg = <IMX8MN_POWER_DOMAIN_DISPMIX>; clocks = <&clk IMX8MN_CLK_DISP_AXI_ROOT>, <&clk IMX8MN_CLK_DISP_APB_ROOT>; }; pgc_mipi: power-domain@4 { #power-domain-cells = <0>; reg = <IMX8MN_POWER_DOMAIN_MIPI>; power-domains = <&pgc_dispmix>; }; }; }; }; aips2: bus@30400000 { compatible = "fsl,aips-bus", "simple-bus"; reg = <0x30400000 0x400000>; #address-cells = <1>; #size-cells = <1>; ranges; pwm1: pwm@30660000 { compatible = "fsl,imx8mn-pwm", "fsl,imx27-pwm"; reg = <0x30660000 0x10000>; interrupts = <GIC_SPI 81 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_PWM1_ROOT>, <&clk IMX8MN_CLK_PWM1_ROOT>; clock-names = "ipg", "per"; #pwm-cells = <3>; status = "disabled"; }; pwm2: pwm@30670000 { compatible = "fsl,imx8mn-pwm", "fsl,imx27-pwm"; reg = <0x30670000 0x10000>; interrupts = <GIC_SPI 82 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_PWM2_ROOT>, <&clk IMX8MN_CLK_PWM2_ROOT>; clock-names = "ipg", "per"; #pwm-cells = <3>; status = "disabled"; }; pwm3: pwm@30680000 { 
compatible = "fsl,imx8mn-pwm", "fsl,imx27-pwm"; reg = <0x30680000 0x10000>; interrupts = <GIC_SPI 83 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_PWM3_ROOT>, <&clk IMX8MN_CLK_PWM3_ROOT>; clock-names = "ipg", "per"; #pwm-cells = <3>; status = "disabled"; }; pwm4: pwm@30690000 { compatible = "fsl,imx8mn-pwm", "fsl,imx27-pwm"; reg = <0x30690000 0x10000>; interrupts = <GIC_SPI 84 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_PWM4_ROOT>, <&clk IMX8MN_CLK_PWM4_ROOT>; clock-names = "ipg", "per"; #pwm-cells = <3>; status = "disabled"; }; system_counter: timer@306a0000 { compatible = "nxp,sysctr-timer"; reg = <0x306a0000 0x20000>; interrupts = <GIC_SPI 47 IRQ_TYPE_LEVEL_HIGH>; clocks = <&osc_24m>; clock-names = "per"; }; }; aips3: bus@30800000 { compatible = "fsl,aips-bus", "simple-bus"; reg = <0x30800000 0x400000>; #address-cells = <1>; #size-cells = <1>; ranges; spba1: spba-bus@30800000 { compatible = "fsl,spba-bus", "simple-bus"; #address-cells = <1>; #size-cells = <1>; reg = <0x30800000 0x100000>; ranges; ecspi1: spi@30820000 { compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi"; #address-cells = <1>; #size-cells = <0>; reg = <0x30820000 0x10000>; interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_ECSPI1_ROOT>, <&clk IMX8MN_CLK_ECSPI1_ROOT>; clock-names = "ipg", "per"; dmas = <&sdma1 0 7 1>, <&sdma1 1 7 2>; dma-names = "rx", "tx"; status = "disabled"; }; ecspi2: spi@30830000 { compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi"; #address-cells = <1>; #size-cells = <0>; reg = <0x30830000 0x10000>; interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_ECSPI2_ROOT>, <&clk IMX8MN_CLK_ECSPI2_ROOT>; clock-names = "ipg", "per"; dmas = <&sdma1 2 7 1>, <&sdma1 3 7 2>; dma-names = "rx", "tx"; status = "disabled"; }; ecspi3: spi@30840000 { compatible = "fsl,imx8mn-ecspi", "fsl,imx51-ecspi"; #address-cells = <1>; #size-cells = <0>; reg = <0x30840000 0x10000>; interrupts = <GIC_SPI 33 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk 
IMX8MN_CLK_ECSPI3_ROOT>, <&clk IMX8MN_CLK_ECSPI3_ROOT>; clock-names = "ipg", "per"; dmas = <&sdma1 4 7 1>, <&sdma1 5 7 2>; dma-names = "rx", "tx"; status = "disabled"; }; uart1: serial@30860000 { compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart"; reg = <0x30860000 0x10000>; interrupts = <GIC_SPI 26 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_UART1_ROOT>, <&clk IMX8MN_CLK_UART1_ROOT>; clock-names = "ipg", "per"; dmas = <&sdma1 22 4 0>, <&sdma1 23 4 0>; dma-names = "rx", "tx"; status = "disabled"; }; uart3: serial@30880000 { compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart"; reg = <0x30880000 0x10000>; interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_UART3_ROOT>, <&clk IMX8MN_CLK_UART3_ROOT>; clock-names = "ipg", "per"; dmas = <&sdma1 26 4 0>, <&sdma1 27 4 0>; dma-names = "rx", "tx"; status = "disabled"; }; uart2: serial@30890000 { compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart"; reg = <0x30890000 0x10000>; interrupts = <GIC_SPI 27 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_UART2_ROOT>, <&clk IMX8MN_CLK_UART2_ROOT>; clock-names = "ipg", "per"; status = "disabled"; }; }; crypto: crypto@30900000 { compatible = "fsl,sec-v4.0"; #address-cells = <1>; #size-cells = <1>; reg = <0x30900000 0x40000>; ranges = <0 0x30900000 0x40000>; interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_AHB>, <&clk IMX8MN_CLK_IPG_ROOT>; clock-names = "aclk", "ipg"; sec_jr0: jr@1000 { compatible = "fsl,sec-v4.0-job-ring"; reg = <0x1000 0x1000>; interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>; status = "disabled"; }; sec_jr1: jr@2000 { compatible = "fsl,sec-v4.0-job-ring"; reg = <0x2000 0x1000>; interrupts = <GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>; }; sec_jr2: jr@3000 { compatible = "fsl,sec-v4.0-job-ring"; reg = <0x3000 0x1000>; interrupts = <GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>; }; }; i2c1: i2c@30a20000 { compatible = "fsl,imx8mn-i2c", "fsl,imx21-i2c"; #address-cells = <1>; #size-cells = <0>; reg = <0x30a20000 0x10000>; interrupts = <GIC_SPI 35 
IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_I2C1_ROOT>; status = "disabled"; }; i2c2: i2c@30a30000 { compatible = "fsl,imx8mn-i2c", "fsl,imx21-i2c"; #address-cells = <1>; #size-cells = <0>; reg = <0x30a30000 0x10000>; interrupts = <GIC_SPI 36 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_I2C2_ROOT>; status = "disabled"; }; i2c3: i2c@30a40000 { #address-cells = <1>; #size-cells = <0>; compatible = "fsl,imx8mn-i2c", "fsl,imx21-i2c"; reg = <0x30a40000 0x10000>; interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_I2C3_ROOT>; status = "disabled"; }; i2c4: i2c@30a50000 { compatible = "fsl,imx8mn-i2c", "fsl,imx21-i2c"; #address-cells = <1>; #size-cells = <0>; reg = <0x30a50000 0x10000>; interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_I2C4_ROOT>; status = "disabled"; }; uart4: serial@30a60000 { compatible = "fsl,imx8mn-uart", "fsl,imx6q-uart"; reg = <0x30a60000 0x10000>; interrupts = <GIC_SPI 29 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_UART4_ROOT>, <&clk IMX8MN_CLK_UART4_ROOT>; clock-names = "ipg", "per"; dmas = <&sdma1 28 4 0>, <&sdma1 29 4 0>; dma-names = "rx", "tx"; status = "disabled"; }; mu: mailbox@30aa0000 { compatible = "fsl,imx8mn-mu", "fsl,imx6sx-mu"; reg = <0x30aa0000 0x10000>; interrupts = <GIC_SPI 88 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_MU_ROOT>; #mbox-cells = <2>; }; usdhc1: mmc@30b40000 { compatible = "fsl,imx8mn-usdhc", "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc"; reg = <0x30b40000 0x10000>; interrupts = <GIC_SPI 22 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_IPG_ROOT>, <&clk IMX8MN_CLK_NAND_USDHC_BUS>, <&clk IMX8MN_CLK_USDHC1_ROOT>; clock-names = "ipg", "ahb", "per"; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; bus-width = <4>; status = "disabled"; }; usdhc2: mmc@30b50000 { compatible = "fsl,imx8mn-usdhc", "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc"; reg = <0x30b50000 0x10000>; interrupts = <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_IPG_ROOT>, <&clk 
IMX8MN_CLK_NAND_USDHC_BUS>, <&clk IMX8MN_CLK_USDHC2_ROOT>; clock-names = "ipg", "ahb", "per"; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; bus-width = <4>; status = "disabled"; }; usdhc3: mmc@30b60000 { compatible = "fsl,imx8mn-usdhc", "fsl,imx8mm-usdhc", "fsl,imx7d-usdhc"; reg = <0x30b60000 0x10000>; interrupts = <GIC_SPI 24 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_IPG_ROOT>, <&clk IMX8MN_CLK_NAND_USDHC_BUS>, <&clk IMX8MN_CLK_USDHC3_ROOT>; clock-names = "ipg", "ahb", "per"; fsl,tuning-start-tap = <20>; fsl,tuning-step = <2>; bus-width = <4>; status = "disabled"; }; flexspi: spi@30bb0000 { #address-cells = <1>; #size-cells = <0>; compatible = "nxp,imx8mm-fspi"; reg = <0x30bb0000 0x10000>, <0x8000000 0x10000000>; reg-names = "fspi_base", "fspi_mmap"; interrupts = <GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_QSPI_ROOT>, <&clk IMX8MN_CLK_QSPI_ROOT>; clock-names = "fspi_en", "fspi"; status = "disabled"; }; sdma1: dma-controller@30bd0000 { compatible = "fsl,imx8mn-sdma", "fsl,imx8mq-sdma"; reg = <0x30bd0000 0x10000>; interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_SDMA1_ROOT>, <&clk IMX8MN_CLK_AHB>; clock-names = "ipg", "ahb"; #dma-cells = <3>; fsl,sdma-ram-script-name = "imx/sdma/sdma-imx7d.bin"; }; fec1: ethernet@30be0000 { compatible = "fsl,imx8mn-fec", "fsl,imx8mq-fec", "fsl,imx6sx-fec"; reg = <0x30be0000 0x10000>; interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_ENET1_ROOT>, <&clk IMX8MN_CLK_ENET1_ROOT>, <&clk IMX8MN_CLK_ENET_TIMER>, <&clk IMX8MN_CLK_ENET_REF>, <&clk IMX8MN_CLK_ENET_PHY_REF>; clock-names = "ipg", "ahb", "ptp", "enet_clk_ref", "enet_out"; assigned-clocks = <&clk IMX8MN_CLK_ENET_AXI>, <&clk IMX8MN_CLK_ENET_TIMER>, <&clk IMX8MN_CLK_ENET_REF>, <&clk IMX8MN_CLK_ENET_PHY_REF>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>, <&clk IMX8MN_SYS_PLL2_100M>, <&clk 
IMX8MN_SYS_PLL2_125M>, <&clk IMX8MN_SYS_PLL2_50M>; assigned-clock-rates = <0>, <100000000>, <125000000>, <0>; fsl,num-tx-queues = <3>; fsl,num-rx-queues = <3>; nvmem-cells = <&fec_mac_address>; nvmem-cell-names = "mac-address"; fsl,stop-mode = <&gpr 0x10 3>; status = "disabled"; }; }; aips4: bus@32c00000 { compatible = "fsl,aips-bus", "simple-bus"; reg = <0x32c00000 0x400000>; #address-cells = <1>; #size-cells = <1>; ranges; lcdif: lcdif@32e00000 { compatible = "fsl,imx8mn-lcdif", "fsl,imx6sx-lcdif"; reg = <0x32e00000 0x10000>; clocks = <&clk IMX8MN_CLK_DISP_PIXEL_ROOT>, <&clk IMX8MN_CLK_DISP_APB_ROOT>, <&clk IMX8MN_CLK_DISP_AXI_ROOT>; clock-names = "pix", "axi", "disp_axi"; interrupts = <GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_LCDIF>; status = "disabled"; port { lcdif_to_dsim: endpoint { remote-endpoint = <&dsim_from_lcdif>; }; }; }; mipi_dsi: dsi@32e10000 { compatible = "fsl,imx8mn-mipi-dsim", "fsl,imx8mm-mipi-dsim"; reg = <0x32e10000 0x400>; clocks = <&clk IMX8MN_CLK_DSI_CORE>, <&clk IMX8MN_CLK_DSI_PHY_REF>; clock-names = "bus_clk", "sclk_mipi"; interrupts = <GIC_SPI 18 IRQ_TYPE_LEVEL_HIGH>; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_MIPI_DSI>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; dsim_from_lcdif: endpoint { remote-endpoint = <&lcdif_to_dsim>; }; }; port@1 { reg = <1>; mipi_dsi_out: endpoint { }; }; }; }; isi: isi@32e20000 { compatible = "fsl,imx8mn-isi"; reg = <0x32e20000 0x8000>; interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_DISP_AXI_ROOT>, <&clk IMX8MN_CLK_DISP_APB_ROOT>; clock-names = "axi", "apb"; fsl,blk-ctrl = <&disp_blk_ctrl>; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_ISI>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; isi_in: endpoint { remote-endpoint = <&mipi_csi_out>; }; }; }; }; disp_blk_ctrl: blk-ctrl@32e28000 { compatible = "fsl,imx8mn-disp-blk-ctrl", "syscon"; reg = 
<0x32e28000 0x100>; power-domains = <&pgc_dispmix>, <&pgc_dispmix>, <&pgc_dispmix>, <&pgc_mipi>, <&pgc_mipi>; power-domain-names = "bus", "isi", "lcdif", "mipi-dsi", "mipi-csi"; clocks = <&clk IMX8MN_CLK_DISP_AXI>, <&clk IMX8MN_CLK_DISP_APB>, <&clk IMX8MN_CLK_DISP_AXI_ROOT>, <&clk IMX8MN_CLK_DISP_APB_ROOT>, <&clk IMX8MN_CLK_DISP_AXI_ROOT>, <&clk IMX8MN_CLK_DISP_APB_ROOT>, <&clk IMX8MN_CLK_DISP_PIXEL_ROOT>, <&clk IMX8MN_CLK_DSI_CORE>, <&clk IMX8MN_CLK_DSI_PHY_REF>, <&clk IMX8MN_CLK_CSI1_PHY_REF>, <&clk IMX8MN_CLK_CAMERA_PIXEL_ROOT>; clock-names = "disp_axi", "disp_apb", "disp_axi_root", "disp_apb_root", "lcdif-axi", "lcdif-apb", "lcdif-pix", "dsi-pclk", "dsi-ref", "csi-aclk", "csi-pclk"; assigned-clocks = <&clk IMX8MN_CLK_DSI_CORE>, <&clk IMX8MN_CLK_DSI_PHY_REF>, <&clk IMX8MN_CLK_DISP_PIXEL>, <&clk IMX8MN_CLK_DISP_AXI>, <&clk IMX8MN_CLK_DISP_APB>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_266M>, <&clk IMX8MN_CLK_24M>, <&clk IMX8MN_VIDEO_PLL1_OUT>, <&clk IMX8MN_SYS_PLL2_1000M>, <&clk IMX8MN_SYS_PLL1_800M>; assigned-clock-rates = <266000000>, <24000000>, <24000000>, <500000000>, <200000000>; #power-domain-cells = <1>; }; mipi_csi: mipi-csi@32e30000 { compatible = "fsl,imx8mm-mipi-csi2"; reg = <0x32e30000 0x1000>; interrupts = <GIC_SPI 17 IRQ_TYPE_LEVEL_HIGH>; assigned-clocks = <&clk IMX8MN_CLK_CAMERA_PIXEL>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_1000M>; assigned-clock-rates = <333000000>; clock-frequency = <333000000>; clocks = <&clk IMX8MN_CLK_DISP_APB_ROOT>, <&clk IMX8MN_CLK_CAMERA_PIXEL>, <&clk IMX8MN_CLK_CSI1_PHY_REF>, <&clk IMX8MN_CLK_DISP_AXI_ROOT>; clock-names = "pclk", "wrap", "phy", "axi"; power-domains = <&disp_blk_ctrl IMX8MN_DISPBLK_PD_MIPI_CSI>; status = "disabled"; ports { #address-cells = <1>; #size-cells = <0>; port@0 { reg = <0>; }; port@1 { reg = <1>; mipi_csi_out: endpoint { remote-endpoint = <&isi_in>; }; }; }; }; usbotg1: usb@32e40000 { compatible = "fsl,imx8mn-usb", "fsl,imx7d-usb", "fsl,imx27-usb"; reg = <0x32e40000 0x200>; 
interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_USB1_CTRL_ROOT>; assigned-clocks = <&clk IMX8MN_CLK_USB_BUS>; assigned-clock-parents = <&clk IMX8MN_SYS_PLL2_500M>; phys = <&usbphynop1>; fsl,usbmisc = <&usbmisc1 0>; power-domains = <&pgc_hsiomix>; status = "disabled"; }; usbmisc1: usbmisc@32e40200 { compatible = "fsl,imx8mn-usbmisc", "fsl,imx7d-usbmisc", "fsl,imx6q-usbmisc"; #index-cells = <1>; reg = <0x32e40200 0x200>; }; }; dma_apbh: dma-controller@33000000 { compatible = "fsl,imx7d-dma-apbh", "fsl,imx28-dma-apbh"; reg = <0x33000000 0x2000>; interrupts = <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>; #dma-cells = <1>; dma-channels = <4>; clocks = <&clk IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK>; }; gpmi: nand-controller@33002000 { compatible = "fsl,imx8mn-gpmi-nand", "fsl,imx7d-gpmi-nand"; #address-cells = <1>; #size-cells = <0>; reg = <0x33002000 0x2000>, <0x33004000 0x4000>; reg-names = "gpmi-nand", "bch"; interrupts = <GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>; interrupt-names = "bch"; clocks = <&clk IMX8MN_CLK_NAND_ROOT>, <&clk IMX8MN_CLK_NAND_USDHC_BUS_RAWNAND_CLK>; clock-names = "gpmi_io", "gpmi_bch_apb"; dmas = <&dma_apbh 0>; dma-names = "rx-tx"; status = "disabled"; }; gpu: gpu@38000000 { compatible = "vivante,gc"; reg = <0x38000000 0x8000>; interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>; clocks = <&clk IMX8MN_CLK_GPU_AHB>, <&clk IMX8MN_CLK_GPU_BUS_ROOT>, <&clk IMX8MN_CLK_GPU_CORE_ROOT>, <&clk IMX8MN_CLK_GPU_SHADER>; clock-names = "reg", "bus", "core", "shader"; assigned-clocks = <&clk IMX8MN_CLK_GPU_CORE>, <&clk IMX8MN_CLK_GPU_SHADER>, <&clk IMX8MN_CLK_GPU_AXI>, <&clk IMX8MN_CLK_GPU_AHB>, <&clk IMX8MN_GPU_PLL>; assigned-clock-parents = <&clk IMX8MN_GPU_PLL_OUT>, <&clk IMX8MN_GPU_PLL_OUT>, <&clk IMX8MN_SYS_PLL1_800M>, <&clk IMX8MN_SYS_PLL1_800M>; assigned-clock-rates = <400000000>, <400000000>, <800000000>, <400000000>, <1200000000>; power-domains = 
<&pgc_gpumix>;
		};

		/* GICv3: distributor at 0x38800000, redistributors at 0x38880000 */
		gic: interrupt-controller@38800000 {
			compatible = "arm,gic-v3";
			reg = <0x38800000 0x10000>,
			      <0x38880000 0xc0000>;
			#interrupt-cells = <3>;
			interrupt-controller;
			interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_HIGH>;
		};

		ddrc: memory-controller@3d400000 {
			compatible = "fsl,imx8mn-ddrc", "fsl,imx8m-ddrc";
			reg = <0x3d400000 0x400000>;
			clock-names = "core", "pll", "alt", "apb";
			clocks = <&clk IMX8MN_CLK_DRAM_CORE>,
				 <&clk IMX8MN_DRAM_PLL>,
				 <&clk IMX8MN_CLK_DRAM_ALT>,
				 <&clk IMX8MN_CLK_DRAM_APB>;
		};

		ddr-pmu@3d800000 {
			compatible = "fsl,imx8mn-ddr-pmu", "fsl,imx8m-ddr-pmu";
			reg = <0x3d800000 0x400000>;
			interrupts = <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>;
		};
	};

	/* Dummy USB PHY for the ChipIdea OTG controller */
	usbphynop1: usbphynop1 {
		#phy-cells = <0>;
		compatible = "usb-nop-xceiv";
		clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
		assigned-clocks = <&clk IMX8MN_CLK_USB_PHY_REF>;
		assigned-clock-parents = <&clk IMX8MN_SYS_PLL1_100M>;
		clock-names = "main_clk";
		power-domains = <&pgc_otg1>;
	};
};
// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * https://beagleboard.org/ai-64
 * Copyright (C) 2022-2024 Texas Instruments Incorporated - https://www.ti.com/
 * Copyright (C) 2022-2024 Jason Kridner, BeagleBoard.org Foundation
 * Copyright (C) 2022-2024 Robert Nelson, BeagleBoard.org Foundation
 */

/dts-v1/;

#include "k3-j721e.dtsi"
#include <dt-bindings/gpio/gpio.h>
#include <dt-bindings/input/input.h>
#include <dt-bindings/leds/common.h>
#include <dt-bindings/net/ti-dp83867.h>
#include <dt-bindings/phy/phy-cadence.h>

/ {
	compatible = "beagle,j721e-beagleboneai64", "ti,j721e";
	model = "BeagleBoard.org BeagleBone AI-64";

	aliases {
		serial0 = &wkup_uart0;
		serial2 = &main_uart0;
		mmc0 = &main_sdhci0;
		mmc1 = &main_sdhci1;
		i2c0 = &wkup_i2c0;
		i2c1 = &main_i2c6;
		i2c2 = &main_i2c2;
		i2c3 = &main_i2c4;
	};

	chosen {
		stdout-path = "serial2:115200n8";
	};

	memory@80000000 {
		device_type = "memory";
		/* 4G RAM */
		reg = <0x00000000 0x80000000 0x00000000 0x80000000>,
		      <0x00000008 0x80000000 0x00000000 0x80000000>;
	};

	reserved_memory: reserved-memory {
		#address-cells = <2>;
		#size-cells = <2>;
		ranges;

		secure_ddr: optee@9e800000 {
			reg = <0x00 0x9e800000 0x00 0x01800000>;
			no-map;
		};

		mcu_r5fss0_core0_dma_memory_region: r5f-dma-memory@a0000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa0000000 0x00 0x100000>;
			no-map;
		};

		mcu_r5fss0_core0_memory_region: r5f-memory@a0100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa0100000 0x00 0xf00000>;
			no-map;
		};

		mcu_r5fss0_core1_dma_memory_region: r5f-dma-memory@a1000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa1000000 0x00 0x100000>;
			no-map;
		};

		mcu_r5fss0_core1_memory_region: r5f-memory@a1100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa1100000 0x00 0xf00000>;
			no-map;
		};

		main_r5fss0_core0_dma_memory_region: r5f-dma-memory@a2000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa2000000 0x00 0x100000>;
			no-map;
		};

		main_r5fss0_core0_memory_region: r5f-memory@a2100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa2100000 0x00 0xf00000>;
			no-map;
		};

		main_r5fss0_core1_dma_memory_region: r5f-dma-memory@a3000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa3000000 0x00 0x100000>;
			no-map;
		};

		main_r5fss0_core1_memory_region: r5f-memory@a3100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa3100000 0x00 0xf00000>;
			no-map;
		};

		main_r5fss1_core0_dma_memory_region: r5f-dma-memory@a4000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa4000000 0x00 0x100000>;
			no-map;
		};

		main_r5fss1_core0_memory_region: r5f-memory@a4100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa4100000 0x00 0xf00000>;
			no-map;
		};

		main_r5fss1_core1_dma_memory_region: r5f-dma-memory@a5000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa5000000 0x00 0x100000>;
			no-map;
		};

		main_r5fss1_core1_memory_region: r5f-memory@a5100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa5100000 0x00 0xf00000>;
			no-map;
		};

		c66_0_dma_memory_region: c66-dma-memory@a6000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa6000000 0x00 0x100000>;
			no-map;
		};

		c66_0_memory_region: c66-memory@a6100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa6100000 0x00 0xf00000>;
			no-map;
		};

		c66_1_dma_memory_region: c66-dma-memory@a7000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa7000000 0x00 0x100000>;
			no-map;
		};

		c66_1_memory_region: c66-memory@a7100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa7100000 0x00 0xf00000>;
			no-map;
		};

		c71_0_dma_memory_region: c71-dma-memory@a8000000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa8000000 0x00 0x100000>;
			no-map;
		};

		c71_0_memory_region: c71-memory@a8100000 {
			compatible = "shared-dma-pool";
			reg = <0x00 0xa8100000 0x00 0xf00000>;
			no-map;
		};

		rtos_ipc_memory_region: ipc-memories@aa000000 {
			reg = <0x00 0xaa000000 0x00 0x01c00000>;
			alignment = <0x1000>;
			no-map;
		};
	};

	gpio_keys: gpio-keys {
		compatible = "gpio-keys";
		pinctrl-names = "default";
		pinctrl-0 = <&sw_pwr_pins_default>;

		button-1 {
			label = "BOOT";
			linux,code = <BTN_0>;
			gpios = <&wkup_gpio0 0 GPIO_ACTIVE_LOW>;
		};

		button-2 {
			label = "POWER";
			linux,code = <KEY_POWER>;
			gpios = <&wkup_gpio0 4 GPIO_ACTIVE_LOW>;
		};
	};

	leds {
		compatible = "gpio-leds";
		pinctrl-names = "default";
		pinctrl-0 = <&led_pins_default>;

		led-0 {
			gpios = <&main_gpio0 96 GPIO_ACTIVE_HIGH>;
			function = LED_FUNCTION_HEARTBEAT;
			linux,default-trigger = "heartbeat";
		};

		led-1 {
			gpios = <&main_gpio0 95 GPIO_ACTIVE_HIGH>;
			function = LED_FUNCTION_DISK_ACTIVITY;
			linux,default-trigger = "mmc0";
		};

		led-2 {
			gpios = <&main_gpio0 97 GPIO_ACTIVE_HIGH>;
			function = LED_FUNCTION_CPU;
			linux,default-trigger = "cpu";
		};

		led-3 {
			gpios = <&main_gpio0 110 GPIO_ACTIVE_HIGH>;
			function = LED_FUNCTION_DISK_ACTIVITY;
			linux,default-trigger = "mmc1";
		};

		led-4 {
			gpios = <&main_gpio0 109 GPIO_ACTIVE_HIGH>;
			function = LED_FUNCTION_WLAN;
			default-state = "off";
		};
	};

	evm_12v0: regulator-0 {
		/* main supply */
		compatible = "regulator-fixed";
		regulator-name = "evm_12v0";
		regulator-min-microvolt = <12000000>;
		regulator-max-microvolt = <12000000>;
		regulator-always-on;
		regulator-boot-on;
	};

	vsys_3v3: regulator-1 {
		/* Output of LMS140 */
		compatible = "regulator-fixed";
		regulator-name = "vsys_3v3";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		vin-supply = <&evm_12v0>;
		regulator-always-on;
		regulator-boot-on;
	};

	vsys_5v0: regulator-2 {
		/* Output of LM5140 */
		compatible = "regulator-fixed";
		regulator-name = "vsys_5v0";
		regulator-min-microvolt = <5000000>;
		regulator-max-microvolt = <5000000>;
		vin-supply = <&evm_12v0>;
		regulator-always-on;
		regulator-boot-on;
	};

	vdd_mmc1: regulator-3 {
		compatible = "regulator-fixed";
		pinctrl-names = "default";
		pinctrl-0 = <&sd_pwr_en_pins_default>;
		regulator-name = "vdd_mmc1";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		regulator-boot-on;
		enable-active-high;
		vin-supply = <&vsys_3v3>;
		gpio = <&main_gpio0 82 GPIO_ACTIVE_HIGH>;
	};

	vdd_sd_dv_alt: regulator-4 {
		compatible = "regulator-gpio";
		pinctrl-names = "default";
		pinctrl-0 = <&vdd_sd_dv_alt_pins_default>;
		regulator-name = "tlv71033";
		regulator-min-microvolt = <1800000>;
		regulator-max-microvolt = <3300000>;
		regulator-boot-on;
		vin-supply = <&vsys_5v0>;
		gpios = <&main_gpio0 117 GPIO_ACTIVE_HIGH>;
		states = <1800000 0x0>,
			 <3300000 0x1>;
	};

	dp_pwr_3v3: regulator-5 {
		compatible = "regulator-fixed";
		pinctrl-names = "default";
		pinctrl-0 = <&dp0_3v3_en_pins_default>;
		regulator-name = "dp-pwr";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&main_gpio0 49 GPIO_ACTIVE_HIGH>; /* DP0_PWR_SW_EN */
		enable-active-high;
	};

	dp0: connector {
		compatible = "dp-connector";
		label = "DP0";
		type = "full-size";
		dp-pwr-supply = <&dp_pwr_3v3>;

		port {
			dp_connector_in: endpoint {
				remote-endpoint = <&dp0_out>;
			};
		};
	};
};

&main_pmx0 {
	led_pins_default: led-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x184, PIN_INPUT, 7) /* (T23) RGMII5_RD0.GPIO0_96 */
			J721E_IOPAD(0x180, PIN_INPUT, 7) /* (R23) RGMII5_RD1.GPIO0_95 */
			J721E_IOPAD(0x188, PIN_INPUT, 7) /* (Y28) RGMII6_TX_CTL.GPIO0_97 */
			J721E_IOPAD(0x1bc, PIN_INPUT, 7) /* (V24) MDIO0_MDC.GPIO0_110 */
			J721E_IOPAD(0x1b8, PIN_INPUT, 7) /* (V26) MDIO0_MDIO.GPIO0_109 */
		>;
	};

	main_mmc1_pins_default: main-mmc1-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x254, PIN_INPUT, 0) /* (R29) MMC1_CMD */
			J721E_IOPAD(0x250, PIN_INPUT, 0) /* (P25) MMC1_CLK */
			J721E_IOPAD(0x2ac, PIN_INPUT, 0) /* (P25) MMC1_CLKLB */
			J721E_IOPAD(0x24c, PIN_INPUT, 0) /* (R24) MMC1_DAT0 */
			J721E_IOPAD(0x248, PIN_INPUT, 0) /* (P24) MMC1_DAT1 */
			J721E_IOPAD(0x244, PIN_INPUT, 0) /* (R25) MMC1_DAT2 */
			J721E_IOPAD(0x240, PIN_INPUT, 0) /* (R26) MMC1_DAT3 */
			J721E_IOPAD(0x258, PIN_INPUT, 0) /* (P23) MMC1_SDCD */
		>;
	};

	main_uart0_pins_default: main-uart0-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x1e8, PIN_INPUT, 0) /* (AB2) UART0_RXD */
			J721E_IOPAD(0x1ec, PIN_OUTPUT, 0) /* (AB3) UART0_TXD */
		>;
	};

	sd_pwr_en_pins_default: sd-pwr-en-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x14c, PIN_INPUT, 7) /* (AA29) PRG0_PRU1_GPO19.GPIO0_82 */
		>;
	};

	vdd_sd_dv_alt_pins_default: vdd-sd-dv-alt-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x1d8, PIN_INPUT, 7) /* (W4) SPI1_CS1.GPIO0_117 */
		>;
	};

	main_usbss0_pins_default: main-usbss0-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x210, PIN_INPUT, 7) /* (W3) MCAN1_RX.GPIO1_3 - USBC_DIR */
		>;
	};

	main_usbss1_pins_default: main-usbss1-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x290, INPUT_DISABLE, 1) /* (U6) USB0_DRVVBUS.USB1_DRVVBUS */
		>;
	};

	dp0_3v3_en_pins_default: dp0-3v3-en-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0xc8, PIN_INPUT, 7) /* (AE26) PRG0_PRU0_GPO6.GPIO0_49 */
		>;
	};

	dp0_pins_default: dp0-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x1c4, PIN_INPUT, 5) /* (Y4) SPI0_CS1.DP0_HPD */
		>;
	};

	main_i2c0_pins_default: main-i2c0-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x220, PIN_INPUT_PULLUP, 0) /* (AC5) I2C0_SCL */
			J721E_IOPAD(0x224, PIN_INPUT_PULLUP, 0) /* (AA5) I2C0_SDA */
		>;
	};

	main_i2c1_pins_default: main-i2c1-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x228, PIN_INPUT_PULLUP, 0) /* (Y6) I2C1_SCL */
			J721E_IOPAD(0x22c, PIN_INPUT_PULLUP, 0) /* (AA6) I2C1_SDA */
		>;
	};

	main_i2c2_pins_default: main-i2c2-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x208, PIN_INPUT_PULLUP, 4) /* (W5) MCAN0_RX.I2C2_SCL */
			J721E_IOPAD(0x20c, PIN_INPUT_PULLUP, 4) /* (W6) MCAN0_TX.I2C2_SDA */
			J721E_IOPAD(0x138, PIN_INPUT, 7) /* (AE25) PRG0_PRU1_GPO14.GPIO0_77 */
			J721E_IOPAD(0x13c, PIN_INPUT, 7) /* (AF29) PRG0_PRU1_GPO15.GPIO0_78 */
		>;
	};

	main_i2c3_pins_default: main-i2c3-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x270, PIN_INPUT_PULLUP, 4) /* (T26) MMC2_CLK.I2C3_SCL */
			J721E_IOPAD(0x274, PIN_INPUT_PULLUP, 4) /* (T25) MMC2_CMD.I2C3_SDA */
		>;
	};

	main_i2c4_pins_default: main-i2c4-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x1e0, PIN_INPUT_PULLUP, 2) /* (Y5) SPI1_D0.I2C4_SCL */
			J721E_IOPAD(0x1dc, PIN_INPUT_PULLUP, 2) /* (Y1) SPI1_CLK.I2C4_SDA */
			J721E_IOPAD(0x30, PIN_INPUT, 7) /* (AF24) PRG1_PRU0_GPO11.GPIO0_12 */
			J721E_IOPAD(0x34, PIN_INPUT, 7) /* (AJ24) PRG1_PRU0_GPO12.GPIO0_13 */
		>;
	};

	main_i2c5_pins_default: main-i2c5-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x150, PIN_INPUT_PULLUP, 2) /* (Y26) PRG0_MDIO0_MDIO.I2C5_SCL */
			J721E_IOPAD(0x154, PIN_INPUT_PULLUP, 2) /* (AA27) PRG0_MDIO0_MDC.I2C5_SDA */
		>;
	};

	main_i2c6_pins_default: main-i2c6-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x1d0, PIN_INPUT_PULLUP, 2) /* (AA3) SPI0_D1.I2C6_SCL */
			J721E_IOPAD(0x1e4, PIN_INPUT_PULLUP, 2) /* (Y2) SPI1_D1.I2C6_SDA */
			J721E_IOPAD(0x74, PIN_INPUT, 7) /* (AC21) PRG1_PRU1_GPO7.GPIO0_28 */
			J721E_IOPAD(0xa4, PIN_INPUT, 7) /* (AH22) PRG1_PRU1_GPO19.GPIO0_40 */
		>;
	};

	csi0_gpio_pins_default: csi0-gpio-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x19c, PIN_INPUT_PULLDOWN, 7) /* (W27) RGMII6_TD0.GPIO0_102 */
			J721E_IOPAD(0x1a0, PIN_INPUT_PULLDOWN, 7) /* (W29) RGMII6_TXC.GPIO0_103 */
		>;
	};

	csi1_gpio_pins_default: csi1-gpio-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x198, PIN_INPUT_PULLDOWN, 7) /* (V25) RGMII6_TD1.GPIO0_101 */
			J721E_IOPAD(0x1b0, PIN_INPUT_PULLDOWN, 7) /* (W24) RGMII6_RD1.GPIO0_107 */
		>;
	};

	pcie1_rst_pins_default: pcie1-rst-default-pins {
		pinctrl-single,pins = <
			J721E_IOPAD(0x5c, PIN_INPUT, 7) /* (AG23) PRG1_PRU1_GPO1.GPIO0_22 */
		>;
	};
};

&wkup_pmx0 {
	eeprom_wp_pins_default: eeprom-wp-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0xc4, PIN_OUTPUT_PULLUP, 7) /* (G24) WKUP_GPIO0_5 */
		>;
	};

	mcu_adc0_pins_default: mcu-adc0-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0x130, PIN_INPUT, 0) /* (K25) MCU_ADC0_AIN0 */
			J721E_WKUP_IOPAD(0x134, PIN_INPUT, 0) /* (K26) MCU_ADC0_AIN1 */
			J721E_WKUP_IOPAD(0x138, PIN_INPUT, 0) /* (K28) MCU_ADC0_AIN2 */
			J721E_WKUP_IOPAD(0x13c, PIN_INPUT, 0) /* (L28) MCU_ADC0_AIN3 */
			J721E_WKUP_IOPAD(0x140, PIN_INPUT, 0) /* (K24) MCU_ADC0_AIN4 */
			J721E_WKUP_IOPAD(0x144, PIN_INPUT, 0) /* (K27) MCU_ADC0_AIN5 */
			J721E_WKUP_IOPAD(0x148, PIN_INPUT, 0) /* (K29) MCU_ADC0_AIN6 */
		>;
	};

	mcu_adc1_pins_default: mcu-adc1-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0x150, PIN_INPUT, 0) /* (N23) MCU_ADC1_AIN0 */
		>;
	};

	mikro_bus_pins_default: mikro-bus-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0x108, PIN_INPUT, 7) /* SDAPULLEN (E26) PMIC_POWER_EN0.WKUP_GPIO0_66 */
			J721E_WKUP_IOPAD(0xd4, PIN_INPUT, 7) /* SDA (G26) WKUP_GPIO0_9.MCU_I2C1_SDA */
			J721E_WKUP_IOPAD(0xf4, PIN_INPUT, 7) /* SDA (D25) MCU_I3C0_SDA.WKUP_GPIO0_61 */
			J721E_WKUP_IOPAD(0xd0, PIN_INPUT, 7) /* SCL (G27) WKUP_GPIO0_8.MCU_I2C1_SCL */
			J721E_WKUP_IOPAD(0xf0, PIN_INPUT, 7) /* SCL (D26) MCU_I3C0_SCL.WKUP_GPIO0_60 */
			J721E_WKUP_IOPAD(0xb8, PIN_INPUT, 7) /* MOSI (F28) WKUP_GPIO0_2.MCU_SPI1_D1 */
			J721E_WKUP_IOPAD(0xb4, PIN_INPUT, 7) /* MISO (F25) WKUP_GPIO0_1.MCU_SPI1_D0 */
			J721E_WKUP_IOPAD(0xb0, PIN_INPUT, 7) /* CLK (F26) WKUP_GPIO0_0.MCU_SPI1_CLK */
			J721E_WKUP_IOPAD(0xbc, PIN_INPUT, 7) /* CS (F27) WKUP_GPIO0_3.MCU_SPI1_CS0 */
			J721E_WKUP_IOPAD(0x44, PIN_INPUT, 7) /* RX (G22) MCU_OSPI1_D1.WKUP_GPIO0_33 */
			J721E_WKUP_IOPAD(0x48, PIN_INPUT, 7) /* TX (D23) MCU_OSPI1_D2.WKUP_GPIO0_34 */
			J721E_WKUP_IOPAD(0x4c, PIN_INPUT, 7) /* INT (C23) MCU_OSPI1_D3.WKUP_GPIO0_35 */
			J721E_WKUP_IOPAD(0x54, PIN_INPUT, 7) /* RST (E22) MCU_OSPI1_CSn1.WKUP_GPIO0_37 */
			J721E_WKUP_IOPAD(0xdc, PIN_INPUT, 7) /* PWM (H27) WKUP_GPIO0_11 */
			J721E_WKUP_IOPAD(0xac, PIN_INPUT, 7) /* AN (C29) MCU_MCAN0_RX.WKUP_GPIO0_59 */
		>;
	};

	mcu_cpsw_pins_default: mcu-cpsw-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0x84, PIN_INPUT, 0) /* (B24) MCU_RGMII1_RD0 */
			J721E_WKUP_IOPAD(0x80, PIN_INPUT, 0) /* (A24) MCU_RGMII1_RD1 */
			J721E_WKUP_IOPAD(0x7c, PIN_INPUT, 0) /* (D24) MCU_RGMII1_RD2 */
			J721E_WKUP_IOPAD(0x78, PIN_INPUT, 0) /* (A25) MCU_RGMII1_RD3 */
			J721E_WKUP_IOPAD(0x74, PIN_INPUT, 0) /* (C24) MCU_RGMII1_RXC */
			J721E_WKUP_IOPAD(0x5c, PIN_INPUT, 0) /* (C25) MCU_RGMII1_RX_CTL */
			J721E_WKUP_IOPAD(0x6c, PIN_OUTPUT, 0) /* (B25) MCU_RGMII1_TD0 */
			J721E_WKUP_IOPAD(0x68, PIN_OUTPUT, 0) /* (A26) MCU_RGMII1_TD1 */
			J721E_WKUP_IOPAD(0x64, PIN_OUTPUT, 0) /* (A27) MCU_RGMII1_TD2 */
			J721E_WKUP_IOPAD(0x60, PIN_OUTPUT, 0) /* (A28) MCU_RGMII1_TD3 */
			J721E_WKUP_IOPAD(0x70, PIN_OUTPUT, 0) /* (B26) MCU_RGMII1_TXC */
			J721E_WKUP_IOPAD(0x58, PIN_OUTPUT, 0) /* (B27) MCU_RGMII1_TX_CTL */
		>;
	};

	mcu_mdio_pins_default: mcu-mdio1-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0x8c, PIN_OUTPUT, 0) /* (F23) MCU_MDIO0_MDC */
			J721E_WKUP_IOPAD(0x88, PIN_INPUT, 0) /* (E23) MCU_MDIO0_MDIO */
		>;
	};

	sw_pwr_pins_default: sw-pwr-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0xc0, PIN_INPUT, 7) /* (G25) WKUP_GPIO0_4 */
		>;
	};

	wkup_i2c0_pins_default: wkup-i2c0-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0xf8, PIN_INPUT_PULLUP, 0) /* (J25) WKUP_I2C0_SCL */
			J721E_WKUP_IOPAD(0xfc, PIN_INPUT_PULLUP, 0) /* (H24) WKUP_I2C0_SDA */
		>;
	};

	wkup_uart0_pins_default: wkup-uart0-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0xa0, PIN_INPUT, 0) /* (J29) WKUP_UART0_RXD */
			J721E_WKUP_IOPAD(0xa4, PIN_OUTPUT, 0) /* (J28) WKUP_UART0_TXD */
		>;
	};

	mcu_usbss1_pins_default: mcu-usbss1-default-pins {
		pinctrl-single,pins = <
			J721E_WKUP_IOPAD(0x3c, PIN_OUTPUT_PULLUP, 5) /* (A23) MCU_OSPI1_LBCLKO.WKUP_GPIO0_30 */
		>;
	};
};

&wkup_uart0 {
	/* Wakeup UART is used by TIFS firmware. */
	status = "reserved";
	pinctrl-names = "default";
	pinctrl-0 = <&wkup_uart0_pins_default>;
};

&main_uart0 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_uart0_pins_default>;
	/* Shared with ATF on this platform */
	power-domains = <&k3_pds 146 TI_SCI_PD_SHARED>;
};

&main_sdhci0 {
	/* eMMC */
	status = "okay";
	non-removable;
	ti,driver-strength-ohm = <50>;
	disable-wp;
};

&main_sdhci1 {
	/* SD Card */
	status = "okay";
	vmmc-supply = <&vdd_mmc1>;
	vqmmc-supply = <&vdd_sd_dv_alt>;
	pinctrl-names = "default";
	pinctrl-0 = <&main_mmc1_pins_default>;
	ti,driver-strength-ohm = <50>;
	disable-wp;
};

&main_i2c0 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c0_pins_default>;
	clock-frequency = <400000>;
};

&main_i2c1 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c1_pins_default>;
	clock-frequency = <400000>;
};

&main_i2c2 {
	/* BBB Header: P9.19 and P9.20 */
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c2_pins_default>;
	clock-frequency = <100000>;
};

&main_i2c3 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c3_pins_default>;
	clock-frequency = <400000>;
};

&main_i2c4 {
	/* BBB Header: P9.24 and P9.26 */
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c4_pins_default>;
	clock-frequency = <100000>;
};

&main_i2c5 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c5_pins_default>;
	clock-frequency = <400000>;
};

&main_i2c6 {
	/* BBB Header: P9.17 and P9.18 */
	/* Note: dropped a duplicate 'status = "okay"' that was listed twice in this node */
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&main_i2c6_pins_default>;
	clock-frequency = <100000>;
};

&wkup_i2c0 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&wkup_i2c0_pins_default>;
	clock-frequency = <400000>;

	eeprom@50 {
		compatible = "atmel,24c04";
		reg = <0x50>;
		pinctrl-names = "default";
		pinctrl-0 = <&eeprom_wp_pins_default>;
	};
};

&wkup_gpio0 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&mcu_adc0_pins_default>, <&mcu_adc1_pins_default>,
		    <&mikro_bus_pins_default>;
};

&main_gpio0 {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&csi1_gpio_pins_default>, <&csi0_gpio_pins_default>;
};

&main_gpio1 {
	status = "okay";
};

&usb_serdes_mux {
	idle-states = <1>, <1>; /* USB0 to SERDES3, USB1 to SERDES2 */
};

&serdes_ln_ctrl {
	idle-states = <J721E_SERDES0_LANE0_IP4_UNUSED>, <J721E_SERDES0_LANE1_IP4_UNUSED>,
		      <J721E_SERDES1_LANE0_PCIE1_LANE0>, <J721E_SERDES1_LANE1_PCIE1_LANE1>,
		      <J721E_SERDES2_LANE0_IP1_UNUSED>, <J721E_SERDES2_LANE1_USB3_1>,
		      <J721E_SERDES3_LANE0_USB3_0_SWAP>, <J721E_SERDES3_LANE1_USB3_0>,
		      <J721E_SERDES4_LANE0_EDP_LANE0>, <J721E_SERDES4_LANE1_EDP_LANE1>,
		      <J721E_SERDES4_LANE2_EDP_LANE2>, <J721E_SERDES4_LANE3_EDP_LANE3>;
};

&serdes_wiz3 {
	typec-dir-gpios = <&main_gpio1 3 GPIO_ACTIVE_LOW>;
	typec-dir-debounce-ms = <700>; /* TUSB321, tCCB_DEFAULT 133 ms */
};

&serdes3 {
	serdes3_usb_link: phy@0 {
		reg = <0>;
		cdns,num-lanes = <2>;
		#phy-cells = <0>;
		cdns,phy-type = <PHY_TYPE_USB3>;
		resets = <&serdes_wiz3 1>, <&serdes_wiz3 2>;
	};
};

&serdes4 {
	torrent_phy_dp: phy@0 {
		reg = <0>;
		resets = <&serdes_wiz4 1>;
		cdns,phy-type = <PHY_TYPE_DP>;
		cdns,num-lanes = <4>;
		cdns,max-bit-rate = <5400>;
		#phy-cells = <0>;
	};
};

&mhdp {
	phys = <&torrent_phy_dp>;
	phy-names = "dpphy";
	pinctrl-names = "default";
	pinctrl-0 = <&dp0_pins_default>;
};

&usbss0 {
	pinctrl-names = "default";
	pinctrl-0 = <&main_usbss0_pins_default>;
	ti,vbus-divider;
};

&usb0 {
	dr_mode = "peripheral";
	maximum-speed = "super-speed";
	phys = <&serdes3_usb_link>;
	phy-names = "cdns3,usb3-phy";
};

&serdes2 {
	serdes2_usb_link: phy@1 {
		reg = <1>;
		cdns,num-lanes = <1>;
		#phy-cells = <0>;
		cdns,phy-type = <PHY_TYPE_USB3>;
		resets = <&serdes_wiz2 2>;
	};
};

&usbss1 {
	pinctrl-names = "default";
	pinctrl-0 = <&main_usbss1_pins_default>, <&mcu_usbss1_pins_default>;
	ti,vbus-divider;
};

&usb1 {
	dr_mode = "host";
	maximum-speed = "super-speed";
	phys = <&serdes2_usb_link>;
	phy-names = "cdns3,usb3-phy";
};

&tscadc0 {
	status = "okay";
	/* BBB Header: P9.39, P9.40, P9.37, P9.38, P9.33, P9.36, P9.35 */
	adc {
		ti,adc-channels = <0 1 2 3 4 5 6>;
	};
};

&tscadc1 {
	status = "okay";
	/* MCU mikroBUS Header J10.1 - MCU_ADC1_AIN0 */
	adc {
		ti,adc-channels = <0>;
	};
};

&mcu_cpsw {
	pinctrl-names = "default";
	pinctrl-0 = <&mcu_cpsw_pins_default>;
};

&davinci_mdio {
	pinctrl-names = "default";
	pinctrl-0 = <&mcu_mdio_pins_default>;

	phy0: ethernet-phy@0 {
		reg = <0>;
		ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_00_NS>;
		ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_4_B_NIB>;
	};
};

&cpsw_port1 {
	phy-mode = "rgmii-rxid";
	phy-handle = <&phy0>;
};

&dss {
	/*
	 * These clock assignments are chosen to enable the following outputs:
	 *
	 * VP0 - DisplayPort SST
	 * VP1 - DPI0
	 * VP2 - DSI
	 * VP3 - DPI1
	 */
	assigned-clocks = <&k3_clks 152 1>,	/* VP 1 pixel clock */
			  <&k3_clks 152 4>,	/* VP 2 pixel clock */
			  <&k3_clks 152 9>,	/* VP 3 pixel clock */
			  <&k3_clks 152 13>;	/* VP 4 pixel clock */
	assigned-clock-parents = <&k3_clks 152 2>,	/* PLL16_HSDIV0 */
				 <&k3_clks 152 6>,	/* PLL19_HSDIV0 */
				 <&k3_clks 152 11>,	/* PLL18_HSDIV0 */
				 <&k3_clks 152 18>;	/* PLL23_HSDIV0 */
};

&dss_ports {
	port {
		dpi0_out: endpoint {
			remote-endpoint = <&dp0_in>;
		};
	};
};

&dp0_ports {
	#address-cells = <1>;
	#size-cells = <0>;

	port@0 {
		reg = <0>;
		dp0_in: endpoint {
			remote-endpoint = <&dpi0_out>;
		};
	};

	port@4 {
		reg = <4>;
		dp0_out: endpoint {
			remote-endpoint = <&dp_connector_in>;
		};
	};
};

&serdes0 {
	serdes0_pcie_link: phy@0 {
		reg = <0>;
		cdns,num-lanes = <1>;
		#phy-cells = <0>;
		cdns,phy-type = <PHY_TYPE_PCIE>;
		resets = <&serdes_wiz0 1>;
	};
};

&serdes1 {
	serdes1_pcie_link: phy@0 {
		reg = <0>;
		cdns,num-lanes = <2>;
		#phy-cells = <0>;
		cdns,phy-type = <PHY_TYPE_PCIE>;
		resets = <&serdes_wiz1 1>, <&serdes_wiz1 2>;
	};
};

&pcie1_rc {
	status = "okay";
	pinctrl-names = "default";
	pinctrl-0 = <&pcie1_rst_pins_default>;
	phys = <&serdes1_pcie_link>;
	phy-names = "pcie-phy";
	num-lanes = <2>;
	max-link-speed = <3>;
	reset-gpios = <&main_gpio0 22 GPIO_ACTIVE_HIGH>;
};

&ufs_wrapper {
	status = "disabled";
};

&mailbox0_cluster0 {
	status = "okay";
	interrupts = <436>;

	mbox_mcu_r5fss0_core0: mbox-mcu-r5fss0-core0 {
		ti,mbox-rx = <0 0 0>;
		ti,mbox-tx = <1 0 0>;
	};

	mbox_mcu_r5fss0_core1: mbox-mcu-r5fss0-core1 {
		ti,mbox-rx = <2 0 0>;
		ti,mbox-tx = <3 0 0>;
	};
};

&mailbox0_cluster1 {
	status = "okay";
	interrupts = <432>;

	mbox_main_r5fss0_core0: mbox-main-r5fss0-core0 {
		ti,mbox-rx = <0 0 0>;
		ti,mbox-tx = <1 0 0>;
	};

	mbox_main_r5fss0_core1: mbox-main-r5fss0-core1 {
		ti,mbox-rx = <2 0 0>;
		ti,mbox-tx = <3 0 0>;
	};
};

&mailbox0_cluster2 {
	status = "okay";
	interrupts = <428>;

	mbox_main_r5fss1_core0: mbox-main-r5fss1-core0 {
		ti,mbox-rx = <0 0 0>;
		ti,mbox-tx = <1 0 0>;
	};

	mbox_main_r5fss1_core1: mbox-main-r5fss1-core1 {
		ti,mbox-rx = <2 0 0>;
		ti,mbox-tx = <3 0 0>;
	};
};

&mailbox0_cluster3 {
	status = "okay";
	interrupts = <424>;

	mbox_c66_0: mbox-c66-0 {
		ti,mbox-rx = <0 0 0>;
		ti,mbox-tx = <1 0 0>;
	};

	mbox_c66_1: mbox-c66-1 {
		ti,mbox-rx = <2 0 0>;
		ti,mbox-tx = <3 0 0>;
	};
};

&mailbox0_cluster4 {
	status = "okay";
	interrupts = <420>;

	mbox_c71_0: mbox-c71-0 {
		ti,mbox-rx = <0 0 0>;
		ti,mbox-tx = <1 0 0>;
	};
};

&mcu_r5fss0_core0 {
	mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core0>;
	memory-region = <&mcu_r5fss0_core0_dma_memory_region>,
			<&mcu_r5fss0_core0_memory_region>;
};

&mcu_r5fss0_core1 {
	mboxes = <&mailbox0_cluster0 &mbox_mcu_r5fss0_core1>;
	memory-region = <&mcu_r5fss0_core1_dma_memory_region>,
			<&mcu_r5fss0_core1_memory_region>;
};

&main_r5fss0_core0 {
	mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core0>;
	memory-region = <&main_r5fss0_core0_dma_memory_region>,
			<&main_r5fss0_core0_memory_region>;
};

&main_r5fss0_core1 {
	mboxes = <&mailbox0_cluster1 &mbox_main_r5fss0_core1>;
	memory-region = <&main_r5fss0_core1_dma_memory_region>,
			<&main_r5fss0_core1_memory_region>;
};

&main_r5fss1_core0 {
	mboxes = <&mailbox0_cluster2 &mbox_main_r5fss1_core0>;
	memory-region = <&main_r5fss1_core0_dma_memory_region>,
			<&main_r5fss1_core0_memory_region>;
};

&main_r5fss1_core1 {
	mboxes = <&mailbox0_cluster2 &mbox_main_r5fss1_core1>;
	memory-region = <&main_r5fss1_core1_dma_memory_region>,
			<&main_r5fss1_core1_memory_region>;
};

&c66_0 {
	status = "okay";
	mboxes = <&mailbox0_cluster3 &mbox_c66_0>;
	memory-region = <&c66_0_dma_memory_region>,
			<&c66_0_memory_region>;
};

&c66_1 {
	status = "okay";
	mboxes = <&mailbox0_cluster3 &mbox_c66_1>;
	memory-region = <&c66_1_dma_memory_region>,
			<&c66_1_memory_region>;
};

&c71_0 {
	status = "okay";
	mboxes = <&mailbox0_cluster4 &mbox_c71_0>;
	memory-region = <&c71_0_dma_memory_region>,
			<&c71_0_memory_region>;
};
// SPDX-License-Identifier: GPL-2.0-or-later /* * Asynchronous Cryptographic Hash operations. * * This is the implementation of the ahash (asynchronous hash) API. It differs * from shash (synchronous hash) in that ahash supports asynchronous operations, * and it hashes data from scatterlists instead of virtually addressed buffers. * * The ahash API provides access to both ahash and shash algorithms. The shash * API only provides access to shash algorithms. * * Copyright (c) 2008 Loc Ho <[email protected]> */ #include <crypto/scatterwalk.h> #include <linux/cryptouser.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/seq_file.h> #include <linux/string.h> #include <net/netlink.h> #include "hash.h" #define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e /* * For an ahash tfm that is using an shash algorithm (instead of an ahash * algorithm), this returns the underlying shash tfm. */ static inline struct crypto_shash *ahash_to_shash(struct crypto_ahash *tfm) { return *(struct crypto_shash **)crypto_ahash_ctx(tfm); } static inline struct shash_desc *prepare_shash_desc(struct ahash_request *req, struct crypto_ahash *tfm) { struct shash_desc *desc = ahash_request_ctx(req); desc->tfm = ahash_to_shash(tfm); return desc; } int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) { struct crypto_hash_walk walk; int nbytes; for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0; nbytes = crypto_hash_walk_done(&walk, nbytes)) nbytes = crypto_shash_update(desc, walk.data, nbytes); return nbytes; } EXPORT_SYMBOL_GPL(shash_ahash_update); int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) { struct crypto_hash_walk walk; int nbytes; nbytes = crypto_hash_walk_first(req, &walk); if (!nbytes) return crypto_shash_final(desc, req->result); do { nbytes = crypto_hash_walk_last(&walk) ? 
crypto_shash_finup(desc, walk.data, nbytes, req->result) : crypto_shash_update(desc, walk.data, nbytes); nbytes = crypto_hash_walk_done(&walk, nbytes); } while (nbytes > 0); return nbytes; } EXPORT_SYMBOL_GPL(shash_ahash_finup); int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) { unsigned int nbytes = req->nbytes; struct scatterlist *sg; unsigned int offset; int err; if (nbytes && (sg = req->src, offset = sg->offset, nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) { void *data; data = kmap_local_page(sg_page(sg)); err = crypto_shash_digest(desc, data + offset, nbytes, req->result); kunmap_local(data); } else err = crypto_shash_init(desc) ?: shash_ahash_finup(req, desc); return err; } EXPORT_SYMBOL_GPL(shash_ahash_digest); static void crypto_exit_ahash_using_shash(struct crypto_tfm *tfm) { struct crypto_shash **ctx = crypto_tfm_ctx(tfm); crypto_free_shash(*ctx); } static int crypto_init_ahash_using_shash(struct crypto_tfm *tfm) { struct crypto_alg *calg = tfm->__crt_alg; struct crypto_ahash *crt = __crypto_ahash_cast(tfm); struct crypto_shash **ctx = crypto_tfm_ctx(tfm); struct crypto_shash *shash; if (!crypto_mod_get(calg)) return -EAGAIN; shash = crypto_create_tfm(calg, &crypto_shash_type); if (IS_ERR(shash)) { crypto_mod_put(calg); return PTR_ERR(shash); } crt->using_shash = true; *ctx = shash; tfm->exit = crypto_exit_ahash_using_shash; crypto_ahash_set_flags(crt, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); crt->reqsize = sizeof(struct shash_desc) + crypto_shash_descsize(shash); return 0; } static int hash_walk_next(struct crypto_hash_walk *walk) { unsigned int offset = walk->offset; unsigned int nbytes = min(walk->entrylen, ((unsigned int)(PAGE_SIZE)) - offset); walk->data = kmap_local_page(walk->pg); walk->data += offset; walk->entrylen -= nbytes; return nbytes; } static int hash_walk_new_entry(struct crypto_hash_walk *walk) { struct scatterlist *sg; sg = walk->sg; walk->offset = sg->offset; walk->pg = 
sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); walk->offset = offset_in_page(walk->offset); walk->entrylen = sg->length; if (walk->entrylen > walk->total) walk->entrylen = walk->total; walk->total -= walk->entrylen; return hash_walk_next(walk); } int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err) { walk->data -= walk->offset; kunmap_local(walk->data); crypto_yield(walk->flags); if (err) return err; if (walk->entrylen) { walk->offset = 0; walk->pg++; return hash_walk_next(walk); } if (!walk->total) return 0; walk->sg = sg_next(walk->sg); return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_done); int crypto_hash_walk_first(struct ahash_request *req, struct crypto_hash_walk *walk) { walk->total = req->nbytes; if (!walk->total) { walk->entrylen = 0; return 0; } walk->sg = req->src; walk->flags = req->base.flags; return hash_walk_new_entry(walk); } EXPORT_SYMBOL_GPL(crypto_hash_walk_first); static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { return -ENOSYS; } static void ahash_set_needkey(struct crypto_ahash *tfm, struct ahash_alg *alg) { if (alg->setkey != ahash_nosetkey && !(alg->halg.base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY)) crypto_ahash_set_flags(tfm, CRYPTO_TFM_NEED_KEY); } int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key, unsigned int keylen) { if (likely(tfm->using_shash)) { struct crypto_shash *shash = ahash_to_shash(tfm); int err; err = crypto_shash_setkey(shash, key, keylen); if (unlikely(err)) { crypto_ahash_set_flags(tfm, crypto_shash_get_flags(shash) & CRYPTO_TFM_NEED_KEY); return err; } } else { struct ahash_alg *alg = crypto_ahash_alg(tfm); int err; err = alg->setkey(tfm, key, keylen); if (unlikely(err)) { ahash_set_needkey(tfm, alg); return err; } } crypto_ahash_clear_flags(tfm, CRYPTO_TFM_NEED_KEY); return 0; } EXPORT_SYMBOL_GPL(crypto_ahash_setkey); int crypto_ahash_init(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if 
(likely(tfm->using_shash)) return crypto_shash_init(prepare_shash_desc(req, tfm)); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return crypto_ahash_alg(tfm)->init(req); } EXPORT_SYMBOL_GPL(crypto_ahash_init); static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt, bool has_state) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); unsigned int ds = crypto_ahash_digestsize(tfm); struct ahash_request *subreq; unsigned int subreq_size; unsigned int reqsize; u8 *result; gfp_t gfp; u32 flags; subreq_size = sizeof(*subreq); reqsize = crypto_ahash_reqsize(tfm); reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment()); subreq_size += reqsize; subreq_size += ds; flags = ahash_request_flags(req); gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC; subreq = kmalloc(subreq_size, gfp); if (!subreq) return -ENOMEM; ahash_request_set_tfm(subreq, tfm); ahash_request_set_callback(subreq, flags, cplt, req); result = (u8 *)(subreq + 1) + reqsize; ahash_request_set_crypt(subreq, req->src, result, req->nbytes); if (has_state) { void *state; state = kmalloc(crypto_ahash_statesize(tfm), gfp); if (!state) { kfree(subreq); return -ENOMEM; } crypto_ahash_export(req, state); crypto_ahash_import(subreq, state); kfree_sensitive(state); } req->priv = subreq; return 0; } static void ahash_restore_req(struct ahash_request *req, int err) { struct ahash_request *subreq = req->priv; if (!err) memcpy(req->result, subreq->result, crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); req->priv = NULL; kfree_sensitive(subreq); } int crypto_ahash_update(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (likely(tfm->using_shash)) return shash_ahash_update(req, ahash_request_ctx(req)); return crypto_ahash_alg(tfm)->update(req); } EXPORT_SYMBOL_GPL(crypto_ahash_update); int crypto_ahash_final(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (likely(tfm->using_shash)) return 
crypto_shash_final(ahash_request_ctx(req), req->result); return crypto_ahash_alg(tfm)->final(req); } EXPORT_SYMBOL_GPL(crypto_ahash_final); int crypto_ahash_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (likely(tfm->using_shash)) return shash_ahash_finup(req, ahash_request_ctx(req)); return crypto_ahash_alg(tfm)->finup(req); } EXPORT_SYMBOL_GPL(crypto_ahash_finup); int crypto_ahash_digest(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); if (likely(tfm->using_shash)) return shash_ahash_digest(req, prepare_shash_desc(req, tfm)); if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY) return -ENOKEY; return crypto_ahash_alg(tfm)->digest(req); } EXPORT_SYMBOL_GPL(crypto_ahash_digest); static void ahash_def_finup_done2(void *data, int err) { struct ahash_request *areq = data; if (err == -EINPROGRESS) return; ahash_restore_req(areq, err); ahash_request_complete(areq, err); } static int ahash_def_finup_finish1(struct ahash_request *req, int err) { struct ahash_request *subreq = req->priv; if (err) goto out; subreq->base.complete = ahash_def_finup_done2; err = crypto_ahash_alg(crypto_ahash_reqtfm(req))->final(subreq); if (err == -EINPROGRESS || err == -EBUSY) return err; out: ahash_restore_req(req, err); return err; } static void ahash_def_finup_done1(void *data, int err) { struct ahash_request *areq = data; struct ahash_request *subreq; if (err == -EINPROGRESS) goto out; subreq = areq->priv; subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG; err = ahash_def_finup_finish1(areq, err); if (err == -EINPROGRESS || err == -EBUSY) return; out: ahash_request_complete(areq, err); } static int ahash_def_finup(struct ahash_request *req) { struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); int err; err = ahash_save_req(req, ahash_def_finup_done1, true); if (err) return err; err = crypto_ahash_alg(tfm)->update(req->priv); if (err == -EINPROGRESS || err == -EBUSY) return err; return 
ahash_def_finup_finish1(req, err);
}

/* Serialize the partial hash state of @req into @out. */
int crypto_ahash_export(struct ahash_request *req, void *out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_export(ahash_request_ctx(req), out);

	return crypto_ahash_alg(tfm)->export(req, out);
}
EXPORT_SYMBOL_GPL(crypto_ahash_export);

/* Restore a partial hash state previously saved by crypto_ahash_export(). */
int crypto_ahash_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	if (likely(tfm->using_shash))
		return crypto_shash_import(prepare_shash_desc(req, tfm), in);

	/* A keyed tfm refuses to import state until a key has been set. */
	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
		return -ENOKEY;

	return crypto_ahash_alg(tfm)->import(req, in);
}
EXPORT_SYMBOL_GPL(crypto_ahash_import);

/* Tear-down hook, installed only when the algorithm has ->exit_tfm. */
static void crypto_ahash_exit_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	alg->exit_tfm(hash);
}

/*
 * Per-tfm initialisation: record the state size, and either wire the
 * tfm up as a wrapper around an shash algorithm (when the underlying
 * cra_type is crypto_shash_type) or set up a native ahash tfm with its
 * NEED_KEY flag, exit hook and optional ->init_tfm() callback.
 */
static int crypto_ahash_init_tfm(struct crypto_tfm *tfm)
{
	struct crypto_ahash *hash = __crypto_ahash_cast(tfm);
	struct ahash_alg *alg = crypto_ahash_alg(hash);

	crypto_ahash_set_statesize(hash, alg->halg.statesize);

	if (tfm->__crt_alg->cra_type == &crypto_shash_type)
		return crypto_init_ahash_using_shash(tfm);

	ahash_set_needkey(hash, alg);

	if (alg->exit_tfm)
		tfm->exit = crypto_ahash_exit_tfm;

	return alg->init_tfm ?
alg->init_tfm(hash) : 0;
}

/*
 * Context size for an ahash tfm: an shash-backed tfm only stores a
 * pointer to the wrapped shash tfm; native ahash uses the generic size.
 */
static unsigned int crypto_ahash_extsize(struct crypto_alg *alg)
{
	if (alg->cra_type == &crypto_shash_type)
		return sizeof(struct crypto_shash *);

	return crypto_alg_extsize(alg);
}

/* Free a template-created ahash instance via its own ->free() hook. */
static void crypto_ahash_free_instance(struct crypto_instance *inst)
{
	struct ahash_instance *ahash = ahash_instance(inst);

	ahash->free(ahash);
}

/* Fill a netlink crypto_user report for this algorithm. */
static int __maybe_unused crypto_ahash_report(
	struct sk_buff *skb, struct crypto_alg *alg)
{
	struct crypto_report_hash rhash;

	memset(&rhash, 0, sizeof(rhash));

	strscpy(rhash.type, "ahash", sizeof(rhash.type));

	rhash.blocksize = alg->cra_blocksize;
	rhash.digestsize = __crypto_hash_alg_common(alg)->digestsize;

	return nla_put(skb, CRYPTOCFGA_REPORT_HASH, sizeof(rhash), &rhash);
}

static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
	__maybe_unused;

/* /proc/crypto pretty-printer for ahash algorithms. */
static void crypto_ahash_show(struct seq_file *m, struct crypto_alg *alg)
{
	seq_printf(m, "type : ahash\n");
	seq_printf(m, "async : %s\n",
		   alg->cra_flags & CRYPTO_ALG_ASYNC ? "yes" : "no");
	seq_printf(m, "blocksize : %u\n", alg->cra_blocksize);
	seq_printf(m, "digestsize : %u\n",
		   __crypto_hash_alg_common(alg)->digestsize);
}

/* Frontend type glue binding the ahash API to the crypto core. */
static const struct crypto_type crypto_ahash_type = {
	.extsize = crypto_ahash_extsize,
	.init_tfm = crypto_ahash_init_tfm,
	.free = crypto_ahash_free_instance,
#ifdef CONFIG_PROC_FS
	.show = crypto_ahash_show,
#endif
#if IS_ENABLED(CONFIG_CRYPTO_USER)
	.report = crypto_ahash_report,
#endif
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_AHASH_MASK,
	.type = CRYPTO_ALG_TYPE_AHASH,
	.tfmsize = offsetof(struct crypto_ahash, base),
};

/* Grab a reference to an ahash algorithm for use inside a template. */
int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
		      struct crypto_instance *inst,
		      const char *name, u32 type, u32 mask)
{
	spawn->base.frontend = &crypto_ahash_type;
	return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_grab_ahash);

/* Allocate an ahash tfm handle for the named algorithm. */
struct crypto_ahash *crypto_alloc_ahash(const char *alg_name, u32 type,
					u32 mask)
{
	return crypto_alloc_tfm(alg_name,
&crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_ahash);

/* Check whether an ahash algorithm with the given name is available. */
int crypto_has_ahash(const char *alg_name, u32 type, u32 mask)
{
	return crypto_type_has_alg(alg_name, &crypto_ahash_type, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_has_ahash);

/*
 * True if the algorithm actually implements setkey, i.e. its setkey is
 * not the ahash_nosetkey stub (delegating to the shash test for
 * shash-backed algorithms).
 */
static bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg)
{
	struct crypto_alg *alg = &halg->base;

	if (alg->cra_type == &crypto_shash_type)
		return crypto_shash_alg_has_setkey(__crypto_shash_alg(alg));

	return __crypto_ahash_alg(alg)->setkey != ahash_nosetkey;
}

/*
 * Clone an ahash tfm.  Keyless tfms hold no per-tfm secret state, so a
 * plain reference count bump suffices; keyed tfms get a real copy, via
 * crypto_clone_shash() for shash-backed tfms or the algorithm's own
 * ->clone_tfm() otherwise (-ENOSYS if it has none).
 */
struct crypto_ahash *crypto_clone_ahash(struct crypto_ahash *hash)
{
	struct hash_alg_common *halg = crypto_hash_alg_common(hash);
	struct crypto_tfm *tfm = crypto_ahash_tfm(hash);
	struct crypto_ahash *nhash;
	struct ahash_alg *alg;
	int err;

	if (!crypto_hash_alg_has_setkey(halg)) {
		tfm = crypto_tfm_get(tfm);
		if (IS_ERR(tfm))
			return ERR_CAST(tfm);

		return hash;
	}

	nhash = crypto_clone_tfm(&crypto_ahash_type, tfm);

	if (IS_ERR(nhash))
		return nhash;

	nhash->reqsize = hash->reqsize;
	nhash->statesize = hash->statesize;

	if (likely(hash->using_shash)) {
		struct crypto_shash **nctx = crypto_ahash_ctx(nhash);
		struct crypto_shash *shash;

		shash = crypto_clone_shash(ahash_to_shash(hash));
		if (IS_ERR(shash)) {
			err = PTR_ERR(shash);
			goto out_free_nhash;
		}
		nhash->using_shash = true;
		*nctx = shash;
		return nhash;
	}

	err = -ENOSYS;
	alg = crypto_ahash_alg(hash);
	if (!alg->clone_tfm)
		goto out_free_nhash;

	err = alg->clone_tfm(nhash, hash);
	if (err)
		goto out_free_nhash;

	return nhash;

out_free_nhash:
	crypto_free_ahash(nhash);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(crypto_clone_ahash);

/*
 * Common registration checks and defaults: a state size is mandatory;
 * missing ->finup()/->setkey() callbacks are filled in with the generic
 * fallback and the no-op stub respectively.
 */
static int ahash_prepare_alg(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	if (alg->halg.statesize == 0)
		return -EINVAL;

	err = hash_prepare_alg(&alg->halg);
	if (err)
		return err;

	base->cra_type = &crypto_ahash_type;
	base->cra_flags |= CRYPTO_ALG_TYPE_AHASH;

	if (!alg->finup)
		alg->finup = ahash_def_finup;
	if (!alg->setkey)
		alg->setkey = ahash_nosetkey;

	return 0;
}

int
crypto_register_ahash(struct ahash_alg *alg)
{
	struct crypto_alg *base = &alg->halg.base;
	int err;

	err = ahash_prepare_alg(alg);
	if (err)
		return err;

	return crypto_register_alg(base);
}
EXPORT_SYMBOL_GPL(crypto_register_ahash);

void crypto_unregister_ahash(struct ahash_alg *alg)
{
	crypto_unregister_alg(&alg->halg.base);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahash);

/*
 * Register an array of algorithms; on any failure, unwind the ones
 * already registered before returning the error.
 */
int crypto_register_ahashes(struct ahash_alg *algs, int count)
{
	int i, ret;

	for (i = 0; i < count; i++) {
		ret = crypto_register_ahash(&algs[i]);
		if (ret)
			goto err;
	}

	return 0;

err:
	for (--i; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);

	return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_ahashes);

void crypto_unregister_ahashes(struct ahash_alg *algs, int count)
{
	int i;

	for (i = count - 1; i >= 0; --i)
		crypto_unregister_ahash(&algs[i]);
}
EXPORT_SYMBOL_GPL(crypto_unregister_ahashes);

/*
 * Register a template-created instance; the ->free() hook is mandatory
 * so crypto_ahash_free_instance() has something to call.
 */
int ahash_register_instance(struct crypto_template *tmpl,
			    struct ahash_instance *inst)
{
	int err;

	if (WARN_ON(!inst->free))
		return -EINVAL;

	err = ahash_prepare_alg(&inst->alg);
	if (err)
		return err;

	return crypto_register_instance(tmpl, ahash_crypto_instance(inst));
}
EXPORT_SYMBOL_GPL(ahash_register_instance);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Asynchronous cryptographic hash type");
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2008-2011, Intel Corporation
 *
 * Authors:
 * Eric Anholt <[email protected]>
 */

#ifndef _FRAMEBUFFER_H_
#define _FRAMEBUFFER_H_

#include "psb_drv.h"

/*
 * Query which connectors of @dev can be cloned with the connector types
 * in @type_mask.  NOTE(review): exact return semantics (mask vs count)
 * are not visible here — confirm against the definition in the .c file.
 */
extern int gma_connector_clones(struct drm_device *dev, int type_mask);

#endif
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
#ifndef __LINUX_ROMFS_FS_H
#define __LINUX_ROMFS_FS_H

#include <linux/types.h>
#include <linux/fs.h>

/* The basic structures of the romfs filesystem */

#define ROMBSIZE BLOCK_SIZE
#define ROMBSBITS BLOCK_SIZE_BITS
#define ROMBMASK (ROMBSIZE-1)
#define ROMFS_MAGIC 0x7275

/* Maximum file name length supported by romfs. */
#define ROMFS_MAXFN 128

/*
 * Helpers that pack four characters into one 32-bit big-endian word;
 * used below to spell out the on-disk "-rom1fs-" signature.
 */
#define __mkw(h,l) (((h)&0x00ff)<< 8|((l)&0x00ff))
#define __mkl(h,l) (((h)&0xffff)<<16|((l)&0xffff))
#define __mk4(a,b,c,d) cpu_to_be32(__mkl(__mkw(a,b),__mkw(c,d)))
#define ROMSB_WORD0 __mk4('-','r','o','m')
#define ROMSB_WORD1 __mk4('1','f','s','-')

/* On-disk "super block" — all fields are big-endian. */
struct romfs_super_block {
	__be32 word0;	/* ROMSB_WORD0 ("-rom") */
	__be32 word1;	/* ROMSB_WORD1 ("1fs-") */
	__be32 size;
	__be32 checksum;
	char name[];		/* volume name */
};

/* On disk inode — all fields are big-endian. */
struct romfs_inode {
	__be32 next;		/* low 4 bits see ROMFH_ */
	__be32 spec;
	__be32 size;
	__be32 checksum;
	char name[];
};

/* File-type field masked out of the low bits of romfs_inode.next. */
#define ROMFH_TYPE 7
#define ROMFH_HRD 0
#define ROMFH_DIR 1
#define ROMFH_REG 2
#define ROMFH_SYM 3
#define ROMFH_BLK 4
#define ROMFH_CHR 5
#define ROMFH_SCK 6
#define ROMFH_FIF 7
#define ROMFH_EXEC 8

/* Alignment: on-disk headers are padded to 16-byte boundaries. */
#define ROMFH_SIZE 16
#define ROMFH_PAD (ROMFH_SIZE-1)
#define ROMFH_MASK (~ROMFH_PAD)

#endif
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * BSG helper library
 *
 * Copyright (C) 2008 James Smart, Emulex Corporation
 * Copyright (C) 2011 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2011 Mike Christie
 */
#ifndef _BLK_BSG_
#define _BLK_BSG_

#include <linux/blkdev.h>

struct bsg_job;
struct request;
struct device;
struct scatterlist;
struct request_queue;

/* LLD callback invoked for each bsg request; returns 0 or an errno. */
typedef int (bsg_job_fn) (struct bsg_job *);
/* LLD callback invoked when a bsg request times out. */
typedef enum blk_eh_timer_return (bsg_timeout_fn)(struct request *);

/* One DMA-mapped data buffer (request or reply payload). */
struct bsg_buffer {
	unsigned int payload_len;
	int sg_cnt;
	struct scatterlist *sg_list;
};

struct bsg_job {
	struct device *dev;

	struct kref kref;

	unsigned int timeout;

	/* Transport/driver specific request/reply structs */
	void *request;
	void *reply;

	unsigned int request_len;
	unsigned int reply_len;
	/*
	 * On entry : reply_len indicates the buffer size allocated for
	 * the reply.
	 *
	 * Upon completion : the message handler must set reply_len
	 *  to indicates the size of the reply to be returned to the
	 *  caller.
	 */

	/* DMA payloads for the request/response */
	struct bsg_buffer request_payload;
	struct bsg_buffer reply_payload;

	int result;
	unsigned int reply_payload_rcv_len;

	/* BIDI support */
	struct request *bidi_rq;
	struct bio *bidi_bio;

	void *dd_data;		/* Used for driver-specific storage */
};

/* Complete a job back to the block layer with the given result. */
void bsg_job_done(struct bsg_job *job, int result,
		  unsigned int reply_payload_rcv_len);
/* Create the request queue that feeds jobs into @job_fn. */
struct request_queue *bsg_setup_queue(struct device *dev, const char *name,
		struct queue_limits *lim, bsg_job_fn *job_fn,
		bsg_timeout_fn *timeout, int dd_job_size);
void bsg_remove_queue(struct request_queue *q);
/* Reference counting on a job; bsg_job_get() fails once it is dying. */
void bsg_job_put(struct bsg_job *job);
int __must_check bsg_job_get(struct bsg_job *job);

#endif
/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
/*
 * Copyright (c) 2018 BayLibre, SAS.
 * Author: Jerome Brunet <[email protected]>
 */

#ifndef __AXG_AUDIO_CLKC_H
#define __AXG_AUDIO_CLKC_H

/*
 * Audio Clock register offsets
 *
 * Register offsets from the datasheet must be multiplied by 4 before
 * to get the right offset
 */
/* Gates and master clock dividers */
#define AUDIO_CLK_GATE_EN	0x000
#define AUDIO_MCLK_A_CTRL	0x004
#define AUDIO_MCLK_B_CTRL	0x008
#define AUDIO_MCLK_C_CTRL	0x00C
#define AUDIO_MCLK_D_CTRL	0x010
#define AUDIO_MCLK_E_CTRL	0x014
#define AUDIO_MCLK_F_CTRL	0x018
#define AUDIO_MST_PAD_CTRL0	0x01c
#define AUDIO_MST_PAD_CTRL1	0x020
#define AUDIO_SW_RESET		0x024
/* Master bit-clock (SCLK) control, one pair per master A-F */
#define AUDIO_MST_A_SCLK_CTRL0	0x040
#define AUDIO_MST_A_SCLK_CTRL1	0x044
#define AUDIO_MST_B_SCLK_CTRL0	0x048
#define AUDIO_MST_B_SCLK_CTRL1	0x04C
#define AUDIO_MST_C_SCLK_CTRL0	0x050
#define AUDIO_MST_C_SCLK_CTRL1	0x054
#define AUDIO_MST_D_SCLK_CTRL0	0x058
#define AUDIO_MST_D_SCLK_CTRL1	0x05C
#define AUDIO_MST_E_SCLK_CTRL0	0x060
#define AUDIO_MST_E_SCLK_CTRL1	0x064
#define AUDIO_MST_F_SCLK_CTRL0	0x068
#define AUDIO_MST_F_SCLK_CTRL1	0x06C
/* Per-peripheral clock selection/control */
#define AUDIO_CLK_TDMIN_A_CTRL	0x080
#define AUDIO_CLK_TDMIN_B_CTRL	0x084
#define AUDIO_CLK_TDMIN_C_CTRL	0x088
#define AUDIO_CLK_TDMIN_LB_CTRL 0x08C
#define AUDIO_CLK_TDMOUT_A_CTRL 0x090
#define AUDIO_CLK_TDMOUT_B_CTRL 0x094
#define AUDIO_CLK_TDMOUT_C_CTRL 0x098
#define AUDIO_CLK_SPDIFIN_CTRL	0x09C
#define AUDIO_CLK_SPDIFOUT_CTRL 0x0A0
#define AUDIO_CLK_RESAMPLE_CTRL 0x0A4
#define AUDIO_CLK_LOCKER_CTRL	0x0A8
#define AUDIO_CLK_PDMIN_CTRL0	0x0AC
#define AUDIO_CLK_PDMIN_CTRL1	0x0B0
#define AUDIO_CLK_SPDIFOUT_B_CTRL 0x0B4

/* SM1 introduce new register and some shifts :( */
#define AUDIO_CLK_GATE_EN1	0x004
#define AUDIO_SM1_MCLK_A_CTRL	0x008
#define AUDIO_SM1_MCLK_B_CTRL	0x00C
#define AUDIO_SM1_MCLK_C_CTRL	0x010
#define AUDIO_SM1_MCLK_D_CTRL	0x014
#define AUDIO_SM1_MCLK_E_CTRL	0x018
#define AUDIO_SM1_MCLK_F_CTRL	0x01C
#define AUDIO_SM1_MST_PAD_CTRL0	0x020
#define AUDIO_SM1_MST_PAD_CTRL1	0x024
#define
AUDIO_SM1_SW_RESET0 0x028
/* Remaining SM1-only registers */
#define AUDIO_SM1_SW_RESET1	0x02C
#define AUDIO_CLK81_CTRL	0x030
#define AUDIO_CLK81_EN		0x034
/* eARC RX clock control */
#define AUDIO_EARCRX_CMDC_CLK_CTRL	0x0D0
#define AUDIO_EARCRX_DMAC_CLK_CTRL	0x0D4

#endif /*__AXG_AUDIO_CLKC_H */
/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2021 Intel Corporation
 */

#ifndef __INTEL_BACKLIGHT_H__
#define __INTEL_BACKLIGHT_H__

#include <linux/types.h>

struct drm_connector_state;
struct intel_atomic_state;
struct intel_connector;
struct intel_crtc_state;
struct intel_encoder;
struct intel_panel;
enum pipe;

/* Panel backlight setup / lifetime */
void intel_backlight_init_funcs(struct intel_panel *panel);
int intel_backlight_setup(struct intel_connector *connector, enum pipe pipe);
void intel_backlight_destroy(struct intel_panel *panel);

/* Runtime enable/disable and atomic update entry points */
void intel_backlight_enable(const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state);
void intel_backlight_update(struct intel_atomic_state *state,
			    struct intel_encoder *encoder,
			    const struct intel_crtc_state *crtc_state,
			    const struct drm_connector_state *conn_state);
void intel_backlight_disable(const struct drm_connector_state *old_conn_state);

/* Brightness level setters and PWM <-> logical level conversion */
void intel_backlight_set_acpi(const struct drm_connector_state *conn_state,
			      u32 level, u32 max);
void intel_backlight_set_pwm_level(const struct drm_connector_state *conn_state,
				   u32 level);
u32 intel_backlight_invert_pwm_level(struct intel_connector *connector,
				     u32 level);
u32 intel_backlight_level_to_pwm(struct intel_connector *connector, u32 level);
u32 intel_backlight_level_from_pwm(struct intel_connector *connector, u32 val);

#if IS_ENABLED(CONFIG_BACKLIGHT_CLASS_DEVICE)
int intel_backlight_device_register(struct intel_connector *connector);
void intel_backlight_device_unregister(struct intel_connector *connector);
#else /* CONFIG_BACKLIGHT_CLASS_DEVICE */
/* No-op stubs when the backlight class device support is disabled. */
static inline int intel_backlight_device_register(struct intel_connector *connector)
{
	return 0;
}
static inline void intel_backlight_device_unregister(struct intel_connector *connector)
{
}
#endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */

#endif /* __INTEL_BACKLIGHT_H__ */
// SPDX-License-Identifier: GPL-2.0 /**************************************/ /* this file adapted from font_8x16.c */ /* by Jurriaan Kalkman 05-2005 */ /**************************************/ #include <linux/font.h> #define FONTDATAMAX 3584 static const struct font_data fontdata_7x14 = { { 0, 0, FONTDATAMAX, 0 }, { /* 0 0x00 '^@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 1 0x01 '^A' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x82, /* 1000001 */ 0xaa, /* 1010101 */ 0x82, /* 1000001 */ 0x82, /* 1000001 */ 0xba, /* 1011101 */ 0x92, /* 1001001 */ 0x82, /* 1000001 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 2 0x02 '^B' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0xd6, /* 1101011 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xc6, /* 1100011 */ 0xee, /* 1110111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 3 0x03 '^C' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 4 0x04 '^D' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x38, /* 0011100 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 5 0x05 '^E' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x38, /* 0011100 */ 0x38, /* 0011100 */ 0xee, /* 1110111 */ 0xee, /* 1110111 */ 0xee, /* 1110111 */ 0x10, /* 
0001000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 6 0x06 '^F' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x7c, /* 0111110 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x7c, /* 0111110 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 7 0x07 '^G' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 8 0x08 '^H' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xe6, /* 1110011 */ 0xc2, /* 1100001 */ 0xc2, /* 1100001 */ 0xe6, /* 1110011 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 9 0x09 '^I' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x44, /* 0100010 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 10 0x0a '^J' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xc6, /* 1100011 */ 0x92, /* 1001001 */ 0xba, /* 1011101 */ 0x92, /* 1001001 */ 0xc6, /* 1100011 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 11 0x0b '^K' */ 0x00, /* 0000000 */ 0x1e, /* 0001111 */ 0x0e, /* 0000111 */ 0x1a, /* 0001101 */ 0x1a, /* 0001101 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 12 0x0c '^L' */ 0x00, /* 0000000 */ 0x3c, /* 0011110 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 
0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 13 0x0d '^M' */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x36, /* 0011011 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x70, /* 0111000 */ 0xf0, /* 1111000 */ 0xe0, /* 1110000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 14 0x0e '^N' */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0x66, /* 0110011 */ 0x7e, /* 0111111 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x66, /* 0110011 */ 0x6e, /* 0110111 */ 0xee, /* 1110111 */ 0xec, /* 1110110 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 15 0x0f '^O' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0xd6, /* 1101011 */ 0x38, /* 0011100 */ 0xee, /* 1110111 */ 0x38, /* 0011100 */ 0xd6, /* 1101011 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 16 0x10 '^P' */ 0x00, /* 0000000 */ 0x80, /* 1000000 */ 0xc0, /* 1100000 */ 0xe0, /* 1110000 */ 0xf0, /* 1111000 */ 0xfc, /* 1111110 */ 0xf0, /* 1111000 */ 0xe0, /* 1110000 */ 0xc0, /* 1100000 */ 0x80, /* 1000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 17 0x11 '^Q' */ 0x00, /* 0000000 */ 0x04, /* 0000010 */ 0x0c, /* 0000110 */ 0x1c, /* 0001110 */ 0x3c, /* 0011110 */ 0xfc, /* 1111110 */ 0x3c, /* 0011110 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x04, /* 0000010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 18 0x12 '^R' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 19 
0x13 '^S' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 20 0x14 '^T' */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0xd4, /* 1101010 */ 0xd4, /* 1101010 */ 0xd4, /* 1101010 */ 0x74, /* 0111010 */ 0x14, /* 0001010 */ 0x14, /* 0001010 */ 0x14, /* 0001010 */ 0x14, /* 0001010 */ 0x16, /* 0001011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 21 0x15 '^U' */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x60, /* 0110000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 22 0x16 '^V' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 23 0x17 '^W' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 24 0x18 '^X' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x7e, /* 0111111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 25 0x19 '^Y' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7e, /* 0111111 */ 0x3c, /* 
0011110 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 26 0x1a '^Z' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0xfc, /* 1111110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 27 0x1b '^[' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xfc, /* 1111110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 28 0x1c '^\' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 29 0x1d '^]' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x28, /* 0010100 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x28, /* 0010100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 30 0x1e '^^' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 31 0x1f '^_' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 32 0x20 ' ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 
*/ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 33 0x21 '!' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x3c, /* 0011110 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 34 0x22 '"' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x28, /* 0010100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 35 0x23 '#' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 36 0x24 '$' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x8c, /* 1000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ /* 37 0x25 '%' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xcc, /* 1100110 */ 0x8c, /* 1000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 38 0x26 '&' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x78, /* 0111100 */ 0xde, /* 1101111 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 39 0x27 ''' */ 0x00, /* 
0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 40 0x28 '(' */ 0x00, /* 0000000 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 41 0x29 ')' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 42 0x2a '*' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0xfe, /* 1111111 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 43 0x2b '+' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x7c, /* 0111110 */ 0x10, /* 0001000 */ 0x10, /* 0001000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 44 0x2c ',' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 45 0x2d '-' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 
0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 46 0x2e '.' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 47 0x2f '/' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x04, /* 0000010 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0x80, /* 1000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 48 0x30 '0' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0xec, /* 1110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 49 0x31 '1' */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x38, /* 0011100 */ 0x78, /* 0111100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 50 0x32 '2' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 51 0x33 '3' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x38, /* 0011100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 52 0x34 '4' */ 0x00, /* 0000000 */ 0x0c, /* 0000110 */ 0x1c, /* 0001110 */ 0x3c, /* 0011110 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xfe, /* 
1111111 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 53 0x35 '5' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xf8, /* 1111100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 54 0x36 '6' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 55 0x37 '7' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 56 0x38 '8' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 57 0x39 '9' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 58 0x3a ':' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 59 0x3b ';' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 
0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 60 0x3c '<' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x04, /* 0000010 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x04, /* 0000010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 61 0x3d '=' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 62 0x3e '>' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x40, /* 0100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x40, /* 0100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 63 0x3f '?' 
*/ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 64 0x40 '@' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0xdc, /* 1101110 */ 0xd8, /* 1101100 */ 0xc0, /* 1100000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 65 0x41 'A' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 66 0x42 'B' */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x78, /* 0111100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 67 0x43 'C' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 68 0x44 'D' */ 0x00, /* 0000000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 69 0x45 'E' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x6c, /* 0110110 */ 0x64, /* 0110010 */ 0x68, /* 0110100 */ 0x78, /* 0111100 */ 0x68, /* 0110100 */ 0x60, /* 0110000 */ 0x64, /* 0110010 */ 0x6c, /* 0110110 */ 0x7c, /* 
0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 70 0x46 'F' */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x64, /* 0110010 */ 0x60, /* 0110000 */ 0x68, /* 0110100 */ 0x78, /* 0111100 */ 0x68, /* 0110100 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 71 0x47 'G' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xdc, /* 1101110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x6c, /* 0110110 */ 0x34, /* 0011010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 72 0x48 'H' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 73 0x49 'I' */ 0x00, /* 0000000 */ 0x3c, /* 0011110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 74 0x4a 'J' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 75 0x4b 'K' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xf0, /* 1111000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 76 0x4c 'L' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 
0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 77 0x4d 'M' */ 0x00, /* 0000000 */ 0xc6, /* 1100011 */ 0xee, /* 1110111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xd6, /* 1101011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 78 0x4e 'N' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xec, /* 1110110 */ 0xec, /* 1110110 */ 0xfc, /* 1111110 */ 0xdc, /* 1101110 */ 0xdc, /* 1101110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 79 0x4f 'O' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 80 0x50 'P' */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 81 0x51 'Q' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xdc, /* 1101110 */ 0x78, /* 0111100 */ 0x18, /* 0001100 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ /* 82 0x52 'R' */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 83 0x53 'S' */ 0x00, /* 0000000 */ 0x7c, /* 
0111110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x38, /* 0011100 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x8c, /* 1000110 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 84 0x54 'T' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0xb4, /* 1011010 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 85 0x55 'U' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 86 0x56 'V' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 87 0x57 'W' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xfc, /* 1111110 */ 0x48, /* 0100100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 88 0x58 'X' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 89 0x59 'Y' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 
0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 90 0x5a 'Z' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x8c, /* 1000110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 91 0x5b '[' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 92 0x5c '\' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x80, /* 1000000 */ 0xc0, /* 1100000 */ 0xe0, /* 1110000 */ 0x70, /* 0111000 */ 0x38, /* 0011100 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x04, /* 0000010 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 93 0x5d ']' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 94 0x5e '^' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 95 0x5f '_' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ /* 96 0x60 '`' */ 0x00, /* 0000000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 97 0x61 'a' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 98 0x62 'b' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 99 0x63 'c' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 100 0x64 'd' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x3c, /* 0011110 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 101 0x65 'e' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 102 0x66 'f' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x64, /* 0110010 */ 0x60, /* 0110000 */ 0xf0, /* 1111000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 103 0x67 'g' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 
*/ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ /* 104 0x68 'h' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xd8, /* 1101100 */ 0xec, /* 1110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 105 0x69 'i' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 106 0x6a 'j' */ 0x00, /* 0000000 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ /* 107 0x6b 'k' */ 0x00, /* 0000000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xf0, /* 1111000 */ 0xf0, /* 1111000 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 108 0x6c 'l' */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 109 0x6d 'm' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xec, /* 1110110 */ 0xfe, /* 1111111 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 
0x00, /* 0000000 */ /* 110 0x6e 'n' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 111 0x6f 'o' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 112 0x70 'p' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ /* 113 0x71 'q' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x74, /* 0111010 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ /* 114 0x72 'r' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xec, /* 1110110 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 115 0x73 's' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 116 0x74 't' */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, 
/* 0011000 */ 0x36, /* 0011011 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 117 0x75 'u' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 118 0x76 'v' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 119 0x77 'w' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 120 0x78 'x' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 121 0x79 'y' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0xf0, /* 1111000 */ /* 122 0x7a 'z' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 123 0x7b '{' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x30, /* 0011000 */ 0x30, /* 
0011000 */ 0x30, /* 0011000 */ 0xe0, /* 1110000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 124 0x7c '|' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 125 0x7d '}' */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x0e, /* 0000111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 126 0x7e '~' */ 0x00, /* 0000000 */ 0xec, /* 1110110 */ 0xb8, /* 1011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 127 0x7f '' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 128 0x80 'Ç' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc4, /* 1100010 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc4, /* 1100010 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ /* 129 0x81 'ü' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 130 0x82 'é' */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 131 0x83 'â' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 132 0x84 'ä' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 133 0x85 'à' */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 134 0x86 'å' */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 135 0x87 'ç' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xe0, /* 1110000 */ /* 136 0x88 'ê' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 
1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 137 0x89 'ë' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 138 0x8a 'è' */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 139 0x8b 'ï' */ 0x00, /* 0000000 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x3c, /* 0011110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 140 0x8c 'î' */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 141 0x8d 'ì' */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 142 0x8e 'Ä' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 143 0x8f 'Å' */ 0x30, /* 0011000 */ 0x48, /* 0100100 */ 0x48, /* 0100100 */ 0x30, /* 
0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 144 0x90 'É' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xc4, /* 1100010 */ 0xd0, /* 1101000 */ 0xf0, /* 1111000 */ 0xd0, /* 1101000 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 145 0x91 'æ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xec, /* 1110110 */ 0x36, /* 0011011 */ 0x36, /* 0011011 */ 0x7e, /* 0111111 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x6e, /* 0110111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 146 0x92 'Æ' */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfe, /* 1111111 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xce, /* 1100111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 147 0x93 'ô' */ 0x10, /* 0001000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 148 0x94 'ö' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 149 0x95 'ò' */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 150 0x96 'û' */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 151 0x97 'ù' */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 152 0x98 'ÿ' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x70, /* 0111000 */ /* 153 0x99 'Ö' */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 154 0x9a 'Ü' */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 155 0x9b '¢' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0x7c, /* 0111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 156 0x9c '£' */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x64, /* 0110010 */ 0x60, /* 0110000 */ 0xf0, /* 1111000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 
0110000 */ 0xe6, /* 1110011 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 157 0x9d '¥' */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 158 0x9e '₧' */ 0xf8, /* 1111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc4, /* 1100010 */ 0xcc, /* 1100110 */ 0xde, /* 1101111 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xc6, /* 1100011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 159 0x9f 'ƒ' */ 0x1c, /* 0001110 */ 0x36, /* 0011011 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xb0, /* 1011000 */ 0xe0, /* 1110000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 160 0xa0 'á' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 161 0xa1 'í' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 162 0xa2 'ó' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 163 0xa3 'ú' */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 
0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 164 0xa4 'ñ' */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0xb8, /* 1011100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 165 0xa5 'Ñ' */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xec, /* 1110110 */ 0xec, /* 1110110 */ 0xfc, /* 1111110 */ 0xdc, /* 1101110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 166 0xa6 'ª' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 167 0xa7 'º' */ 0x00, /* 0000000 */ 0x70, /* 0111000 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0xf8, /* 1111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 168 0xa8 '¿' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 169 0xa9 '⌐' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 170 0xaa '¬' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 171 0xab '½' */ 0x60, /* 0110000 */ 0xe0, /* 1110000 */ 0x62, /* 0110001 */ 0x66, /* 0110011 */ 0x6c, /* 0110110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0xb8, /* 1011100 */ 0x4c, /* 0100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x7c, /* 0111110 */ /* 172 0xac '¼' */ 0x60, /* 0110000 */ 0xe0, /* 1110000 */ 0x62, /* 0110001 */ 0x66, /* 0110011 */ 0x6c, /* 0110110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x6c, /* 0110110 */ 0xdc, /* 1101110 */ 0xb4, /* 1011010 */ 0x7e, /* 0111111 */ 0x0c, /* 0000110 */ 0x0c, /* 0000110 */ 0x00, /* 0000000 */ /* 173 0xad '¡' */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 174 0xae '«' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x36, /* 0011011 */ 0x6c, /* 0110110 */ 0xd8, /* 1101100 */ 0x6c, /* 0110110 */ 0x36, /* 0011011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 175 0xaf '»' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xd8, /* 1101100 */ 0x6c, /* 0110110 */ 0x36, /* 0011011 */ 0x6c, /* 0110110 */ 0xd8, /* 1101100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 176 0xb0 '░' */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 
1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ 0x88, /* 1000100 */ 0x22, /* 0010001 */ /* 177 0xb1 '▒' */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ 0x54, /* 0101010 */ 0xaa, /* 1010101 */ /* 178 0xb2 '▓' */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ 0xee, /* 1110111 */ 0xba, /* 1011101 */ /* 179 0xb3 '│' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 180 0xb4 '┤' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 181 0xb5 '╡' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 182 0xb6 '╢' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xec, /* 1110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 183 0xb7 '╖' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 184 0xb8 '╕' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 185 0xb9 '╣' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xec, /* 1110110 */ 0x0c, /* 0000110 */ 0xec, /* 1110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 186 0xba '║' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 187 0xbb '╗' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x0c, /* 0000110 */ 0xec, /* 1110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 188 0xbc '╝' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xec, /* 1110110 */ 0x0c, /* 0000110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 189 0xbd '╜' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 190 0xbe '╛' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 191 0xbf '┐' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xf0, /* 1111000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 192 0xc0 '└' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 193 0xc1 '┴' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 194 0xc2 '┬' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 195 0xc3 '├' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 196 0xc4 '─' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 197 0xc5 '┼' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 198 0xc6 '╞' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 199 0xc7 '╟' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6e, /* 0110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 200 0xc8 '╚' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6e, /* 0110111 */ 0x60, /* 0110000 */ 0x7e, /* 0111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 201 0xc9 '╔' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0x60, /* 0110000 */ 0x6e, /* 0110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 202 0xca '╩' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xee, /* 1110111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 203 0xcb '╦' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xee, /* 1110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 204 0xcc '╠' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6e, /* 0110111 */ 0x60, /* 0110000 */ 0x6e, /* 0110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 205 0xcd '═' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 206 0xce '╬' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xee, /* 1110111 */ 0x00, /* 0000000 */ 0xee, /* 1110111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 207 0xcf '╧' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 208 0xd0 '╨' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 209 0xd1 '╤' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 
0011000 */ /* 210 0xd2 '╥' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 211 0xd3 '╙' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x7e, /* 0111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 212 0xd4 '╘' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 213 0xd5 '╒' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 214 0xd6 '╓' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 215 0xd7 '╫' */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ /* 216 0xd8 '╪' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0xfe, /* 1111111 */ 0x30, /* 0011000 */ 0x30, /* 
0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 217 0xd9 '┘' */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xf0, /* 1111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 218 0xda '┌' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x3e, /* 0011111 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 219 0xdb '█' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 220 0xdc '▄' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ /* 221 0xdd '▌' */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ 0xe0, /* 1110000 */ /* 222 0xde '▐' */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ 0x1e, /* 0001111 */ /* 223 0xdf '▀' */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 
1111111 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 224 0xe0 'α' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xdc, /* 1101110 */ 0x76, /* 0111011 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 225 0xe1 'ß' */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xd8, /* 1101100 */ 0xcc, /* 1100110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 226 0xe2 'Γ' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 227 0xe3 'π' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfe, /* 1111111 */ 0xfe, /* 1111111 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 228 0xe4 'Σ' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 229 0xe5 'σ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7e, /* 0111111 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 230 0xe6 'µ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xf8, /* 1111100 */ 0xc0, /* 1100000 */ 0xc0, /* 1100000 */ 0x80, /* 1000000 */ /* 231 0xe7 'τ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 232 0xe8 'Φ' */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 233 0xe9 'Θ' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xfc, /* 1111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 234 0xea 'Ω' */ 0x00, /* 0000000 */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0xc6, /* 1100011 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0xee, /* 1110111 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 235 0xeb 'δ' */ 0x00, /* 0000000 */ 0x3c, /* 0011110 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x7c, /* 0111110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x78, /* 0111100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 236 0xec '∞' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0x7c, /* 
0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 237 0xed 'φ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x06, /* 0000011 */ 0x0c, /* 0000110 */ 0x7c, /* 0111110 */ 0xd6, /* 1101011 */ 0xd6, /* 1101011 */ 0xe6, /* 1110011 */ 0x7c, /* 0111110 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 238 0xee 'ε' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x7c, /* 0111110 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x1c, /* 0001110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 239 0xef '∩' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0xcc, /* 1100110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 240 0xf0 '≡' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 241 0xf1 '±' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0xfc, /* 1111110 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 242 0xf2 '≥' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x0c, /* 0000110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x60, /* 0110000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 243 0xf3 '≤' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x18, /* 0001100 */ 0x30, /* 
0011000 */ 0x60, /* 0110000 */ 0xc0, /* 1100000 */ 0x60, /* 0110000 */ 0x30, /* 0011000 */ 0x18, /* 0001100 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 244 0xf4 '⌠' */ 0x00, /* 0000000 */ 0x1c, /* 0001110 */ 0x36, /* 0011011 */ 0x36, /* 0011011 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ /* 245 0xf5 '⌡' */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x70, /* 0111000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 246 0xf6 '÷' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 247 0xf7 '≈' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0x76, /* 0111011 */ 0xdc, /* 1101110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 248 0xf8 '°' */ 0x38, /* 0011100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 249 0xf9 '·' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 
0000000 */ /* 250 0xfa '•' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x30, /* 0011000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 251 0xfb '√' */ 0x1e, /* 0001111 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0x18, /* 0001100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0xd8, /* 1101100 */ 0x78, /* 0111100 */ 0x38, /* 0011100 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 252 0xfc 'ⁿ' */ 0xd8, /* 1101100 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x6c, /* 0110110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 253 0xfd '²' */ 0x78, /* 0111100 */ 0xcc, /* 1100110 */ 0x18, /* 0001100 */ 0x30, /* 0011000 */ 0x64, /* 0110010 */ 0xfc, /* 1111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 254 0xfe '■' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x7c, /* 0111110 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ /* 255 0xff ' ' */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ 0x00, /* 0000000 */ } }; const struct font_desc font_7x14 = { .idx = FONT7x14_IDX, .name = "7x14", .width = 7, .height = 14, .charcount = 256, .data = fontdata_7x14.data, .pref = 0, };
// SPDX-License-Identifier: GPL-2.0 /* * helpers to map values in a linear range to range index * * Original idea borrowed from regulator framework * * It might be useful if we could support also inversely proportional ranges? * Copyright 2020 ROHM Semiconductors */ #include <linux/errno.h> #include <linux/export.h> #include <linux/kernel.h> #include <linux/linear_range.h> #include <linux/module.h> /** * linear_range_values_in_range - return the amount of values in a range * @r: pointer to linear range where values are counted * * Compute the amount of values in range pointed by @r. Note, values can * be all equal - range with selectors 0,...,2 with step 0 still contains * 3 values even though they are all equal. * * Return: the amount of values in range pointed by @r */ unsigned int linear_range_values_in_range(const struct linear_range *r) { if (!r) return 0; return r->max_sel - r->min_sel + 1; } EXPORT_SYMBOL_GPL(linear_range_values_in_range); /** * linear_range_values_in_range_array - return the amount of values in ranges * @r: pointer to array of linear ranges where values are counted * @ranges: amount of ranges we include in computation. * * Compute the amount of values in ranges pointed by @r. Note, values can * be all equal - range with selectors 0,...,2 with step 0 still contains * 3 values even though they are all equal. 
* * Return: the amount of values in first @ranges ranges pointed by @r */ unsigned int linear_range_values_in_range_array(const struct linear_range *r, int ranges) { int i, values_in_range = 0; for (i = 0; i < ranges; i++) { int values; values = linear_range_values_in_range(&r[i]); if (!values) return values; values_in_range += values; } return values_in_range; } EXPORT_SYMBOL_GPL(linear_range_values_in_range_array); /** * linear_range_get_max_value - return the largest value in a range * @r: pointer to linear range where value is looked from * * Return: the largest value in the given range */ unsigned int linear_range_get_max_value(const struct linear_range *r) { return r->min + (r->max_sel - r->min_sel) * r->step; } EXPORT_SYMBOL_GPL(linear_range_get_max_value); /** * linear_range_get_value - fetch a value from given range * @r: pointer to linear range where value is looked from * @selector: selector for which the value is searched * @val: address where found value is updated * * Search given ranges for value which matches given selector. * * Return: 0 on success, -EINVAL given selector is not found from any of the * ranges. */ int linear_range_get_value(const struct linear_range *r, unsigned int selector, unsigned int *val) { if (r->min_sel > selector || r->max_sel < selector) return -EINVAL; *val = r->min + (selector - r->min_sel) * r->step; return 0; } EXPORT_SYMBOL_GPL(linear_range_get_value); /** * linear_range_get_value_array - fetch a value from array of ranges * @r: pointer to array of linear ranges where value is looked from * @ranges: amount of ranges in an array * @selector: selector for which the value is searched * @val: address where found value is updated * * Search through an array of ranges for value which matches given selector. * * Return: 0 on success, -EINVAL given selector is not found from any of the * ranges. 
*/ int linear_range_get_value_array(const struct linear_range *r, int ranges, unsigned int selector, unsigned int *val) { int i; for (i = 0; i < ranges; i++) if (r[i].min_sel <= selector && r[i].max_sel >= selector) return linear_range_get_value(&r[i], selector, val); return -EINVAL; } EXPORT_SYMBOL_GPL(linear_range_get_value_array); /** * linear_range_get_selector_low - return linear range selector for value * @r: pointer to linear range where selector is looked from * @val: value for which the selector is searched * @selector: address where found selector value is updated * @found: flag to indicate that given value was in the range * * Return selector for which range value is closest match for given * input value. Value is matching if it is equal or smaller than given * value. If given value is in the range, then @found is set true. * * Return: 0 on success, -EINVAL if range is invalid or does not contain * value smaller or equal to given value */ int linear_range_get_selector_low(const struct linear_range *r, unsigned int val, unsigned int *selector, bool *found) { *found = false; if (r->min > val) return -EINVAL; if (linear_range_get_max_value(r) < val) { *selector = r->max_sel; return 0; } *found = true; if (r->step == 0) *selector = r->min_sel; else *selector = (val - r->min) / r->step + r->min_sel; return 0; } EXPORT_SYMBOL_GPL(linear_range_get_selector_low); /** * linear_range_get_selector_low_array - return linear range selector for value * @r: pointer to array of linear ranges where selector is looked from * @ranges: amount of ranges to scan from array * @val: value for which the selector is searched * @selector: address where found selector value is updated * @found: flag to indicate that given value was in the range * * Scan array of ranges for selector for which range value matches given * input value. Value is matching if it is equal or smaller than given * value. If given value is found to be in a range scanning is stopped and * @found is set true. 
If a range with values smaller than given value is found * but the range max is being smaller than given value, then the range's * biggest selector is updated to @selector but scanning ranges is continued * and @found is set to false. * * Return: 0 on success, -EINVAL if range array is invalid or does not contain * range with a value smaller or equal to given value */ int linear_range_get_selector_low_array(const struct linear_range *r, int ranges, unsigned int val, unsigned int *selector, bool *found) { int i; int ret = -EINVAL; for (i = 0; i < ranges; i++) { int tmpret; tmpret = linear_range_get_selector_low(&r[i], val, selector, found); if (!tmpret) ret = 0; if (*found) break; } return ret; } EXPORT_SYMBOL_GPL(linear_range_get_selector_low_array); /** * linear_range_get_selector_high - return linear range selector for value * @r: pointer to linear range where selector is looked from * @val: value for which the selector is searched * @selector: address where found selector value is updated * @found: flag to indicate that given value was in the range * * Return selector for which range value is closest match for given * input value. Value is matching if it is equal or higher than given * value. If given value is in the range, then @found is set true. 
* * Return: 0 on success, -EINVAL if range is invalid or does not contain * value greater or equal to given value */ int linear_range_get_selector_high(const struct linear_range *r, unsigned int val, unsigned int *selector, bool *found) { *found = false; if (linear_range_get_max_value(r) < val) return -EINVAL; if (r->min > val) { *selector = r->min_sel; return 0; } *found = true; if (r->step == 0) *selector = r->max_sel; else *selector = DIV_ROUND_UP(val - r->min, r->step) + r->min_sel; return 0; } EXPORT_SYMBOL_GPL(linear_range_get_selector_high); /** * linear_range_get_selector_within - return linear range selector for value * @r: pointer to linear range where selector is looked from * @val: value for which the selector is searched * @selector: address where found selector value is updated * * Return selector for which range value is closest match for given * input value. Value is matching if it is equal or lower than given * value. But return maximum selector if given value is higher than * maximum value. */ void linear_range_get_selector_within(const struct linear_range *r, unsigned int val, unsigned int *selector) { if (r->min > val) { *selector = r->min_sel; return; } if (linear_range_get_max_value(r) < val) { *selector = r->max_sel; return; } if (r->step == 0) *selector = r->min_sel; else *selector = (val - r->min) / r->step + r->min_sel; } EXPORT_SYMBOL_GPL(linear_range_get_selector_within); MODULE_DESCRIPTION("linear-ranges helper"); MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * cx18 driver version information
 *
 * Copyright (C) 2007 Hans Verkuil <[email protected]>
 */

#ifndef CX18_VERSION_H
#define CX18_VERSION_H

/*
 * Short driver name string; presumably used as the module/device name
 * when registering with the kernel and in log messages — confirm
 * against cx18-driver.c.
 */
#define CX18_DRIVER_NAME "cx18"

/* Human-readable driver version string. */
#define CX18_VERSION "1.5.1"

#endif
/*
 * Copyright (c) 2002-2010, Intel Corporation.
 * Copyright (c) 2014 ATRON electronic GmbH
 * Author: Jan Safrata <[email protected]>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include <linux/delay.h>
#include <linux/i2c-algo-bit.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/types.h>

#include "psb_drv.h"
#include "psb_intel_reg.h"

/*
 * LPC GPIO based I2C bus for LVDS of Atom E6xx
 *
 * The SDA/SCL lines are driven through LPC GPIO registers, and
 * i2c-algo-bit provides the bus protocol on top of the four
 * get/set callbacks below.
 */

/*-----------------------------------------------------------------------------
 * LPC Register Offsets. Used for LVDS GPIO Bit Bashing. Registers are part
 * Atom E6xx [D31:F0]
 ----------------------------------------------------------------------------*/
#define RGEN	0x20
#define RGIO	0x24
#define RGLVL	0x28
#define RGTPE	0x2C
#define RGTNE	0x30
#define RGGPE	0x34
#define RGSMI	0x38
#define RGTS	0x3C

/* The LVDS GPIO clock lines are GPIOSUS[3]
 * The LVDS GPIO data lines are GPIOSUS[4]
 */
#define GPIO_CLOCK	0x08
#define GPIO_DATA	0x10

/* Port I/O accessors relative to the channel's LPC GPIO base. */
#define LPC_READ_REG(chan, r) inl((chan)->reg + (r))
#define LPC_WRITE_REG(chan, r, val) outl((val), (chan)->reg + (r))

/*
 * i2c-algo-bit getscl callback: sample the SCL line level.
 *
 * Setting the GPIO_CLOCK bit in RGIO before reading RGLVL appears to
 * release the line so its level can be sampled (open-drain style);
 * the first RGLVL read looks like a dummy/flush read — TODO confirm
 * against the Atom E6xx LPC GPIO documentation.
 */
static int get_clock(void *data)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	val = LPC_READ_REG(chan, RGIO);
	val |= GPIO_CLOCK;
	LPC_WRITE_REG(chan, RGIO, val);
	LPC_READ_REG(chan, RGLVL);
	val = (LPC_READ_REG(chan, RGLVL) & GPIO_CLOCK) ? 1 : 0;

	return val;
}

/*
 * i2c-algo-bit getsda callback: sample the SDA line level.
 * Mirrors get_clock() but for the GPIO_DATA bit.
 */
static int get_data(void *data)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	val = LPC_READ_REG(chan, RGIO);
	val |= GPIO_DATA;
	LPC_WRITE_REG(chan, RGIO, val);
	LPC_READ_REG(chan, RGLVL);
	val = (LPC_READ_REG(chan, RGLVL) & GPIO_DATA) ? 1 : 0;

	return val;
}

/*
 * i2c-algo-bit setscl callback: drive SCL high (released) or low.
 *
 * For the low case both RGIO and RGLVL have the clock bit cleared,
 * actively driving the line low.
 */
static void set_clock(void *data, int state_high)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	if (state_high) {
		val = LPC_READ_REG(chan, RGIO);
		val |= GPIO_CLOCK;
		LPC_WRITE_REG(chan, RGIO, val);
	} else {
		val = LPC_READ_REG(chan, RGIO);
		val &= ~GPIO_CLOCK;
		LPC_WRITE_REG(chan, RGIO, val);
		val = LPC_READ_REG(chan, RGLVL);
		val &= ~GPIO_CLOCK;
		LPC_WRITE_REG(chan, RGLVL, val);
	}
}

/*
 * i2c-algo-bit setsda callback: drive SDA high (released) or low.
 * Same register sequence as set_clock(), using the data bit.
 */
static void set_data(void *data, int state_high)
{
	struct gma_i2c_chan *chan = data;
	u32 val;

	if (state_high) {
		val = LPC_READ_REG(chan, RGIO);
		val |= GPIO_DATA;
		LPC_WRITE_REG(chan, RGIO, val);
	} else {
		val = LPC_READ_REG(chan, RGIO);
		val &= ~GPIO_DATA;
		LPC_WRITE_REG(chan, RGIO, val);
		val = LPC_READ_REG(chan, RGLVL);
		val &= ~GPIO_DATA;
		LPC_WRITE_REG(chan, RGLVL, val);
	}
}

/*
 * Create and register the LPC-GPIO-backed bit-banging I2C adapter.
 *
 * @dev: DRM device whose private data carries the LPC GPIO I/O base.
 *
 * Returns the new channel on success or an ERR_PTR on failure. The
 * returned channel is heap-allocated; the caller owns it and is
 * responsible for tearing down the adapter and freeing it.
 */
struct gma_i2c_chan *oaktrail_lvds_i2c_init(struct drm_device *dev)
{
	struct drm_psb_private *dev_priv = to_drm_psb_private(dev);
	struct gma_i2c_chan *chan;
	int ret;

	chan = kzalloc(sizeof(struct gma_i2c_chan), GFP_KERNEL);
	if (!chan)
		return ERR_PTR(-ENOMEM);

	chan->drm_dev = dev;
	chan->reg = dev_priv->lpc_gpio_base;
	strscpy(chan->base.name, "gma500 LPC", sizeof(chan->base.name));
	chan->base.owner = THIS_MODULE;
	chan->base.algo_data = &chan->algo;
	chan->base.dev.parent = dev->dev;
	chan->algo.setsda = set_data;
	chan->algo.setscl = set_clock;
	chan->algo.getsda = get_data;
	chan->algo.getscl = get_clock;
	chan->algo.udelay = 100;
	chan->algo.timeout = usecs_to_jiffies(2200);
	chan->algo.data = chan;

	i2c_set_adapdata(&chan->base, chan);

	/* Release both lines (bus idle) and let them settle before probing. */
	set_data(chan, 1);
	set_clock(chan, 1);
	udelay(50);

	ret = i2c_bit_add_bus(&chan->base);
	if (ret < 0) {
		kfree(chan);
		return ERR_PTR(ret);
	}

	return chan;
}
// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Google Pazquel board device tree source
 *
 * Copyright 2022 Google LLC.
 */

/dts-v1/;

#include "sc7180-trogdor.dtsi"
#include "sc7180-trogdor-parade-ps8640.dtsi"
#include "sc7180-trogdor-pazquel360.dtsi"

/ {
	/* SKU 21: Pazquel variant with Parade bridge, Wi-Fi only (no WWAN). */
	model = "Google Pazquel (Parade,WIFI-only)";
	compatible = "google,pazquel-sku21", "qcom,sc7180";
};
// SPDX-License-Identifier: GPL-2.0
//
// Register map access API - SPI AVMM support
//
// Copyright (C) 2018-2020 Intel Corporation. All rights reserved.

#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/spi/spi.h>
#include <linux/swab.h>

/*
 * This driver implements the regmap operations for a generic SPI
 * master to access the registers of the spi slave chip which has an
 * Avalon bus in it.
 *
 * The "SPI slave to Avalon Master Bridge" (spi-avmm) IP should be integrated
 * in the spi slave chip. The IP acts as a bridge to convert encoded streams of
 * bytes from the host to the internal register read/write on Avalon bus. In
 * order to issue register access requests to the slave chip, the host should
 * send formatted bytes that conform to the transfer protocol.
 * The transfer protocol contains 3 layers: transaction layer, packet layer
 * and physical layer.
 *
 * Reference Documents could be found at:
 * https://www.intel.com/content/www/us/en/programmable/documentation/sfo1400787952932.html
 *
 * Chapter "SPI Slave/JTAG to Avalon Master Bridge Cores" is a general
 * introduction to the protocol.
 *
 * Chapter "Avalon Packets to Transactions Converter Core" describes
 * the transaction layer.
 *
 * Chapter "Avalon-ST Bytes to Packets and Packets to Bytes Converter Cores"
 * describes the packet layer.
 *
 * Chapter "Avalon-ST Serial Peripheral Interface Core" describes the
 * physical layer.
 *
 * When host issues a regmap read/write, the driver will transform the request
 * to byte stream layer by layer. It formats the register addr, value and
 * length to the transaction layer request, then converts the request to packet
 * layer bytes stream and then to physical layer bytes stream. Finally the
 * driver sends the formatted byte stream over SPI bus to the slave chip.
 *
 * The spi-avmm IP on the slave chip decodes the byte stream and initiates
 * register read/write on its internal Avalon bus, and then encodes the
 * response to byte stream and sends back to host.
 *
 * The driver receives the byte stream, reverses the 3 layers transformation,
 * and finally gets the response value (read out data for register read,
 * successful written size for register write).
 */

/* Special chars of the packet layer */
#define PKT_SOP			0x7a
#define PKT_EOP			0x7b
#define PKT_CHANNEL		0x7c
#define PKT_ESC			0x7d

/* Special chars of the physical layer */
#define PHY_IDLE		0x4a
#define PHY_ESC			0x4d

/* Transaction layer request codes */
#define TRANS_CODE_WRITE	0x0
#define TRANS_CODE_SEQ_WRITE	0x4
#define TRANS_CODE_READ		0x10
#define TRANS_CODE_SEQ_READ	0x14
#define TRANS_CODE_NO_TRANS	0x7f

#define SPI_AVMM_XFER_TIMEOUT	(msecs_to_jiffies(200))

/* slave's register addr is 32 bits */
#define SPI_AVMM_REG_SIZE		4UL
/* slave's register value is 32 bits */
#define SPI_AVMM_VAL_SIZE		4UL

/*
 * max rx size could be larger. But considering the buffer consuming,
 * it is proper that we limit 1KB xfer at max.
 */
#define MAX_READ_CNT		256UL
#define MAX_WRITE_CNT		1UL

/* Transaction layer request header, sent big-endian on the wire. */
struct trans_req_header {
	u8 code;
	u8 rsvd;
	__be16 size;
	__be32 addr;
} __packed;

/* Transaction layer response header returned for write requests. */
struct trans_resp_header {
	u8 r_code;
	u8 rsvd;
	__be16 size;
} __packed;

#define TRANS_REQ_HD_SIZE	(sizeof(struct trans_req_header))
#define TRANS_RESP_HD_SIZE	(sizeof(struct trans_resp_header))

/*
 * In transaction layer,
 * the write request format is: Transaction request header + data
 * the read request format is: Transaction request header
 * the write response format is: Transaction response header
 * the read response format is: pure data, no Transaction response header
 */
#define TRANS_WR_TX_SIZE(n)	(TRANS_REQ_HD_SIZE + SPI_AVMM_VAL_SIZE * (n))
#define TRANS_RD_TX_SIZE	TRANS_REQ_HD_SIZE
#define TRANS_TX_MAX		TRANS_WR_TX_SIZE(MAX_WRITE_CNT)

#define TRANS_RD_RX_SIZE(n)	(SPI_AVMM_VAL_SIZE * (n))
#define TRANS_WR_RX_SIZE	TRANS_RESP_HD_SIZE
#define TRANS_RX_MAX		TRANS_RD_RX_SIZE(MAX_READ_CNT)

/* tx & rx share one transaction layer buffer */
#define TRANS_BUF_SIZE		((TRANS_TX_MAX > TRANS_RX_MAX) ? \
				 TRANS_TX_MAX : TRANS_RX_MAX)

/*
 * In tx phase, the host prepares all the phy layer bytes of a request in the
 * phy buffer and sends them in a batch.
 *
 * The packet layer and physical layer defines several special chars for
 * various purpose, when a transaction layer byte hits one of these special
 * chars, it should be escaped. The escape rule is, "Escape char first,
 * following the byte XOR'ed with 0x20".
 *
 * This macro defines the max possible length of the phy data. In the worst
 * case, all transaction layer bytes need to be escaped (so the data length
 * doubles), plus 4 special chars (SOP, CHANNEL, CHANNEL_NUM, EOP). Finally
 * we should make sure the length is aligned to SPI BPW.
 */
#define PHY_TX_MAX		ALIGN(2 * TRANS_TX_MAX + 4, 4)

/*
 * Unlike tx, phy rx is affected by possible PHY_IDLE bytes from slave, the max
 * length of the rx bit stream is unpredictable. So the driver reads the words
 * one by one, and parses each word immediately into transaction layer buffer.
 * Only one word length of phy buffer is used for rx.
 */
#define PHY_BUF_SIZE		PHY_TX_MAX

/**
 * struct spi_avmm_bridge - SPI slave to AVMM bus master bridge
 *
 * @spi: spi slave associated with this bridge.
 * @word_len: bytes of word for spi transfer.
 * @trans_len: length of valid data in trans_buf.
 * @phy_len: length of valid data in phy_buf.
 * @trans_buf: the bridge buffer for transaction layer data.
 * @phy_buf: the bridge buffer for physical layer data.
 * @swap_words: the word swapping cb for phy data. NULL if not needed.
 *
 * As a device's registers are implemented on the AVMM bus address space, it
 * requires the driver to issue formatted requests to spi slave to AVMM bus
 * master bridge to perform register access.
 */
struct spi_avmm_bridge {
	struct spi_device *spi;
	unsigned char word_len;
	unsigned int trans_len;
	unsigned int phy_len;
	/* bridge buffer used in translation between protocol layers */
	char trans_buf[TRANS_BUF_SIZE];
	char phy_buf[PHY_BUF_SIZE];
	void (*swap_words)(void *buf, unsigned int len);
};

/* Byte-swap each 32-bit word in @buf; @len is the byte count. */
static void br_swap_words_32(void *buf, unsigned int len)
{
	swab32_array(buf, len / 4);
}

/*
 * Format transaction layer data in br->trans_buf according to the register
 * access request, Store valid transaction layer data length in br->trans_len.
 */
static int br_trans_tx_prepare(struct spi_avmm_bridge *br, bool is_read, u32 reg,
			       u32 *wr_val, u32 count)
{
	struct trans_req_header *header;
	unsigned int trans_len;
	u8 code;
	__le32 *data;
	int i;

	/* Single accesses and bursts use distinct transaction codes. */
	if (is_read) {
		if (count == 1)
			code = TRANS_CODE_READ;
		else
			code = TRANS_CODE_SEQ_READ;
	} else {
		if (count == 1)
			code = TRANS_CODE_WRITE;
		else
			code = TRANS_CODE_SEQ_WRITE;
	}

	header = (struct trans_req_header *)br->trans_buf;
	header->code = code;
	header->rsvd = 0;
	header->size = cpu_to_be16((u16)count * SPI_AVMM_VAL_SIZE);
	header->addr = cpu_to_be32(reg);

	trans_len = TRANS_REQ_HD_SIZE;

	if (!is_read) {
		trans_len += SPI_AVMM_VAL_SIZE * count;
		if (trans_len > sizeof(br->trans_buf))
			return -ENOMEM;

		/* Payload values are carried little-endian after the header. */
		data = (__le32 *)(br->trans_buf + TRANS_REQ_HD_SIZE);

		for (i = 0; i < count; i++)
			*data++ = cpu_to_le32(*wr_val++);
	}

	/* Store valid trans data length for next layer */
	br->trans_len = trans_len;

	return 0;
}

/*
 * Convert transaction layer data (in br->trans_buf) to phy layer data, store
 * them in br->phy_buf. Pad the phy_buf aligned with SPI's BPW. Store valid phy
 * layer data length in br->phy_len.
 *
 * phy_buf len should be aligned with SPI's BPW. Spare bytes should be padded
 * with PHY_IDLE, then the slave will just drop them.
 *
 * The driver will not simply pad 4a at the tail. The concern is that driver
 * will not store MISO data during tx phase, if the driver pads 4a at the tail,
 * it is possible that if the slave is fast enough to response at the padding
 * time. As a result these rx bytes are lost. In the following case, 7a,7c,00
 * will lost.
 * MOSI ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|4a|4a|4a| |XX|XX|...
 * MISO ...|4a|4a|4a|4a| |4a|4a|4a|4a| |4a|4a|4a|4a| |4a|7a|7c|00| |78|56|...
 *
 * So the driver moves EOP and bytes after EOP to the end of the aligned size,
 * then fill the hole with PHY_IDLE. As following:
 * before pad ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|7b| |40|
 * after pad  ...|7a|7c|00|10| |00|00|04|02| |4b|7d|5a|4a| |4a|4a|7b|40|
 * Then if the slave will not get the entire packet before the tx phase is
 * over, it can't responsed to anything either.
 */
static int br_pkt_phy_tx_prepare(struct spi_avmm_bridge *br)
{
	char *tb, *tb_end, *pb, *pb_limit, *pb_eop = NULL;
	unsigned int aligned_phy_len, move_size;
	bool need_esc = false;

	tb = br->trans_buf;
	tb_end = tb + br->trans_len;

	pb = br->phy_buf;
	pb_limit = pb + ARRAY_SIZE(br->phy_buf);

	*pb++ = PKT_SOP;

	/*
	 * The driver doesn't support multiple channels so the channel number
	 * is always 0.
	 */
	*pb++ = PKT_CHANNEL;
	*pb++ = 0x0;

	for (; pb < pb_limit && tb < tb_end; pb++) {
		if (need_esc) {
			/* Emit the escaped (XOR 0x20) form of the byte. */
			*pb = *tb++ ^ 0x20;
			need_esc = false;
			continue;
		}

		/* EOP should be inserted before the last valid char */
		if (tb == tb_end - 1 && !pb_eop) {
			*pb = PKT_EOP;
			pb_eop = pb;
			continue;
		}

		/*
		 * insert an ESCAPE char if the data value equals any special
		 * char.
		 */
		switch (*tb) {
		case PKT_SOP:
		case PKT_EOP:
		case PKT_CHANNEL:
		case PKT_ESC:
			*pb = PKT_ESC;
			need_esc = true;
			break;
		case PHY_IDLE:
		case PHY_ESC:
			*pb = PHY_ESC;
			need_esc = true;
			break;
		default:
			*pb = *tb++;
			break;
		}
	}

	/* The phy buffer is used out but transaction layer data remains */
	if (tb < tb_end)
		return -ENOMEM;

	/* Store valid phy data length for spi transfer */
	br->phy_len = pb - br->phy_buf;

	if (br->word_len == 1)
		return 0;

	/* Do phy buf padding if word_len > 1 byte. */
	aligned_phy_len = ALIGN(br->phy_len, br->word_len);
	if (aligned_phy_len > sizeof(br->phy_buf))
		return -ENOMEM;

	if (aligned_phy_len == br->phy_len)
		return 0;

	/* move EOP and bytes after EOP to the end of aligned size */
	move_size = pb - pb_eop;
	memmove(&br->phy_buf[aligned_phy_len - move_size], pb_eop, move_size);

	/* fill the hole with PHY_IDLEs */
	memset(pb_eop, PHY_IDLE, aligned_phy_len - br->phy_len);

	/* update the phy data length */
	br->phy_len = aligned_phy_len;

	return 0;
}

/*
 * In tx phase, the slave only returns PHY_IDLE (0x4a). So the driver will
 * ignore rx in tx phase.
 */
static int br_do_tx(struct spi_avmm_bridge *br)
{
	/* reorder words for spi transfer */
	if (br->swap_words)
		br->swap_words(br->phy_buf, br->phy_len);

	/* send all data in phy_buf  */
	return spi_write(br->spi, br->phy_buf, br->phy_len);
}

/*
 * This function read the rx byte stream from SPI word by word and convert
 * them to transaction layer data in br->trans_buf. It also stores the length
 * of rx transaction layer data in br->trans_len
 *
 * The slave may send an unknown number of PHY_IDLEs in rx phase, so we cannot
 * prepare a fixed length buffer to receive all of the rx data in a batch. We
 * have to read word by word and convert them to transaction layer data at
 * once.
 */
static int br_do_rx_and_pkt_phy_parse(struct spi_avmm_bridge *br)
{
	bool eop_found = false, channel_found = false, esc_found = false;
	bool valid_word = false, last_try = false;
	struct device *dev = &br->spi->dev;
	char *pb, *tb_limit, *tb = NULL;
	unsigned long poll_timeout;
	int ret, i;

	tb_limit = br->trans_buf + ARRAY_SIZE(br->trans_buf);
	pb = br->phy_buf;
	poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
	while (tb < tb_limit) {
		ret = spi_read(br->spi, pb, br->word_len);
		if (ret)
			return ret;

		/* reorder the word back */
		if (br->swap_words)
			br->swap_words(pb, br->word_len);

		valid_word = false;
		for (i = 0; i < br->word_len; i++) {
			/* drop everything before first SOP */
			if (!tb && pb[i] != PKT_SOP)
				continue;

			/* drop PHY_IDLE */
			if (pb[i] == PHY_IDLE)
				continue;

			valid_word = true;

			/*
			 * We don't support multiple channels, so error out if
			 * a non-zero channel number is found.
			 */
			if (channel_found) {
				if (pb[i] != 0) {
					dev_err(dev, "%s channel num != 0\n",
						__func__);
					return -EFAULT;
				}

				channel_found = false;
				continue;
			}

			switch (pb[i]) {
			case PKT_SOP:
				/*
				 * reset the parsing if a second SOP appears.
				 */
				tb = br->trans_buf;
				eop_found = false;
				channel_found = false;
				esc_found = false;
				break;
			case PKT_EOP:
				/*
				 * No special char is expected after ESC char.
				 * No special char (except ESC & PHY_IDLE) is
				 * expected after EOP char.
				 *
				 * The special chars are all dropped.
				 */
				if (esc_found || eop_found)
					return -EFAULT;

				eop_found = true;
				break;
			case PKT_CHANNEL:
				if (esc_found || eop_found)
					return -EFAULT;

				channel_found = true;
				break;
			case PKT_ESC:
			case PHY_ESC:
				if (esc_found)
					return -EFAULT;

				esc_found = true;
				break;
			default:
				/* Record the normal byte in trans_buf. */
				if (esc_found) {
					*tb++ = pb[i] ^ 0x20;
					esc_found = false;
				} else {
					*tb++ = pb[i];
				}

				/*
				 * We get the last normal byte after EOP, it is
				 * time we finish. Normally the function should
				 * return here.
				 */
				if (eop_found) {
					br->trans_len = tb - br->trans_buf;
					return 0;
				}
			}
		}

		if (valid_word) {
			/* update poll timeout when we get valid word */
			poll_timeout = jiffies + SPI_AVMM_XFER_TIMEOUT;
			last_try = false;
		} else {
			/*
			 * We timeout when rx keeps invalid for some time. But
			 * it is possible we are scheduled out for long time
			 * after a spi_read. So when we are scheduled in, a SW
			 * timeout happens. But actually HW may have worked fine
			 * and has been ready long time ago. So we need to do an
			 * extra read, if we get a valid word then we could
			 * continue rx, otherwise a real HW issue has happened.
			 */
			if (last_try)
				return -ETIMEDOUT;

			if (time_after(jiffies, poll_timeout))
				last_try = true;
		}
	}

	/*
	 * We have used out all transfer layer buffer but cannot find the end
	 * of the byte stream.
	 */
	dev_err(dev, "%s transfer buffer is full but rx doesn't end\n",
		__func__);

	return -EFAULT;
}

/*
 * For read transactions, the avmm bus will directly return register values
 * without transaction response header.
 */
static int br_rd_trans_rx_parse(struct spi_avmm_bridge *br,
				u32 *val, unsigned int expected_count)
{
	unsigned int i, trans_len = br->trans_len;
	__le32 *data;

	/* The response must be exactly the requested number of values. */
	if (expected_count * SPI_AVMM_VAL_SIZE != trans_len)
		return -EFAULT;

	data = (__le32 *)br->trans_buf;
	for (i = 0; i < expected_count; i++)
		*val++ = le32_to_cpu(*data++);

	return 0;
}

/*
 * For write transactions, the slave will return a transaction response
 * header.
 */
static int br_wr_trans_rx_parse(struct spi_avmm_bridge *br,
				unsigned int expected_count)
{
	unsigned int trans_len = br->trans_len;
	struct trans_resp_header *resp;
	u8 code;
	u16 val_len;

	if (trans_len != TRANS_RESP_HD_SIZE)
		return -EFAULT;

	resp = (struct trans_resp_header *)br->trans_buf;

	/* The response code echoes the request code with bit 7 set; undo it. */
	code = resp->r_code ^ 0x80;
	val_len = be16_to_cpu(resp->size);
	if (!val_len || val_len != expected_count * SPI_AVMM_VAL_SIZE)
		return -EFAULT;

	/* error out if the trans code doesn't align with the val size */
	if ((val_len == SPI_AVMM_VAL_SIZE && code != TRANS_CODE_WRITE) ||
	    (val_len > SPI_AVMM_VAL_SIZE && code != TRANS_CODE_SEQ_WRITE))
		return -EFAULT;

	return 0;
}

/*
 * Perform one register read or write of @count values starting at @reg:
 * build the request through the three protocol layers, send it, receive
 * the response and parse it back. Returns 0 or a negative error code.
 */
static int do_reg_access(void *context, bool is_read, unsigned int reg,
			 unsigned int *value, unsigned int count)
{
	struct spi_avmm_bridge *br = context;
	int ret;

	/* invalidate bridge buffers first */
	br->trans_len = 0;
	br->phy_len = 0;

	ret = br_trans_tx_prepare(br, is_read, reg, value, count);
	if (ret)
		return ret;

	ret = br_pkt_phy_tx_prepare(br);
	if (ret)
		return ret;

	ret = br_do_tx(br);
	if (ret)
		return ret;

	ret = br_do_rx_and_pkt_phy_parse(br);
	if (ret)
		return ret;

	if (is_read)
		return br_rd_trans_rx_parse(br, value, count);
	else
		return br_wr_trans_rx_parse(br, count);
}

/* regmap_bus .gather_write: separate register and value buffers. */
static int regmap_spi_avmm_gather_write(void *context,
					const void *reg_buf, size_t reg_len,
					const void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, false, *(u32 *)reg_buf, (u32 *)val_buf,
			     val_len / SPI_AVMM_VAL_SIZE);
}

/* regmap_bus .write: register address followed by values in one buffer. */
static int regmap_spi_avmm_write(void *context, const void *data, size_t bytes)
{
	if (bytes < SPI_AVMM_REG_SIZE + SPI_AVMM_VAL_SIZE)
		return -EINVAL;

	return regmap_spi_avmm_gather_write(context, data, SPI_AVMM_REG_SIZE,
					    data + SPI_AVMM_REG_SIZE,
					    bytes - SPI_AVMM_REG_SIZE);
}

/* regmap_bus .read: read val_len bytes' worth of values starting at *reg_buf. */
static int regmap_spi_avmm_read(void *context,
				const void *reg_buf, size_t reg_len,
				void *val_buf, size_t val_len)
{
	if (reg_len != SPI_AVMM_REG_SIZE)
		return -EINVAL;

	if (!IS_ALIGNED(val_len, SPI_AVMM_VAL_SIZE))
		return -EINVAL;

	return do_reg_access(context, true, *(u32 *)reg_buf, val_buf,
			     (val_len / SPI_AVMM_VAL_SIZE));
}

/*
 * Allocate and initialize the bridge context for @spi, negotiating the
 * SPI bits-per-word. Returns the bridge or an ERR_PTR on failure; the
 * caller owns the returned allocation (freed by spi_avmm_bridge_ctx_free).
 */
static struct spi_avmm_bridge *
spi_avmm_bridge_ctx_gen(struct spi_device *spi)
{
	struct spi_avmm_bridge *br;

	if (!spi)
		return ERR_PTR(-ENODEV);

	/* Only support BPW == 8 or 32 now. Try 32 BPW first. */
	spi->mode = SPI_MODE_1;
	spi->bits_per_word = 32;
	if (spi_setup(spi)) {
		spi->bits_per_word = 8;
		if (spi_setup(spi))
			return ERR_PTR(-EINVAL);
	}

	br = kzalloc(sizeof(*br), GFP_KERNEL);
	if (!br)
		return ERR_PTR(-ENOMEM);

	br->spi = spi;
	br->word_len = spi->bits_per_word / 8;
	if (br->word_len == 4) {
		/*
		 * The protocol requires little endian byte order but MSB
		 * first. So driver needs to swap the byte order word by word
		 * if word length > 1.
		 */
		br->swap_words = br_swap_words_32;
	}

	return br;
}

/* regmap_bus .free_context: release the bridge allocated by ctx_gen. */
static void spi_avmm_bridge_ctx_free(void *context)
{
	kfree(context);
}

static const struct regmap_bus regmap_spi_avmm_bus = {
	.write = regmap_spi_avmm_write,
	.gather_write = regmap_spi_avmm_gather_write,
	.read = regmap_spi_avmm_read,
	.reg_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.val_format_endian_default = REGMAP_ENDIAN_NATIVE,
	.max_raw_read = SPI_AVMM_VAL_SIZE * MAX_READ_CNT,
	.max_raw_write = SPI_AVMM_VAL_SIZE * MAX_WRITE_CNT,
	.free_context = spi_avmm_bridge_ctx_free,
};

/* Create a regmap over the spi-avmm bridge; see regmap_init_spi_avmm(). */
struct regmap *__regmap_init_spi_avmm(struct spi_device *spi,
				      const struct regmap_config *config,
				      struct lock_class_key *lock_key,
				      const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __regmap_init(&spi->dev, &regmap_spi_avmm_bus,
			    bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__regmap_init_spi_avmm);

/* Device-managed variant of __regmap_init_spi_avmm(). */
struct regmap *__devm_regmap_init_spi_avmm(struct spi_device *spi,
					   const struct regmap_config *config,
					   struct lock_class_key *lock_key,
					   const char *lock_name)
{
	struct spi_avmm_bridge *bridge;
	struct regmap *map;

	bridge = spi_avmm_bridge_ctx_gen(spi);
	if (IS_ERR(bridge))
		return ERR_CAST(bridge);

	map = __devm_regmap_init(&spi->dev, &regmap_spi_avmm_bus,
				 bridge, config, lock_key, lock_name);
	if (IS_ERR(map)) {
		spi_avmm_bridge_ctx_free(bridge);
		return ERR_CAST(map);
	}

	return map;
}
EXPORT_SYMBOL_GPL(__devm_regmap_init_spi_avmm);

MODULE_DESCRIPTION("Register map access API - SPI AVMM support");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0+ /* * Rockchip AXI PCIe host controller driver * * Copyright (c) 2016 Rockchip, Inc. * * Author: Shawn Lin <[email protected]> * Wenrui Li <[email protected]> * * Bits taken from Synopsys DesignWare Host controller driver and * ARM PCI Host generic driver. */ #include <linux/bitrev.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/irq.h> #include <linux/irqchip/chained_irq.h> #include <linux/irqdomain.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_pci.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/regmap.h> #include "../pci.h" #include "pcie-rockchip.h" static void rockchip_pcie_enable_bw_int(struct rockchip_pcie *rockchip) { u32 status; status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= (PCI_EXP_LNKCTL_LBMIE | PCI_EXP_LNKCTL_LABIE); rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); } static void rockchip_pcie_clr_bw_int(struct rockchip_pcie *rockchip) { u32 status; status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS); status |= (PCI_EXP_LNKSTA_LBMS | PCI_EXP_LNKSTA_LABS) << 16; rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS); } static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) { u32 val; /* Update Tx credit maximum update interval */ val = rockchip_pcie_read(rockchip, PCIE_CORE_TXCREDIT_CFG1); val &= ~PCIE_CORE_TXCREDIT_CFG1_MUI_MASK; val |= PCIE_CORE_TXCREDIT_CFG1_MUI_ENCODE(24000); /* ns */ rockchip_pcie_write(rockchip, val, PCIE_CORE_TXCREDIT_CFG1); } static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, struct pci_bus *bus, int dev) { /* * Access only one slot on each root port. 
* Do not read more than one device on the bus directly attached * to RC's downstream side. */ if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent)) return dev == 0; return 1; } static u8 rockchip_pcie_lane_map(struct rockchip_pcie *rockchip) { u32 val; u8 map; if (rockchip->legacy_phy) return GENMASK(MAX_LANE_NUM - 1, 0); val = rockchip_pcie_read(rockchip, PCIE_CORE_LANE_MAP); map = val & PCIE_CORE_LANE_MAP_MASK; /* The link may be using a reverse-indexed mapping. */ if (val & PCIE_CORE_LANE_MAP_REVERSE) map = bitrev8(map) >> 4; return map; } static int rockchip_pcie_rd_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 *val) { void __iomem *addr; addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + where; if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } if (size == 4) { *val = readl(addr); } else if (size == 2) { *val = readw(addr); } else if (size == 1) { *val = readb(addr); } else { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_wr_own_conf(struct rockchip_pcie *rockchip, int where, int size, u32 val) { u32 mask, tmp, offset; void __iomem *addr; offset = where & ~0x3; addr = rockchip->apb_base + PCIE_RC_CONFIG_NORMAL_BASE + offset; if (size == 4) { writel(val, addr); return PCIBIOS_SUCCESSFUL; } mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); /* * N.B. This read/modify/write isn't safe in general because it can * corrupt RW1C bits in adjacent registers. But the hardware * doesn't support smaller writes. 
*/ tmp = readl(addr) & mask; tmp |= val << ((where & 0x3) * 8); writel(tmp, addr); return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_rd_other_conf(struct rockchip_pcie *rockchip, struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { void __iomem *addr; addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); if (!IS_ALIGNED((uintptr_t)addr, size)) { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE1_CFG); if (size == 4) { *val = readl(addr); } else if (size == 2) { *val = readw(addr); } else if (size == 1) { *val = readb(addr); } else { *val = 0; return PCIBIOS_BAD_REGISTER_NUMBER; } return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_wr_other_conf(struct rockchip_pcie *rockchip, struct pci_bus *bus, u32 devfn, int where, int size, u32 val) { void __iomem *addr; addr = rockchip->reg_base + PCIE_ECAM_OFFSET(bus->number, devfn, where); if (!IS_ALIGNED((uintptr_t)addr, size)) return PCIBIOS_BAD_REGISTER_NUMBER; if (pci_is_root_bus(bus->parent)) rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE0_CFG); else rockchip_pcie_cfg_configuration_accesses(rockchip, AXI_WRAPPER_TYPE1_CFG); if (size == 4) writel(val, addr); else if (size == 2) writew(val, addr); else if (size == 1) writeb(val, addr); else return PCIBIOS_BAD_REGISTER_NUMBER; return PCIBIOS_SUCCESSFUL; } static int rockchip_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, int size, u32 *val) { struct rockchip_pcie *rockchip = bus->sysdata; if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; if (pci_is_root_bus(bus)) return rockchip_pcie_rd_own_conf(rockchip, where, size, val); return rockchip_pcie_rd_other_conf(rockchip, bus, devfn, where, size, val); } static int rockchip_pcie_wr_conf(struct pci_bus *bus, u32 devfn, int where, int 
size, u32 val) { struct rockchip_pcie *rockchip = bus->sysdata; if (!rockchip_pcie_valid_device(rockchip, bus, PCI_SLOT(devfn))) return PCIBIOS_DEVICE_NOT_FOUND; if (pci_is_root_bus(bus)) return rockchip_pcie_wr_own_conf(rockchip, where, size, val); return rockchip_pcie_wr_other_conf(rockchip, bus, devfn, where, size, val); } static struct pci_ops rockchip_pcie_ops = { .read = rockchip_pcie_rd_conf, .write = rockchip_pcie_wr_conf, }; static void rockchip_pcie_set_power_limit(struct rockchip_pcie *rockchip) { int curr; u32 status, scale, power; if (IS_ERR(rockchip->vpcie3v3)) return; /* * Set RC's captured slot power limit and scale if * vpcie3v3 available. The default values are both zero * which means the software should set these two according * to the actual power supply. */ curr = regulator_get_current_limit(rockchip->vpcie3v3); if (curr <= 0) return; scale = 3; /* 0.001x */ curr = curr / 1000; /* convert to mA */ power = (curr * 3300) / 1000; /* milliwatt */ while (power > PCIE_RC_CONFIG_DCR_CSPL_LIMIT) { if (!scale) { dev_warn(rockchip->dev, "invalid power supply\n"); return; } scale--; power = power / 10; } status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCR); status |= (power << PCIE_RC_CONFIG_DCR_CSPL_SHIFT) | (scale << PCIE_RC_CONFIG_DCR_CPLS_SHIFT); rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCR); } /** * rockchip_pcie_host_init_port - Initialize hardware * @rockchip: PCIe port information */ static int rockchip_pcie_host_init_port(struct rockchip_pcie *rockchip) { struct device *dev = rockchip->dev; int err, i = MAX_LANE_NUM; u32 status; gpiod_set_value_cansleep(rockchip->perst_gpio, 0); err = rockchip_pcie_init_port(rockchip); if (err) return err; /* Fix the transmitted FTS count desired to exit from L0s. 
*/
	/* Adjust the number of Fast Training Sequences the LTSSM emits. */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL_PLC1);
	status = (status & ~PCIE_CORE_CTRL_PLC1_FTS_MASK) |
		 (PCIE_CORE_CTRL_PLC1_FTS_CNT << PCIE_CORE_CTRL_PLC1_FTS_SHIFT);
	rockchip_pcie_write(rockchip, status, PCIE_CORE_CTRL_PLC1);

	rockchip_pcie_set_power_limit(rockchip);

	/* Set RC's clock architecture as common clock */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKSTA_SLC << 16;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Set RC's RCB to 128 */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
	status |= PCI_EXP_LNKCTL_RCB;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

	/* Enable Gen1 training */
	rockchip_pcie_write(rockchip, PCIE_CLIENT_LINK_TRAIN_ENABLE,
			    PCIE_CLIENT_CONFIG);

	/* Power-up to PERST# deassertion delay, then reset-to-ready delay. */
	msleep(PCIE_T_PVPERL_MS);
	gpiod_set_value_cansleep(rockchip->perst_gpio, 1);

	msleep(PCIE_T_RRS_READY_MS);

	/* 500ms timeout value should be enough for Gen1/2 training */
	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_BASIC_STATUS1,
				 status, PCIE_LINK_UP(status), 20,
				 500 * USEC_PER_MSEC);
	if (err) {
		dev_err(dev, "PCIe link training gen1 timeout!\n");
		goto err_power_off_phy;
	}

	if (rockchip->link_gen == 2) {
		/*
		 * Enable retrain for gen2. This should be configured only after
		 * gen1 finished.
		 */
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LCS);
		status |= PCI_EXP_LNKCTL_RL;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LCS);

		/* Gen2 failure is non-fatal: the link stays usable at gen1. */
		err = readl_poll_timeout(rockchip->apb_base + PCIE_CORE_CTRL,
					 status, PCIE_LINK_IS_GEN2(status), 20,
					 500 * USEC_PER_MSEC);
		if (err)
			dev_dbg(dev, "PCIe link training gen2 timeout, fall back to gen1!\n");
	}

	/* Check the final link width from negotiated lane counter from MGMT */
	status = rockchip_pcie_read(rockchip, PCIE_CORE_CTRL);
	status = 0x1 << ((status & PCIE_CORE_PL_CONF_LANE_MASK) >>
			 PCIE_CORE_PL_CONF_LANE_SHIFT);
	dev_dbg(dev, "current link width is x%d\n", status);

	/* Power off unused lane(s) */
	rockchip->lanes_map = rockchip_pcie_lane_map(rockchip);
	for (i = 0; i < MAX_LANE_NUM; i++) {
		if (!(rockchip->lanes_map & BIT(i))) {
			dev_dbg(dev, "idling lane %d\n", i);
			phy_power_off(rockchip->phys[i]);
		}
	}

	rockchip_pcie_write(rockchip, ROCKCHIP_VENDOR_ID,
			    PCIE_CORE_CONFIG_VENDOR);
	rockchip_pcie_write(rockchip,
			    PCI_CLASS_BRIDGE_PCI_NORMAL << 8,
			    PCIE_RC_CONFIG_RID_CCR);

	/* Clear THP cap's next cap pointer to remove L1 substate cap */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_THP_CAP);
	status &= ~PCIE_RC_CONFIG_THP_CAP_NEXT_MASK;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_THP_CAP);

	/* Clear L0s from RC's link cap */
	if (of_property_read_bool(dev->of_node, "aspm-no-l0s")) {
		status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_LINK_CAP);
		status &= ~PCIE_RC_CONFIG_LINK_CAP_L0S;
		rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_LINK_CAP);
	}

	/* Advertise Max_Payload_Size of 256 bytes in Device Control. */
	status = rockchip_pcie_read(rockchip, PCIE_RC_CONFIG_DCSR);
	status &= ~PCIE_RC_CONFIG_DCSR_MPS_MASK;
	status |= PCIE_RC_CONFIG_DCSR_MPS_256;
	rockchip_pcie_write(rockchip, status, PCIE_RC_CONFIG_DCSR);

	return 0;
err_power_off_phy:
	/* Unwind: power off already-powered PHYs, then exit all of them. */
	while (i--)
		phy_power_off(rockchip->phys[i]);
	i = MAX_LANE_NUM;
	while (i--)
		phy_exit(rockchip->phys[i]);
	return err;
}

/*
 * Top-half for the "sys" IRQ: decode local/core error interrupts, log them
 * at debug level, and acknowledge (write-1-to-clear) the status registers.
 */
static irqreturn_t rockchip_pcie_subsys_irq_handler(int irq, void *arg)
{
	struct rockchip_pcie *rockchip = arg;
	struct device *dev = rockchip->dev;
	u32 reg;
	u32 sub_reg;

	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
	if (reg & PCIE_CLIENT_INT_LOCAL) {
		dev_dbg(dev, "local interrupt received\n");
		sub_reg = rockchip_pcie_read(rockchip, PCIE_CORE_INT_STATUS);
		if (sub_reg & PCIE_CORE_INT_PRFPE)
			dev_dbg(dev, "parity error detected while reading from the PNP receive FIFO RAM\n");

		if (sub_reg & PCIE_CORE_INT_CRFPE)
			dev_dbg(dev, "parity error detected while reading from the Completion Receive FIFO RAM\n");

		if (sub_reg & PCIE_CORE_INT_RRPE)
			dev_dbg(dev, "parity error detected while reading from replay buffer RAM\n");

		if (sub_reg & PCIE_CORE_INT_PRFO)
			dev_dbg(dev, "overflow occurred in the PNP receive FIFO\n");

		if (sub_reg & PCIE_CORE_INT_CRFO)
			dev_dbg(dev, "overflow occurred in the completion receive FIFO\n");

		if (sub_reg & PCIE_CORE_INT_RT)
			dev_dbg(dev, "replay timer timed out\n");

		if (sub_reg & PCIE_CORE_INT_RTR)
			dev_dbg(dev, "replay timer rolled over after 4 transmissions of the same TLP\n");

		if (sub_reg & PCIE_CORE_INT_PE)
			dev_dbg(dev, "phy error detected on receive side\n");

		if (sub_reg & PCIE_CORE_INT_MTR)
			dev_dbg(dev, "malformed TLP received from the link\n");

		/*
		 * NOTE(review): same message text as the MTR case above;
		 * presumably UCR means "unexpected completion received" —
		 * verify against the controller TRM before changing.
		 */
		if (sub_reg & PCIE_CORE_INT_UCR)
			dev_dbg(dev, "malformed TLP received from the link\n");

		if (sub_reg & PCIE_CORE_INT_FCE)
			dev_dbg(dev, "an error was observed in the flow control advertisements from the other side\n");

		if (sub_reg & PCIE_CORE_INT_CT)
			dev_dbg(dev, "a request timed out waiting for completion\n");

		if (sub_reg & PCIE_CORE_INT_UTC)
			dev_dbg(dev, "unmapped TC error\n");

		if (sub_reg & PCIE_CORE_INT_MMVC)
			dev_dbg(dev, "MSI mask register changes\n");

		/* Ack the core-level causes we just reported. */
		rockchip_pcie_write(rockchip, sub_reg, PCIE_CORE_INT_STATUS);
	} else if (reg & PCIE_CLIENT_INT_PHY) {
		dev_dbg(dev, "phy link changes\n");
		rockchip_pcie_update_txcredit_mui(rockchip);
		rockchip_pcie_clr_bw_int(rockchip);
	}

	/* Ack only the client-level LOCAL bit handled by this routine. */
	rockchip_pcie_write(rockchip, reg & PCIE_CLIENT_INT_LOCAL,
			    PCIE_CLIENT_INT_STATUS);

	return IRQ_HANDLED;
}

/* Top-half for the "client" IRQ: log each pending client event and ack it. */
static irqreturn_t rockchip_pcie_client_irq_handler(int irq, void *arg)
{
	struct rockchip_pcie *rockchip = arg;
	struct device *dev = rockchip->dev;
	u32 reg;

	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
	if (reg & PCIE_CLIENT_INT_LEGACY_DONE)
		dev_dbg(dev, "legacy done interrupt received\n");

	if (reg & PCIE_CLIENT_INT_MSG)
		dev_dbg(dev, "message done interrupt received\n");

	if (reg & PCIE_CLIENT_INT_HOT_RST)
		dev_dbg(dev, "hot reset interrupt received\n");

	if (reg & PCIE_CLIENT_INT_DPA)
		dev_dbg(dev, "dpa interrupt received\n");

	if (reg & PCIE_CLIENT_INT_FATAL_ERR)
		dev_dbg(dev, "fatal error interrupt received\n");

	if (reg & PCIE_CLIENT_INT_NFATAL_ERR)
		dev_dbg(dev, "no fatal error interrupt received\n");

	if (reg & PCIE_CLIENT_INT_CORR_ERR)
		dev_dbg(dev, "correctable error interrupt received\n");

	if (reg & PCIE_CLIENT_INT_PHY)
		dev_dbg(dev, "phy interrupt received\n");

	/* Ack exactly the set of causes this handler is responsible for. */
	rockchip_pcie_write(rockchip, reg & (PCIE_CLIENT_INT_LEGACY_DONE |
			      PCIE_CLIENT_INT_MSG | PCIE_CLIENT_INT_HOT_RST |
			      PCIE_CLIENT_INT_DPA | PCIE_CLIENT_INT_FATAL_ERR |
			      PCIE_CLIENT_INT_NFATAL_ERR |
			      PCIE_CLIENT_INT_CORR_ERR |
			      PCIE_CLIENT_INT_PHY),
			    PCIE_CLIENT_INT_STATUS);

	return IRQ_HANDLED;
}

/* Chained handler demultiplexing the INTx status bits into domain IRQs. */
static void rockchip_pcie_intx_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct rockchip_pcie *rockchip = irq_desc_get_handler_data(desc);
	struct device *dev = rockchip->dev;
	u32 reg;
	u32 hwirq;
	int ret;

	chained_irq_enter(chip, desc);

	reg = rockchip_pcie_read(rockchip, PCIE_CLIENT_INT_STATUS);
	reg = (reg & PCIE_CLIENT_INTR_MASK) >> PCIE_CLIENT_INTR_SHIFT;

	/* Dispatch each pending INTx bit, lowest first. */
	while (reg) {
		hwirq = ffs(reg) - 1;
		reg &= ~BIT(hwirq);

		ret = generic_handle_domain_irq(rockchip->irq_domain, hwirq);
		if (ret)
			dev_err(dev, "unexpected IRQ, INT%d\n", hwirq);
	}

	chained_irq_exit(chip, desc);
}

/* Request the "sys"/"legacy"/"client" platform IRQs and wire up handlers. */
static int rockchip_pcie_setup_irq(struct rockchip_pcie *rockchip)
{
	int irq, err;
	struct device *dev = rockchip->dev;
	struct platform_device *pdev = to_platform_device(dev);

	irq = platform_get_irq_byname(pdev, "sys");
	if (irq < 0)
return irq;

	err = devm_request_irq(dev, irq, rockchip_pcie_subsys_irq_handler,
			       IRQF_SHARED, "pcie-sys", rockchip);
	if (err) {
		dev_err(dev, "failed to request PCIe subsystem IRQ\n");
		return err;
	}

	irq = platform_get_irq_byname(pdev, "legacy");
	if (irq < 0)
		return irq;

	/* INTx is demultiplexed via a chained handler, not a shared IRQ. */
	irq_set_chained_handler_and_data(irq,
					 rockchip_pcie_intx_handler,
					 rockchip);

	irq = platform_get_irq_byname(pdev, "client");
	if (irq < 0)
		return irq;

	err = devm_request_irq(dev, irq, rockchip_pcie_client_irq_handler,
			       IRQF_SHARED, "pcie-client", rockchip);
	if (err) {
		dev_err(dev, "failed to request PCIe client IRQ\n");
		return err;
	}

	return 0;
}

/**
 * rockchip_pcie_parse_host_dt - Parse Device Tree
 * @rockchip: PCIe port information
 *
 * Return: '0' on success and error value on failure
 */
static int rockchip_pcie_parse_host_dt(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err;

	err = rockchip_pcie_parse_dt(rockchip);
	if (err)
		return err;

	/* 12V and 3V3 supplies are optional; 1V8 and 0V9 are mandatory. */
	rockchip->vpcie12v = devm_regulator_get_optional(dev, "vpcie12v");
	if (IS_ERR(rockchip->vpcie12v)) {
		if (PTR_ERR(rockchip->vpcie12v) != -ENODEV)
			return PTR_ERR(rockchip->vpcie12v);
		dev_info(dev, "no vpcie12v regulator found\n");
	}

	rockchip->vpcie3v3 = devm_regulator_get_optional(dev, "vpcie3v3");
	if (IS_ERR(rockchip->vpcie3v3)) {
		if (PTR_ERR(rockchip->vpcie3v3) != -ENODEV)
			return PTR_ERR(rockchip->vpcie3v3);
		dev_info(dev, "no vpcie3v3 regulator found\n");
	}

	rockchip->vpcie1v8 = devm_regulator_get(dev, "vpcie1v8");
	if (IS_ERR(rockchip->vpcie1v8))
		return PTR_ERR(rockchip->vpcie1v8);

	rockchip->vpcie0v9 = devm_regulator_get(dev, "vpcie0v9");
	if (IS_ERR(rockchip->vpcie0v9))
		return PTR_ERR(rockchip->vpcie0v9);

	return 0;
}

/*
 * Enable the board supplies in order (12v, 3v3, 1v8, 0v9), unwinding the
 * already-enabled ones on any failure.  Optional supplies may hold an
 * ERR_PTR from probe and are skipped via IS_ERR().
 */
static int rockchip_pcie_set_vpcie(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	int err;

	if (!IS_ERR(rockchip->vpcie12v)) {
		err = regulator_enable(rockchip->vpcie12v);
		if (err) {
			dev_err(dev, "fail to enable vpcie12v regulator\n");
			goto err_out;
		}
	}

	if (!IS_ERR(rockchip->vpcie3v3)) {
		err = regulator_enable(rockchip->vpcie3v3);
		if (err) {
			dev_err(dev, "fail to enable vpcie3v3 regulator\n");
			goto err_disable_12v;
		}
	}

	err = regulator_enable(rockchip->vpcie1v8);
	if (err) {
		dev_err(dev, "fail to enable vpcie1v8 regulator\n");
		goto err_disable_3v3;
	}

	err = regulator_enable(rockchip->vpcie0v9);
	if (err) {
		dev_err(dev, "fail to enable vpcie0v9 regulator\n");
		goto err_disable_1v8;
	}

	return 0;

err_disable_1v8:
	regulator_disable(rockchip->vpcie1v8);
err_disable_3v3:
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
err_disable_12v:
	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
err_out:
	return err;
}

/* Unmask client and core interrupts and enable bandwidth-change IRQs. */
static void rockchip_pcie_enable_interrupts(struct rockchip_pcie *rockchip)
{
	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) &
			    (~PCIE_CLIENT_INT_CLI), PCIE_CLIENT_INT_MASK);
	rockchip_pcie_write(rockchip, (u32)(~PCIE_CORE_INT),
			    PCIE_CORE_INT_MASK);

	rockchip_pcie_enable_bw_int(rockchip);
}

/* irq_domain .map callback for the INTx domain. */
static int rockchip_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
				  irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops intx_domain_ops = {
	.map = rockchip_pcie_intx_map,
};

/* Create the linear INTx domain from the child interrupt-controller node. */
static int rockchip_pcie_init_irq_domain(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	struct device_node *intc = of_get_next_child(dev->of_node, NULL);

	if (!intc) {
		dev_err(dev, "missing child interrupt-controller node\n");
		return -EINVAL;
	}

	rockchip->irq_domain = irq_domain_add_linear(intc, PCI_NUM_INTX,
						     &intx_domain_ops,
						     rockchip);
	of_node_put(intc);
	if (!rockchip->irq_domain) {
		dev_err(dev, "failed to get a INTx IRQ domain\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * Program one outbound address-translation region.  num_pass_bits is the
 * count of low AXI address bits passed through untranslated, minus one.
 */
static int rockchip_pcie_prog_ob_atu(struct rockchip_pcie *rockchip,
				     int region_no, int type, u8 num_pass_bits,
				     u32 lower_addr, u32 upper_addr)
{
	u32 ob_addr_0;
	u32 ob_addr_1;
	u32 ob_desc_0;
	u32 aw_offset;

	if (region_no >= MAX_AXI_WRAPPER_REGION_NUM)
		return -EINVAL;
	if (num_pass_bits + 1 < 8)
		return -EINVAL;
	if (num_pass_bits > 63)
		return -EINVAL;
	/* Region 0 has its own (different) maximum size. */
	if (region_no == 0) {
		if (AXI_REGION_0_SIZE < (2ULL << num_pass_bits))
			return -EINVAL;
	}
	if (region_no != 0) {
		if (AXI_REGION_SIZE < (2ULL << num_pass_bits))
			return -EINVAL;
	}

	aw_offset = (region_no << OB_REG_SIZE_SHIFT);

	ob_addr_0 = num_pass_bits & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS;
	ob_addr_0 |= lower_addr & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR;
	ob_addr_1 = upper_addr;
	ob_desc_0 = (1 << 23 | type);

	rockchip_pcie_write(rockchip, ob_addr_0,
			    PCIE_CORE_OB_REGION_ADDR0 + aw_offset);
	rockchip_pcie_write(rockchip, ob_addr_1,
			    PCIE_CORE_OB_REGION_ADDR1 + aw_offset);
	rockchip_pcie_write(rockchip, ob_desc_0,
			    PCIE_CORE_OB_REGION_DESC0 + aw_offset);
	rockchip_pcie_write(rockchip, 0,
			    PCIE_CORE_OB_REGION_DESC1 + aw_offset);

	return 0;
}

/* Program one inbound (root-port) address-translation region. */
static int rockchip_pcie_prog_ib_atu(struct rockchip_pcie *rockchip,
				     int region_no, u8 num_pass_bits,
				     u32 lower_addr, u32 upper_addr)
{
	u32 ib_addr_0;
	u32 ib_addr_1;
	u32 aw_offset;

	if (region_no > MAX_AXI_IB_ROOTPORT_REGION_NUM)
		return -EINVAL;
	if (num_pass_bits + 1 < MIN_AXI_ADDR_BITS_PASSED)
		return -EINVAL;
	if (num_pass_bits > 63)
		return -EINVAL;

	aw_offset = (region_no << IB_ROOT_PORT_REG_SIZE_SHIFT);

	ib_addr_0 = num_pass_bits & PCIE_CORE_IB_REGION_ADDR0_NUM_BITS;
	ib_addr_0 |= (lower_addr << 8) & PCIE_CORE_IB_REGION_ADDR0_LO_ADDR;
	ib_addr_1 = upper_addr;

	rockchip_pcie_write(rockchip, ib_addr_0, PCIE_RP_IB_ADDR0 + aw_offset);
	rockchip_pcie_write(rockchip, ib_addr_1, PCIE_RP_IB_ADDR1 + aw_offset);

	return 0;
}

/*
 * Carve the host bridge MEM and IO windows into 1MB outbound ATU regions,
 * set up the inbound region, and reserve the final region for messages
 * (its bus address is remembered in rockchip->msg_bus_addr).
 */
static int rockchip_pcie_cfg_atu(struct rockchip_pcie *rockchip)
{
	struct device *dev = rockchip->dev;
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);
	struct resource_entry *entry;
	u64 pci_addr, size;
	int offset;
	int err;
	int reg_no;

	rockchip_pcie_cfg_configuration_accesses(rockchip,
						 AXI_WRAPPER_TYPE0_CFG);
	entry = resource_list_first_type(&bridge->windows, IORESOURCE_MEM);
	if (!entry)
		return -ENODEV;

	size = resource_size(entry->res);
	pci_addr = entry->res->start - entry->offset;
	rockchip->msg_bus_addr = pci_addr;

	/* One outbound region per 1MB of the MEM window (region 0 is cfg). */
	for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
		err = rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1,
						AXI_WRAPPER_MEM_WRITE,
						20 - 1,
						pci_addr + (reg_no << 20),
						0);
		if (err) {
			dev_err(dev, "program RC mem outbound ATU failed\n");
			return err;
		}
	}

	err = rockchip_pcie_prog_ib_atu(rockchip, 2, 32 - 1, 0x0, 0);
	if (err) {
		dev_err(dev, "program RC mem inbound ATU failed\n");
		return err;
	}

	entry = resource_list_first_type(&bridge->windows, IORESOURCE_IO);
	if (!entry)
		return -ENODEV;

	/* store the register number offset to program RC io outbound ATU */
	offset = size >> 20;

	size = resource_size(entry->res);
	pci_addr = entry->res->start - entry->offset;

	for (reg_no = 0; reg_no < (size >> 20); reg_no++) {
		err = rockchip_pcie_prog_ob_atu(rockchip,
						reg_no + 1 + offset,
						AXI_WRAPPER_IO_WRITE,
						20 - 1,
						pci_addr + (reg_no << 20),
						0);
		if (err) {
			dev_err(dev, "program RC io outbound ATU failed\n");
			return err;
		}
	}

	/* assign message regions */
	rockchip_pcie_prog_ob_atu(rockchip, reg_no + 1 + offset,
				  AXI_WRAPPER_NOR_MSG,
				  20 - 1, 0, 0);

	rockchip->msg_bus_addr += ((reg_no + offset) << 20);
	return err;
}

/* Send PME_Turn_Off and poll the LTSSM until the link settles in L2. */
static int rockchip_pcie_wait_l2(struct rockchip_pcie *rockchip)
{
	u32 value;
	int err;

	/* send PME_TURN_OFF message */
	writel(0x0, rockchip->msg_region + PCIE_RC_SEND_PME_OFF);

	/* read LTSSM and wait for falling into L2 link state */
	err = readl_poll_timeout(rockchip->apb_base + PCIE_CLIENT_DEBUG_OUT_0,
				 value, PCIE_LINK_IS_L2(value), 20,
				 jiffies_to_usecs(5 * HZ));
	if (err) {
		dev_err(rockchip->dev, "PCIe link enter L2 timeout!\n");
		return err;
	}

	return 0;
}

/* System-suspend (noirq) hook: park the link in L2, then power down. */
static int rockchip_pcie_suspend_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int ret;

	/* disable core and cli int since we don't need to ack PME_ACK */
	rockchip_pcie_write(rockchip, (PCIE_CLIENT_INT_CLI << 16) |
			    PCIE_CLIENT_INT_CLI, PCIE_CLIENT_INT_MASK);
rockchip_pcie_write(rockchip, (u32)PCIE_CORE_INT, PCIE_CORE_INT_MASK);

	ret = rockchip_pcie_wait_l2(rockchip);
	if (ret) {
		/* L2 entry failed: restore interrupts and abort the suspend. */
		rockchip_pcie_enable_interrupts(rockchip);
		return ret;
	}

	rockchip_pcie_deinit_phys(rockchip);

	rockchip_pcie_disable_clocks(rockchip);

	regulator_disable(rockchip->vpcie0v9);

	return ret;
}

/* System-resume (noirq) hook: re-power, re-init the port, reprogram ATU. */
static int rockchip_pcie_resume_noirq(struct device *dev)
{
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	int err;

	err = regulator_enable(rockchip->vpcie0v9);
	if (err) {
		dev_err(dev, "fail to enable vpcie0v9 regulator\n");
		return err;
	}

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		goto err_disable_0v9;

	err = rockchip_pcie_host_init_port(rockchip);
	if (err)
		goto err_pcie_resume;

	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		goto err_err_deinit_port;

	/* Need this to enter L1 again */
	rockchip_pcie_update_txcredit_mui(rockchip);
	rockchip_pcie_enable_interrupts(rockchip);

	return 0;

err_err_deinit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_pcie_resume:
	rockchip_pcie_disable_clocks(rockchip);
err_disable_0v9:
	regulator_disable(rockchip->vpcie0v9);
	return err;
}

/*
 * Probe: allocate the host bridge, parse DT, power up the port, create the
 * INTx domain, program the ATU, wire up IRQs and register the root bus.
 */
static int rockchip_pcie_probe(struct platform_device *pdev)
{
	struct rockchip_pcie *rockchip;
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *bridge;
	int err;

	if (!dev->of_node)
		return -ENODEV;

	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rockchip));
	if (!bridge)
		return -ENOMEM;

	rockchip = pci_host_bridge_priv(bridge);
	platform_set_drvdata(pdev, rockchip);
	rockchip->dev = dev;
	rockchip->is_rc = true;

	err = rockchip_pcie_parse_host_dt(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_enable_clocks(rockchip);
	if (err)
		return err;

	err = rockchip_pcie_set_vpcie(rockchip);
	if (err) {
		dev_err(dev, "failed to set vpcie regulator\n");
		goto err_set_vpcie;
	}

	err = rockchip_pcie_host_init_port(rockchip);
	if (err)
		goto err_vpcie;

	err = rockchip_pcie_init_irq_domain(rockchip);
	if (err < 0)
		goto err_deinit_port;

	err = rockchip_pcie_cfg_atu(rockchip);
	if (err)
		goto err_remove_irq_domain;

	/* Map the message region whose bus address cfg_atu() computed. */
	rockchip->msg_region = devm_ioremap(dev, rockchip->msg_bus_addr, SZ_1M);
	if (!rockchip->msg_region) {
		err = -ENOMEM;
		goto err_remove_irq_domain;
	}

	bridge->sysdata = rockchip;
	bridge->ops = &rockchip_pcie_ops;

	err = rockchip_pcie_setup_irq(rockchip);
	if (err)
		goto err_remove_irq_domain;

	rockchip_pcie_enable_interrupts(rockchip);

	err = pci_host_probe(bridge);
	if (err < 0)
		goto err_remove_irq_domain;

	return 0;

err_remove_irq_domain:
	irq_domain_remove(rockchip->irq_domain);
err_deinit_port:
	rockchip_pcie_deinit_phys(rockchip);
err_vpcie:
	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	regulator_disable(rockchip->vpcie1v8);
	regulator_disable(rockchip->vpcie0v9);
err_set_vpcie:
	rockchip_pcie_disable_clocks(rockchip);
	return err;
}

/* Remove: tear down the root bus, IRQ domain, PHYs, clocks and supplies. */
static void rockchip_pcie_remove(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct rockchip_pcie *rockchip = dev_get_drvdata(dev);
	struct pci_host_bridge *bridge = pci_host_bridge_from_priv(rockchip);

	pci_stop_root_bus(bridge->bus);
	pci_remove_root_bus(bridge->bus);
	irq_domain_remove(rockchip->irq_domain);

	rockchip_pcie_deinit_phys(rockchip);

	rockchip_pcie_disable_clocks(rockchip);

	if (!IS_ERR(rockchip->vpcie12v))
		regulator_disable(rockchip->vpcie12v);
	if (!IS_ERR(rockchip->vpcie3v3))
		regulator_disable(rockchip->vpcie3v3);
	regulator_disable(rockchip->vpcie1v8);
	regulator_disable(rockchip->vpcie0v9);
}

static const struct dev_pm_ops rockchip_pcie_pm_ops = {
	NOIRQ_SYSTEM_SLEEP_PM_OPS(rockchip_pcie_suspend_noirq,
				  rockchip_pcie_resume_noirq)
};

static const struct of_device_id rockchip_pcie_of_match[] = {
	{ .compatible = "rockchip,rk3399-pcie", },
	{}
};
MODULE_DEVICE_TABLE(of, rockchip_pcie_of_match);

static struct platform_driver rockchip_pcie_driver = {
	.driver = {
		.name = "rockchip-pcie",
		.of_match_table = rockchip_pcie_of_match,
		.pm = &rockchip_pcie_pm_ops,
	},
	.probe = rockchip_pcie_probe,
	.remove = rockchip_pcie_remove,
};
module_platform_driver(rockchip_pcie_driver);

MODULE_AUTHOR("Rockchip Inc");
MODULE_DESCRIPTION("Rockchip AXI PCIe driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017-2018, Intel Corporation
 * Copyright (C) 2015 Altera Corporation
 *
 * Register layout and private-data definitions for the Altera/Intel SoC
 * FPGA EDAC drivers (Cyclone5/Arria5 "CV", Arria10 "A10", Stratix10 "S10").
 */

#ifndef _ALTERA_EDAC_H
#define _ALTERA_EDAC_H

#include <linux/arm-smccc.h>
#include <linux/edac.h>
#include <linux/types.h>

/* SDRAM Controller CtrlCfg Register */
#define CV_CTLCFG_OFST             0x00

/* SDRAM Controller CtrlCfg Register Bit Masks */
#define CV_CTLCFG_ECC_EN           0x400
#define CV_CTLCFG_ECC_CORR_EN      0x800
#define CV_CTLCFG_GEN_SB_ERR       0x2000
#define CV_CTLCFG_GEN_DB_ERR       0x4000

#define CV_CTLCFG_ECC_AUTO_EN     (CV_CTLCFG_ECC_EN)

/* SDRAM Controller Address Width Register */
#define CV_DRAMADDRW_OFST          0x2C

/* SDRAM Controller Address Widths Field Register */
#define DRAMADDRW_COLBIT_MASK      0x001F
#define DRAMADDRW_COLBIT_SHIFT     0
#define DRAMADDRW_ROWBIT_MASK      0x03E0
#define DRAMADDRW_ROWBIT_SHIFT     5
#define CV_DRAMADDRW_BANKBIT_MASK  0x1C00
#define CV_DRAMADDRW_BANKBIT_SHIFT 10
#define CV_DRAMADDRW_CSBIT_MASK    0xE000
#define CV_DRAMADDRW_CSBIT_SHIFT   13

/* SDRAM Controller Interface Data Width Register */
#define CV_DRAMIFWIDTH_OFST        0x30

/* SDRAM Controller Interface Data Width Defines */
#define CV_DRAMIFWIDTH_16B_ECC     24
#define CV_DRAMIFWIDTH_32B_ECC     40

/* SDRAM Controller DRAM Status Register */
#define CV_DRAMSTS_OFST            0x38

/* SDRAM Controller DRAM Status Register Bit Masks */
#define CV_DRAMSTS_SBEERR          0x04
#define CV_DRAMSTS_DBEERR          0x08
#define CV_DRAMSTS_CORR_DROP       0x10

/* SDRAM Controller DRAM IRQ Register */
#define CV_DRAMINTR_OFST           0x3C

/* SDRAM Controller DRAM IRQ Register Bit Masks */
#define CV_DRAMINTR_INTREN         0x01
#define CV_DRAMINTR_SBEMASK        0x02
#define CV_DRAMINTR_DBEMASK        0x04
#define CV_DRAMINTR_CORRDROPMASK   0x08
#define CV_DRAMINTR_INTRCLR        0x10

/* SDRAM Controller Single Bit Error Count Register */
#define CV_SBECOUNT_OFST           0x40

/* SDRAM Controller Double Bit Error Count Register */
#define CV_DBECOUNT_OFST           0x44

/* SDRAM Controller ECC Error Address Register */
#define CV_ERRADDR_OFST            0x48

/*-----------------------------------------*/

/* SDRAM Controller EccCtrl Register */
#define A10_ECCCTRL1_OFST          0x00

/* SDRAM Controller EccCtrl Register Bit Masks */
#define A10_ECCCTRL1_ECC_EN        0x001
#define A10_ECCCTRL1_CNT_RST       0x010
#define A10_ECCCTRL1_AWB_CNT_RST   0x100
#define A10_ECC_CNT_RESET_MASK     (A10_ECCCTRL1_CNT_RST | \
				    A10_ECCCTRL1_AWB_CNT_RST)

/* SDRAM Controller Address Width Register */
#define CV_DRAMADDRW               0xFFC2502C
#define A10_DRAMADDRW              0xFFCFA0A8
#define S10_DRAMADDRW              0xF80110E0

/*
 * SDRAM Controller Address Widths Field Register
 * NOTE(review): the DRAMADDRW_*/CV_DRAMADDRW_* field macros below repeat
 * the identical definitions above; benign (same values) but redundant.
 */
#define DRAMADDRW_COLBIT_MASK      0x001F
#define DRAMADDRW_COLBIT_SHIFT     0
#define DRAMADDRW_ROWBIT_MASK      0x03E0
#define DRAMADDRW_ROWBIT_SHIFT     5
#define CV_DRAMADDRW_BANKBIT_MASK  0x1C00
#define CV_DRAMADDRW_BANKBIT_SHIFT 10
#define CV_DRAMADDRW_CSBIT_MASK    0xE000
#define CV_DRAMADDRW_CSBIT_SHIFT   13

#define A10_DRAMADDRW_BANKBIT_MASK  0x3C00
#define A10_DRAMADDRW_BANKBIT_SHIFT 10
#define A10_DRAMADDRW_GRPBIT_MASK   0xC000
#define A10_DRAMADDRW_GRPBIT_SHIFT  14
#define A10_DRAMADDRW_CSBIT_MASK    0x70000
#define A10_DRAMADDRW_CSBIT_SHIFT   16

/* SDRAM Controller Interface Data Width Register */
#define CV_DRAMIFWIDTH             0xFFC25030
#define A10_DRAMIFWIDTH            0xFFCFB008
#define S10_DRAMIFWIDTH            0xF8011008

/* SDRAM Controller Interface Data Width Defines */
#define CV_DRAMIFWIDTH_16B_ECC     24
#define CV_DRAMIFWIDTH_32B_ECC     40

#define A10_DRAMIFWIDTH_16B        0x0
#define A10_DRAMIFWIDTH_32B        0x1
#define A10_DRAMIFWIDTH_64B        0x2

/* SDRAM Controller DRAM IRQ Register */
#define A10_ERRINTEN_OFST          0x10

/* SDRAM Controller DRAM IRQ Register Bit Masks */
#define A10_ERRINTEN_SERRINTEN     0x01
#define A10_ERRINTEN_DERRINTEN     0x02
#define A10_ECC_IRQ_EN_MASK        (A10_ERRINTEN_SERRINTEN | \
				    A10_ERRINTEN_DERRINTEN)

/* SDRAM Interrupt Mode Register */
#define A10_INTMODE_OFST           0x1C
#define A10_INTMODE_SB_INT         1

/* SDRAM Controller Error Status Register */
#define A10_INTSTAT_OFST           0x20

/* SDRAM Controller Error Status Register Bit Masks */
#define A10_INTSTAT_SBEERR         0x01
#define A10_INTSTAT_DBEERR         0x02

/* SDRAM Controller ECC Error Address Register */
#define A10_DERRADDR_OFST          0x2C
#define A10_SERRADDR_OFST          0x30

/* SDRAM Controller ECC Diagnostic Register */
#define A10_DIAGINTTEST_OFST       0x24

#define A10_DIAGINT_TSERRA_MASK    0x0001
#define A10_DIAGINT_TDERRA_MASK    0x0100

#define A10_SBERR_IRQ              34
#define A10_DBERR_IRQ              32

/* SDRAM Single Bit Error Count Compare Set Register */
#define A10_SERRCNTREG_OFST        0x3C

#define A10_SYMAN_INTMASK_CLR      0xFFD06098
#define A10_INTMASK_CLR_OFST       0x10
#define A10_DDR0_IRQ_MASK          BIT(17)

/* Per-variant register map consumed by the common SDRAM EDAC code. */
struct altr_sdram_prv_data {
	int ecc_ctrl_offset;
	int ecc_ctl_en_mask;
	int ecc_cecnt_offset;
	int ecc_uecnt_offset;
	int ecc_stat_offset;
	int ecc_stat_ce_mask;
	int ecc_stat_ue_mask;
	int ecc_saddr_offset;
	int ecc_daddr_offset;
	int ecc_irq_en_offset;
	int ecc_irq_en_mask;
	int ecc_irq_clr_offset;
	int ecc_irq_clr_mask;
	int ecc_cnt_rst_offset;
	int ecc_cnt_rst_mask;
	struct edac_dev_sysfs_attribute *eccmgr_sysfs_attr;
	int ecc_enable_mask;
	int ce_set_mask;
	int ue_set_mask;
	int ce_ue_trgr_offset;
};

/* Altera SDRAM Memory Controller data */
struct altr_sdram_mc_data {
	struct regmap *mc_vbase;
	int sb_irq;
	int db_irq;
	const struct altr_sdram_prv_data *data;
};

/************************** EDAC Device Defines **************************/

/***** General Device Trigger Defines *****/
#define ALTR_UE_TRIGGER_CHAR       'U'	/* Trigger for UE */
#define ALTR_TRIGGER_READ_WRD_CNT  32	/* Line size x 4 */
#define ALTR_TRIG_OCRAM_BYTE_SIZE  128	/* Line size x 4 */
#define ALTR_TRIG_L2C_BYTE_SIZE    4096	/* Full Page */

/******* Cyclone5 and Arria5 Defines *******/

/* OCRAM ECC Management Group Defines */
#define ALTR_MAN_GRP_OCRAM_ECC_OFFSET   0x04
#define ALTR_OCR_ECC_REG_OFFSET         0x00
#define ALTR_OCR_ECC_EN                 BIT(0)
#define ALTR_OCR_ECC_INJS               BIT(1)
#define ALTR_OCR_ECC_INJD               BIT(2)
#define ALTR_OCR_ECC_SERR               BIT(3)
#define ALTR_OCR_ECC_DERR               BIT(4)

/* L2 ECC Management Group Defines */
#define ALTR_MAN_GRP_L2_ECC_OFFSET      0x00
#define ALTR_L2_ECC_REG_OFFSET          0x00
#define ALTR_L2_ECC_EN                  BIT(0)
#define ALTR_L2_ECC_INJS                BIT(1)
#define ALTR_L2_ECC_INJD                BIT(2)

/* Arria10 General ECC Block Module Defines */
#define ALTR_A10_ECC_CTRL_OFST          0x08
#define ALTR_A10_ECC_EN                 BIT(0)
#define ALTR_A10_ECC_INITA              BIT(16)
#define ALTR_A10_ECC_INITB              BIT(24)

#define ALTR_A10_ECC_INITSTAT_OFST      0x0C
#define ALTR_A10_ECC_INITCOMPLETEA      BIT(0)
#define ALTR_A10_ECC_INITCOMPLETEB      BIT(8)

#define ALTR_A10_ECC_ERRINTEN_OFST      0x10
#define ALTR_A10_ECC_ERRINTENS_OFST     0x14
#define ALTR_A10_ECC_ERRINTENR_OFST     0x18
#define ALTR_A10_ECC_SERRINTEN          BIT(0)

#define ALTR_A10_ECC_INTMODE_OFST       0x1C
#define ALTR_A10_ECC_INTMODE            BIT(0)

#define ALTR_A10_ECC_INTSTAT_OFST       0x20
#define ALTR_A10_ECC_SERRPENA           BIT(0)
#define ALTR_A10_ECC_DERRPENA           BIT(8)
#define ALTR_A10_ECC_ERRPENA_MASK       (ALTR_A10_ECC_SERRPENA | \
					 ALTR_A10_ECC_DERRPENA)
#define ALTR_A10_ECC_SERRPENB           BIT(16)
#define ALTR_A10_ECC_DERRPENB           BIT(24)
#define ALTR_A10_ECC_ERRPENB_MASK       (ALTR_A10_ECC_SERRPENB | \
					 ALTR_A10_ECC_DERRPENB)

#define ALTR_A10_ECC_INTTEST_OFST       0x24
#define ALTR_A10_ECC_TSERRA             BIT(0)
#define ALTR_A10_ECC_TDERRA             BIT(8)
#define ALTR_A10_ECC_TSERRB             BIT(16)
#define ALTR_A10_ECC_TDERRB             BIT(24)

/* ECC Manager Defines */
#define A10_SYSMGR_ECC_INTMASK_SET_OFST   0x94
#define A10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
#define A10_SYSMGR_ECC_INTMASK_OCRAM      BIT(1)

#define A10_SYSMGR_ECC_INTSTAT_SERR_OFST  0x9C
#define A10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0
#define A10_SYSMGR_ECC_INTSTAT_L2         BIT(0)
#define A10_SYSMGR_ECC_INTSTAT_OCRAM      BIT(1)

#define A10_SYSGMR_MPU_CLEAR_L2_ECC_OFST  0xA8
#define A10_SYSGMR_MPU_CLEAR_L2_ECC_SB    BIT(15)
#define A10_SYSGMR_MPU_CLEAR_L2_ECC_MB    BIT(31)

/* Arria 10 L2 ECC Management Group Defines */
#define ALTR_A10_L2_ECC_CTL_OFST        0x0
#define ALTR_A10_L2_ECC_EN_CTL          BIT(0)

#define ALTR_A10_L2_ECC_STATUS          0xFFD060A4
#define ALTR_A10_L2_ECC_STAT_OFST       0xA4
#define ALTR_A10_L2_ECC_SERR_PEND       BIT(0)
/*
 * NOTE(review): MERR_PEND uses the same bit as SERR_PEND above — looks
 * suspicious; confirm against the Arria 10 handbook before relying on it.
 */
#define ALTR_A10_L2_ECC_MERR_PEND       BIT(0)

#define ALTR_A10_L2_ECC_CLR_OFST        0x4
#define ALTR_A10_L2_ECC_SERR_CLR        BIT(15)
#define ALTR_A10_L2_ECC_MERR_CLR        BIT(31)

#define ALTR_A10_L2_ECC_INJ_OFST        ALTR_A10_L2_ECC_CTL_OFST
#define ALTR_A10_L2_ECC_CE_INJ_MASK     0x00000101
#define ALTR_A10_L2_ECC_UE_INJ_MASK     0x00010101

/* Arria 10 OCRAM ECC Management Group Defines */
#define ALTR_A10_OCRAM_ECC_EN_CTL       (BIT(1) | BIT(0))

/* Arria 10 Ethernet ECC Management Group Defines */
#define ALTR_A10_COMMON_ECC_EN_CTL      BIT(0)

/* Arria 10 SDMMC ECC Management Group Defines */
#define ALTR_A10_SDMMC_IRQ_MASK         (BIT(16) | BIT(15))

/* A10 ECC Controller memory initialization timeout */
#define ALTR_A10_ECC_INIT_WATCHDOG_10US 10000

/************* Stratix10 Defines **************/
#define ALTR_S10_ECC_CTRL_SDRAM_OFST    0x00
#define ALTR_S10_ECC_EN                 BIT(0)

#define ALTR_S10_ECC_ERRINTEN_OFST      0x10
#define ALTR_S10_ECC_ERRINTENS_OFST     0x14
#define ALTR_S10_ECC_ERRINTENR_OFST     0x18
#define ALTR_S10_ECC_SERRINTEN          BIT(0)

#define ALTR_S10_ECC_INTMODE_OFST       0x1C
#define ALTR_S10_ECC_INTMODE            BIT(0)

#define ALTR_S10_ECC_INTSTAT_OFST       0x20
#define ALTR_S10_ECC_SERRPENA           BIT(0)
#define ALTR_S10_ECC_DERRPENA           BIT(8)
#define ALTR_S10_ECC_ERRPENA_MASK       (ALTR_S10_ECC_SERRPENA | \
					 ALTR_S10_ECC_DERRPENA)

#define ALTR_S10_ECC_INTTEST_OFST       0x24
#define ALTR_S10_ECC_TSERRA             BIT(0)
#define ALTR_S10_ECC_TDERRA             BIT(8)
#define ALTR_S10_ECC_TSERRB             BIT(16)
#define ALTR_S10_ECC_TDERRB             BIT(24)

#define ALTR_S10_DERR_ADDRA_OFST        0x2C

/* Stratix10 ECC Manager Defines */
#define S10_SYSMGR_ECC_INTMASK_CLR_OFST   0x98
#define S10_SYSMGR_ECC_INTSTAT_DERR_OFST  0xA0

/* Sticky registers for Uncorrected Errors */
#define S10_SYSMGR_UE_VAL_OFST            0x220
#define S10_SYSMGR_UE_ADDR_OFST           0x224

#define S10_DDR0_IRQ_MASK                 BIT(16)
#define S10_DBE_IRQ_MASK                  0x3FFFE

/* Define ECC Block Offsets for peripherals */
#define ECC_BLK_ADDRESS_OFST              0x40
#define ECC_BLK_RDATA0_OFST               0x44
#define ECC_BLK_RDATA1_OFST               0x48
#define ECC_BLK_RDATA2_OFST               0x4C
#define ECC_BLK_RDATA3_OFST               0x50
#define ECC_BLK_WDATA0_OFST               0x54
#define ECC_BLK_WDATA1_OFST               0x58
#define ECC_BLK_WDATA2_OFST               0x5C
#define ECC_BLK_WDATA3_OFST               0x60
#define ECC_BLK_RECC0_OFST                0x64
#define ECC_BLK_RECC1_OFST                0x68
#define ECC_BLK_WECC0_OFST                0x6C
#define ECC_BLK_WECC1_OFST                0x70
#define ECC_BLK_DBYTECTRL_OFST            0x74
#define ECC_BLK_ACCCTRL_OFST              0x78
#define ECC_BLK_STARTACC_OFST             0x7C

#define ECC_XACT_KICK                     0x10000
#define ECC_WORD_WRITE                    0xFF
#define ECC_WRITE_DOVR                    0x101
#define ECC_WRITE_EDOVR                   0x103
#define ECC_READ_EOVR                     0x2
#define ECC_READ_EDOVR                    0x3

struct altr_edac_device_dev;

/* Per-IP ops/constants table used by the generic EDAC device code. */
struct edac_device_prv_data {
	int (*setup)(struct altr_edac_device_dev *device);
	int ce_clear_mask;
	int ue_clear_mask;
	int irq_status_mask;
	void * (*alloc_mem)(size_t size, void **other);
	void (*free_mem)(void *p, size_t size, void *other);
	int ecc_enable_mask;
	int ecc_en_ofst;
	int ce_set_mask;
	int ue_set_mask;
	int set_err_ofst;
	irqreturn_t (*ecc_irq_handler)(int irq, void *dev_id);
	int trig_alloc_sz;
	const struct file_operations *inject_fops;
	bool panic;
};

/* One registered EDAC device instance (OCRAM, L2, peripheral, ...). */
struct altr_edac_device_dev {
	struct list_head next;
	void __iomem *base;
	int sb_irq;
	int db_irq;
	const struct edac_device_prv_data *data;
	struct dentry *debugfs_dir;
	char *edac_dev_name;
	struct altr_arria10_edac *edac;
	struct edac_device_ctl_info *edac_dev;
	struct device ddev;
	int edac_idx;
};

/* Top-level Arria10 ECC manager state shared by all device instances. */
struct altr_arria10_edac {
	struct device *dev;
	struct regmap *ecc_mgr_map;
	int sb_irq;
	int db_irq;
	struct irq_domain *domain;
	struct irq_chip irq_chip;
	struct list_head a10_ecc_devices;
	struct notifier_block panic_notifier;
};

#endif	/* #ifndef _ALTERA_EDAC_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMI based code to deal with broken DSDTs on X86 tablets which ship with
 * Android as (part of) the factory image. The factory kernels shipped on these
 * devices typically have a bunch of things hardcoded, rather than specified
 * in their DSDT.
 *
 * Copyright (C) 2021-2023 Hans de Goede <[email protected]>
 */

#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

#include "x86-android-tablets.h"

/*
 * Matched in order: entries with more specific match sets must precede
 * broader ones (see the Yoga Tablet 2 Pro 1380 vs 830/1050 entries below).
 */
const struct dmi_system_id x86_android_tablet_ids[] __initconst = {
	{
		/* Acer Iconia One 7 B1-750 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"),
		},
		.driver_data = (void *)&acer_b1_750_info,
	},
	{
		/* Advantech MICA-071 */
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Advantech"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MICA-071"),
		},
		.driver_data = (void *)&advantech_mica_071_info,
	},
	{
		/* Asus MeMO Pad 7 ME176C */
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"),
		},
		.driver_data = (void *)&asus_me176c_info,
	},
	{
		/* Asus TF103C */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
			DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"),
		},
		.driver_data = (void *)&asus_tf103c_info,
	},
	{
		/* Chuwi Hi8 (CWI509) */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Hampoo"),
			DMI_MATCH(DMI_BOARD_NAME, "BYT-PA03C"),
			DMI_MATCH(DMI_SYS_VENDOR, "ilife"),
			DMI_MATCH(DMI_PRODUCT_NAME, "S806"),
		},
		.driver_data = (void *)&chuwi_hi8_info,
	},
	{
		/* Cyberbook T116 Android version */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "Default string"),
			DMI_MATCH(DMI_BOARD_NAME, "Cherry Trail CR"),
			/* Above strings are much too generic, also match on SKU + BIOS date */
			DMI_MATCH(DMI_PRODUCT_SKU, "20170531"),
			DMI_MATCH(DMI_BIOS_DATE, "07/12/2017"),
		},
		.driver_data = (void *)&cyberbook_t116_info,
	},
	{
		/* CZC P10T */
		.ident = "CZC ODEON TPC-10 (\"P10T\")",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "CZC"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"),
		},
		.driver_data = (void *)&czc_p10t,
	},
	{
		/* CZC P10T variant */
		.ident = "ViewSonic ViewPad 10",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"),
			DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"),
		},
		.driver_data = (void *)&czc_p10t,
	},
	{
		/* Lenovo Yoga Book X90F / X90L */
		.matches = {
			DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
			DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"),
			DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"),
		},
		.driver_data = (void *)&lenovo_yogabook_x90_info,
	},
	{
		/* Lenovo Yoga Book X91F / X91L */
		.matches = {
			/* Inexact match to match F + L versions */
			DMI_MATCH(DMI_PRODUCT_NAME, "Lenovo YB1-X91"),
		},
		.driver_data = (void *)&lenovo_yogabook_x91_info,
	},
	{
		/*
		 * Lenovo Yoga Tablet 2 Pro 1380F/L (13") * This has more or less the same BIOS as the 830F/L or 1050F/L
		 * (8" and 10") below, but unlike the 8"/10" models which share
		 * the same mainboard this model has a different mainboard.
		 * This match for the 13" model MUST come before the 8" + 10"
		 * match since that one will also match the 13" model!
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
			DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
			/* Full match so as to NOT match the 830/1050 BIOS */
			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21.X64.0005.R00.1504101516"),
		},
		.driver_data = (void *)&lenovo_yoga_tab2_1380_info,
	},
	{
		/*
		 * Lenovo Yoga Tablet 2 830F/L or 1050F/L * The 8" and 10" Lenovo Yoga Tablet 2 use the same mainboard.
		 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."),
			DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"),
			DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"),
			/* Partial match on beginning of BIOS version */
			DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"),
		},
		.driver_data = (void *)&lenovo_yoga_tab2_830_1050_info,
	},
	{
		/* Lenovo Yoga Tab 3 Pro YT3-X90F */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Intel Corporation"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Blade3-10A-001"),
		},
		.driver_data = (void *)&lenovo_yt3_info,
	},
	{
		/* Medion Lifetab S10346 */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
			/* Above strings are much too generic, also match on BIOS date */
			DMI_MATCH(DMI_BIOS_DATE, "10/22/2015"),
		},
		.driver_data = (void *)&medion_lifetab_s10346_info,
	},
	{
		/* Nextbook Ares 8 (BYT version) */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
			DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"),
		},
		.driver_data = (void *)&nextbook_ares8_info,
	},
	{
		/* Nextbook Ares 8A (CHT version) */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Insyde"),
			DMI_MATCH(DMI_PRODUCT_NAME, "CherryTrail"),
			DMI_MATCH(DMI_BIOS_VERSION, "M882"),
		},
		.driver_data = (void *)&nextbook_ares8a_info,
	},
	{
		/* Peaq C1010 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "PEAQ"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PEAQ PMM C1010 MD99187"),
		},
		.driver_data = (void *)&peaq_c1010_info,
	},
	{
		/* Vexia Edu Atla 10 tablet 9V version */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
			/* Above strings are too generic, also match on BIOS date */
			DMI_MATCH(DMI_BIOS_DATE, "08/25/2014"),
		},
		.driver_data = (void *)&vexia_edu_atla10_info,
	},
	{
		/* Whitelabel (sold as various brands) TM800A550L */
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
			DMI_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
			/* Above strings are too generic, also match on BIOS version */
			DMI_MATCH(DMI_BIOS_VERSION, "ZY-8-BI-PX4S70VTR400-X423B-005-D"),
		},
		.driver_data = (void *)&whitelabel_tm800a550l_info,
	},
	{
		/* Xiaomi Mi Pad 2 */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Xiaomi Inc"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Mipad2"),
		},
		.driver_data = (void *)&xiaomi_mipad2_info,
	},
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(dmi, x86_android_tablet_ids);
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic SCSI-3 ALUA SCSI Device Handler
 *
 * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH.
 * All rights reserved.
 */
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_proto.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dh.h>

#define ALUA_DH_NAME "alua"
#define ALUA_DH_VER "2.0"

/* Access-state support bits as reported in the RTPG descriptor (byte 1) */
#define TPGS_SUPPORT_NONE		0x00
#define TPGS_SUPPORT_OPTIMIZED		0x01
#define TPGS_SUPPORT_NONOPTIMIZED	0x02
#define TPGS_SUPPORT_STANDBY		0x04
#define TPGS_SUPPORT_UNAVAILABLE	0x08
#define TPGS_SUPPORT_LBA_DEPENDENT	0x10
#define TPGS_SUPPORT_OFFLINE		0x40
#define TPGS_SUPPORT_TRANSITION		0x80
#define TPGS_SUPPORT_ALL		0xdf

/* RTPG response format field (byte 4) */
#define RTPG_FMT_MASK		0x70
#define RTPG_FMT_EXT_HDR	0x10

/* TPGS mode as reported in the standard INQUIRY data */
#define TPGS_MODE_UNINITIALIZED	 -1
#define TPGS_MODE_NONE		0x0
#define TPGS_MODE_IMPLICIT	0x1
#define TPGS_MODE_EXPLICIT	0x2

#define ALUA_RTPG_SIZE		128
#define ALUA_FAILOVER_TIMEOUT	60	/* seconds */
#define ALUA_FAILOVER_RETRIES	5
#define ALUA_RTPG_DELAY_MSECS	5
#define ALUA_RTPG_RETRY_DELAY	2	/* seconds */

/* device handler flags */
#define ALUA_OPTIMIZE_STPG		0x01
#define ALUA_RTPG_EXT_HDR_UNSUPP	0x02
/* State machine flags */
#define ALUA_PG_RUN_RTPG		0x10
#define ALUA_PG_RUN_STPG		0x20
#define ALUA_PG_RUNNING			0x40

static uint optimize_stpg;
module_param(optimize_stpg, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(optimize_stpg, "Allow use of a non-optimized path, rather than sending a STPG, when implicit TPGS is supported (0=No,1=Yes). Default is 0.");

/* Global list of known port groups, protected by port_group_lock */
static LIST_HEAD(port_group_list);
static DEFINE_SPINLOCK(port_group_lock);
static struct workqueue_struct *kaluad_wq;

/*
 * One ALUA target port group, shared (via kref) by all scsi_devices that
 * report the same device id and group id.
 */
struct alua_port_group {
	struct kref		kref;
	struct rcu_head		rcu;
	struct list_head	node;		/* on port_group_list */
	struct list_head	dh_list;	/* alua_dh_data members, under lock/RCU */
	unsigned char		device_id_str[256];
	int			device_id_len;
	int			group_id;
	int			tpgs;		/* TPGS_MODE_* */
	int			state;		/* SCSI_ACCESS_STATE_* */
	int			pref;
	int			valid_states;	/* TPGS_SUPPORT_* bitmask */
	unsigned		flags;		/* used for optimizing STPG */
	unsigned char		transition_tmo;	/* seconds */
	unsigned long		expiry;		/* jiffies deadline for transitions */
	unsigned long		interval;	/* RTPG retry delay, seconds */
	struct delayed_work	rtpg_work;
	spinlock_t		lock;
	struct list_head	rtpg_list;	/* pending alua_queue_data entries */
	struct scsi_device	*rtpg_sdev;	/* device used for the queued RTPG */
};

/* Per-scsi_device handler data */
struct alua_dh_data {
	struct list_head	node;		/* on pg->dh_list */
	struct alua_port_group __rcu *pg;
	int			group_id;
	spinlock_t		pg_lock;	/* protects h->pg updates */
	struct scsi_device	*sdev;
	int			init_error;
	struct mutex		init_mutex;
	bool			disabled;	/* skipped when picking an RTPG device */
};

/* Completion callback recorded for an in-flight activate request */
struct alua_queue_data {
	struct list_head	entry;
	activate_complete	callback_fn;
	void			*callback_data;
};

#define ALUA_POLICY_SWITCH_CURRENT	0
#define ALUA_POLICY_SWITCH_ALL		1

static void alua_rtpg_work(struct work_struct *work);
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force);
static void alua_check(struct scsi_device *sdev, bool force);

/*
 * release_port_group - kref release function for an alua_port_group
 *
 * Flushes any pending RTPG work before unlinking the group and
 * freeing it after an RCU grace period.
 */
static void release_port_group(struct kref *kref)
{
	struct alua_port_group *pg;

	pg = container_of(kref, struct alua_port_group, kref);
	if (pg->rtpg_sdev)
		flush_delayed_work(&pg->rtpg_work);
	spin_lock(&port_group_lock);
	list_del(&pg->node);
	spin_unlock(&port_group_lock);
	kfree_rcu(pg, rcu);
}

/*
 * submit_rtpg - Issue a REPORT TARGET GROUP STATES command
 * @sdev: sdev the command should be sent to
 */
static int submit_rtpg(struct scsi_device *sdev, unsigned char *buff,
		       int bufflen, struct scsi_sense_hdr *sshdr, int flags)
{
	u8 cdb[MAX_COMMAND_SIZE];
	blk_opf_t opf = REQ_OP_DRV_IN | REQ_FAILFAST_DEV |
			REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
	const struct scsi_exec_args exec_args = {
		.sshdr = sshdr,
	};

	/* Prepare the command. */
	memset(cdb, 0x0, MAX_COMMAND_SIZE);
	cdb[0] = MAINTENANCE_IN;
	if (!(flags & ALUA_RTPG_EXT_HDR_UNSUPP))
		cdb[1] = MI_REPORT_TARGET_PGS | MI_EXT_HDR_PARAM_FMT;
	else
		cdb[1] = MI_REPORT_TARGET_PGS;
	put_unaligned_be32(bufflen, &cdb[6]);

	return scsi_execute_cmd(sdev, cdb, opf, buff, bufflen,
				ALUA_FAILOVER_TIMEOUT * HZ,
				ALUA_FAILOVER_RETRIES, &exec_args);
}

/*
 * submit_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Currently we're only setting the current target port group state
 * to 'active/optimized' and let the array firmware figure out
 * the states of the remaining groups.
 */
static int submit_stpg(struct scsi_device *sdev, int group_id,
		       struct scsi_sense_hdr *sshdr)
{
	u8 cdb[MAX_COMMAND_SIZE];
	unsigned char stpg_data[8];
	int stpg_len = 8;
	blk_opf_t opf = REQ_OP_DRV_OUT | REQ_FAILFAST_DEV |
			REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER;
	const struct scsi_exec_args exec_args = {
		.sshdr = sshdr,
	};

	/* Prepare the data buffer */
	memset(stpg_data, 0, stpg_len);
	stpg_data[4] = SCSI_ACCESS_STATE_OPTIMAL;
	put_unaligned_be16(group_id, &stpg_data[6]);

	/* Prepare the command. */
	memset(cdb, 0x0, MAX_COMMAND_SIZE);
	cdb[0] = MAINTENANCE_OUT;
	cdb[1] = MO_SET_TARGET_PGS;
	put_unaligned_be32(stpg_len, &cdb[6]);

	return scsi_execute_cmd(sdev, cdb, opf, stpg_data, stpg_len,
				ALUA_FAILOVER_TIMEOUT * HZ,
				ALUA_FAILOVER_RETRIES, &exec_args);
}

/*
 * alua_find_get_pg - look up a port group by device id and group id
 * @id_str: device identification string (from VPD page 0x83)
 * @id_size: length of @id_str
 * @group_id: target port group id
 *
 * Caller must hold port_group_lock. Returns the group with an extra
 * kref taken, or NULL if no live matching group exists.
 */
static struct alua_port_group *alua_find_get_pg(char *id_str, size_t id_size,
						int group_id)
{
	struct alua_port_group *pg;

	if (!id_str || !id_size || !strlen(id_str))
		return NULL;

	list_for_each_entry(pg, &port_group_list, node) {
		if (pg->group_id != group_id)
			continue;
		if (!pg->device_id_len || pg->device_id_len != id_size)
			continue;
		if (strncmp(pg->device_id_str, id_str, id_size))
			continue;
		if (!kref_get_unless_zero(&pg->kref))
			continue;
		return pg;
	}

	return NULL;
}

/*
 * alua_alloc_pg - Allocate a new port_group structure
 * @sdev: scsi device
 * @group_id: port group id
 * @tpgs: target port group settings
 *
 * Allocate a new port_group structure for a given
 * device.
 */
static struct alua_port_group *alua_alloc_pg(struct scsi_device *sdev,
					     int group_id, int tpgs)
{
	struct alua_port_group *pg, *tmp_pg;

	pg = kzalloc(sizeof(struct alua_port_group), GFP_KERNEL);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->device_id_len = scsi_vpd_lun_id(sdev, pg->device_id_str,
					    sizeof(pg->device_id_str));
	if (pg->device_id_len <= 0) {
		/*
		 * TPGS supported but no device identification found.
		 * Generate private device identification.
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No device descriptors found\n",
			    ALUA_DH_NAME);
		pg->device_id_str[0] = '\0';
		pg->device_id_len = 0;
	}
	pg->group_id = group_id;
	pg->tpgs = tpgs;
	pg->state = SCSI_ACCESS_STATE_OPTIMAL;
	pg->valid_states = TPGS_SUPPORT_ALL;
	if (optimize_stpg)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	kref_init(&pg->kref);
	INIT_DELAYED_WORK(&pg->rtpg_work, alua_rtpg_work);
	INIT_LIST_HEAD(&pg->rtpg_list);
	INIT_LIST_HEAD(&pg->node);
	INIT_LIST_HEAD(&pg->dh_list);
	spin_lock_init(&pg->lock);

	/* If another caller raced us and registered the same group, use it */
	spin_lock(&port_group_lock);
	tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
				  group_id);
	if (tmp_pg) {
		spin_unlock(&port_group_lock);
		kfree(pg);
		return tmp_pg;
	}

	list_add(&pg->node, &port_group_list);
	spin_unlock(&port_group_lock);

	return pg;
}

/*
 * alua_check_tpgs - Evaluate TPGS setting
 * @sdev: device to be checked
 *
 * Examine the TPGS setting of the sdev to find out if ALUA
 * is supported.
 */
static int alua_check_tpgs(struct scsi_device *sdev)
{
	int tpgs = TPGS_MODE_NONE;

	/*
	 * ALUA support for non-disk devices is fraught with
	 * difficulties, so disable it for now.
	 */
	if (sdev->type != TYPE_DISK) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: disable for non-disk devices\n",
			    ALUA_DH_NAME);
		return tpgs;
	}

	tpgs = scsi_device_tpgs(sdev);
	switch (tpgs) {
	case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev,
			    "%s: supports implicit and explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_EXPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_IMPLICIT:
		sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n",
			    ALUA_DH_NAME);
		break;
	case TPGS_MODE_NONE:
		sdev_printk(KERN_INFO, sdev, "%s: not supported\n",
			    ALUA_DH_NAME);
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: unsupported TPGS setting %d\n",
			    ALUA_DH_NAME, tpgs);
		tpgs = TPGS_MODE_NONE;
		break;
	}

	return tpgs;
}

/*
 * alua_check_vpd - Evaluate INQUIRY vpd page 0x83
 * @sdev: device to be checked
 *
 * Extract the relative target port and the target port group
 * descriptor from the list of identificators.
 */
static int alua_check_vpd(struct scsi_device *sdev, struct alua_dh_data *h,
			  int tpgs)
{
	int rel_port = -1, group_id;
	struct alua_port_group *pg, *old_pg = NULL;
	bool pg_updated = false;
	unsigned long flags;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0) {
		/*
		 * Internal error; TPGS supported but required
		 * VPD identification descriptors not present.
		 * Disable ALUA support
		 */
		sdev_printk(KERN_INFO, sdev,
			    "%s: No target port descriptors found\n",
			    ALUA_DH_NAME);
		return SCSI_DH_DEV_UNSUPP;
	}

	pg = alua_alloc_pg(sdev, group_id, tpgs);
	if (IS_ERR(pg)) {
		if (PTR_ERR(pg) == -ENOMEM)
			return SCSI_DH_NOMEM;
		return SCSI_DH_DEV_UNSUPP;
	}
	if (pg->device_id_len)
		sdev_printk(KERN_INFO, sdev,
			    "%s: device %s port group %x rel port %x\n",
			    ALUA_DH_NAME, pg->device_id_str,
			    group_id, rel_port);
	else
		sdev_printk(KERN_INFO, sdev,
			    "%s: port group %x rel port %x\n",
			    ALUA_DH_NAME, group_id, rel_port);

	kref_get(&pg->kref);

	/* Check for existing port group references */
	spin_lock(&h->pg_lock);
	old_pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	if (old_pg != pg) {
		/* port group has changed. Update to new port group */
		if (h->pg) {
			spin_lock_irqsave(&old_pg->lock, flags);
			list_del_rcu(&h->node);
			spin_unlock_irqrestore(&old_pg->lock, flags);
		}
		rcu_assign_pointer(h->pg, pg);
		pg_updated = true;
	}

	spin_lock_irqsave(&pg->lock, flags);
	if (pg_updated)
		list_add_rcu(&h->node, &pg->dh_list);
	spin_unlock_irqrestore(&pg->lock, flags);

	spin_unlock(&h->pg_lock);

	/* Trigger an initial RTPG to learn the real state */
	alua_rtpg_queue(pg, sdev, NULL, true);
	kref_put(&pg->kref, release_port_group);

	if (old_pg)
		kref_put(&old_pg->kref, release_port_group);

	return SCSI_DH_OK;
}

/*
 * print_alua_state - map an access state to a single log character
 */
static char print_alua_state(unsigned char state)
{
	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		return 'A';
	case SCSI_ACCESS_STATE_ACTIVE:
		return 'N';
	case SCSI_ACCESS_STATE_STANDBY:
		return 'S';
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		return 'U';
	case SCSI_ACCESS_STATE_LBA:
		return 'L';
	case SCSI_ACCESS_STATE_OFFLINE:
		return 'O';
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return 'T';
	default:
		return 'X';
	}
}

/*
 * alua_handle_state_transition - note a 'state transitioning' sense code
 *
 * Mark the port group as transitioning and schedule a recheck.
 */
static void alua_handle_state_transition(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;
	rcu_read_unlock();
	alua_check(sdev, false);
}

/*
 * alua_check_sense - examine sense data for ALUA-relevant conditions
 *
 * Translates ALUA-related sense codes into retry/requeue decisions and
 * schedules state rechecks where the sense indicates a possible change.
 */
static enum scsi_disposition alua_check_sense(struct scsi_device *sdev,
					      struct scsi_sense_hdr *sense_hdr)
{
	switch (sense_hdr->sense_key) {
	case NOT_READY:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_handle_state_transition(sdev);
			return NEEDS_RETRY;
		}
		break;
	case UNIT_ATTENTION:
		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) {
			/*
			 * LUN Not Accessible - ALUA state transition
			 */
			alua_handle_state_transition(sdev);
			return NEEDS_RETRY;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) {
			/*
			 * Power On, Reset, or Bus Device Reset.
			 * Might have obscured a state transition,
			 * so schedule a recheck.
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x04)
			/*
			 * Device internal reset
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
			/*
			 * Mode Parameters Changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) {
			/*
			 * ALUA state changed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) {
			/*
			 * Implicit ALUA state transition failed
			 */
			alua_check(sdev, true);
			return ADD_TO_MLQUEUE;
		}
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03)
			/*
			 * Inquiry data has changed
			 */
			return ADD_TO_MLQUEUE;
		if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e)
			/*
			 * REPORTED_LUNS_DATA_HAS_CHANGED is reported
			 * when switching controllers on targets like
			 * Intel Multi-Flex. We can just retry.
			 */
			return ADD_TO_MLQUEUE;

		break;
	}

	return SCSI_RETURN_NOT_HANDLED;
}

/*
 * alua_tur - Send a TEST UNIT READY
 * @sdev: device to which the TEST UNIT READY command should be send
 *
 * Send a TEST UNIT READY to @sdev to figure out the device state
 * Returns SCSI_DH_RETRY if the sense code is NOT READY/ALUA TRANSITIONING,
 * SCSI_DH_OK if no error occurred, and SCSI_DH_IO otherwise.
 */
static int alua_tur(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sense_hdr;
	int retval;

	retval = scsi_test_unit_ready(sdev, ALUA_FAILOVER_TIMEOUT * HZ,
				      ALUA_FAILOVER_RETRIES, &sense_hdr);
	/* Transitioning sense wins over the command result */
	if ((sense_hdr.sense_key == NOT_READY ||
	     sense_hdr.sense_key == UNIT_ATTENTION) &&
	    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a)
		return SCSI_DH_RETRY;
	else if (retval)
		return SCSI_DH_IO;
	else
		return SCSI_DH_OK;
}

/*
 * alua_rtpg - Evaluate REPORT TARGET GROUP STATES
 * @sdev: the device to be evaluated.
 *
 * Evaluate the Target Port Group State.
 * Returns SCSI_DH_DEV_OFFLINED if the path is
 * found to be unusable.
 */
static int alua_rtpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	struct scsi_sense_hdr sense_hdr;
	struct alua_port_group *tmp_pg;
	int len, k, off, bufflen = ALUA_RTPG_SIZE;
	int group_id_old, state_old, pref_old, valid_states_old;
	unsigned char *desc, *buff;
	unsigned err;
	int retval;
	unsigned int tpg_desc_tbl_off;
	unsigned char orig_transition_tmo;
	unsigned long flags;
	bool transitioning_sense = false;

	/* Remember the previous state so we only log on changes */
	group_id_old = pg->group_id;
	state_old = pg->state;
	pref_old = pg->pref;
	valid_states_old = pg->valid_states;

	if (!pg->expiry) {
		unsigned long transition_tmo = ALUA_FAILOVER_TIMEOUT * HZ;

		if (pg->transition_tmo)
			transition_tmo = pg->transition_tmo * HZ;

		pg->expiry = round_jiffies_up(jiffies + transition_tmo);
	}

	buff = kzalloc(bufflen, GFP_KERNEL);
	if (!buff)
		return SCSI_DH_DEV_TEMP_BUSY;

 retry:
	err = 0;
	retval = submit_rtpg(sdev, buff, bufflen, &sense_hdr, pg->flags);

	if (retval) {
		/*
		 * Some (broken) implementations have a habit of returning
		 * an error during things like firmware update etc.
		 * But if the target only supports active/optimized there's
		 * not much we can do; it's not that we can switch paths
		 * or anything.
		 * So ignore any errors to avoid spurious failures during
		 * path failover.
		 */
		if ((pg->valid_states & ~TPGS_SUPPORT_OPTIMIZED) == 0) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: ignoring rtpg result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			return SCSI_DH_OK;
		}
		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: rtpg failed, result %d\n",
				    ALUA_DH_NAME, retval);
			kfree(buff);
			if (retval < 0)
				return SCSI_DH_DEV_TEMP_BUSY;
			if (host_byte(retval) == DID_NO_CONNECT)
				return SCSI_DH_RES_TEMP_UNAVAIL;
			return SCSI_DH_IO;
		}

		/*
		 * submit_rtpg() has failed on existing arrays
		 * when requesting extended header info, and
		 * the array doesn't support extended headers,
		 * even though it shouldn't according to T10.
		 * The retry without rtpg_ext_hdr_req set
		 * handles this.
		 * Note: some arrays return a sense key of ILLEGAL_REQUEST
		 * with ASC 00h if they don't support the extended header.
		 */
		if (!(pg->flags & ALUA_RTPG_EXT_HDR_UNSUPP) &&
		    sense_hdr.sense_key == ILLEGAL_REQUEST) {
			pg->flags |= ALUA_RTPG_EXT_HDR_UNSUPP;
			goto retry;
		}
		/*
		 * If the array returns with 'ALUA state transition'
		 * sense code here it cannot return RTPG data during
		 * transition. So set the state to 'transitioning' directly.
		 */
		if (sense_hdr.sense_key == NOT_READY &&
		    sense_hdr.asc == 0x04 && sense_hdr.ascq == 0x0a) {
			transitioning_sense = true;
			goto skip_rtpg;
		}
		/*
		 * Retry on any other UNIT ATTENTION occurred.
		 */
		if (sense_hdr.sense_key == UNIT_ATTENTION)
			err = SCSI_DH_RETRY;
		if (err == SCSI_DH_RETRY &&
		    pg->expiry != 0 && time_before(jiffies, pg->expiry)) {
			sdev_printk(KERN_ERR, sdev, "%s: rtpg retry\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
			kfree(buff);
			return err;
		}
		sdev_printk(KERN_ERR, sdev, "%s: rtpg failed\n",
			    ALUA_DH_NAME);
		scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		kfree(buff);
		pg->expiry = 0;
		return SCSI_DH_IO;
	}

	/* Total returned data length, including the 4-byte length field */
	len = get_unaligned_be32(&buff[0]) + 4;

	if (len > bufflen) {
		/* Resubmit with the correct length */
		kfree(buff);
		bufflen = len;
		buff = kmalloc(bufflen, GFP_KERNEL);
		if (!buff) {
			sdev_printk(KERN_WARNING, sdev,
				    "%s: kmalloc buffer failed\n",__func__);
			/* Temporary failure, bypass */
			pg->expiry = 0;
			return SCSI_DH_DEV_TEMP_BUSY;
		}
		goto retry;
	}

	orig_transition_tmo = pg->transition_tmo;
	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR && buff[5] != 0)
		pg->transition_tmo = buff[5];
	else
		pg->transition_tmo = ALUA_FAILOVER_TIMEOUT;

	if (orig_transition_tmo != pg->transition_tmo) {
		sdev_printk(KERN_INFO, sdev,
			    "%s: transition timeout set to %d seconds\n",
			    ALUA_DH_NAME, pg->transition_tmo);
		pg->expiry = jiffies + pg->transition_tmo * HZ;
	}

	if ((buff[4] & RTPG_FMT_MASK) == RTPG_FMT_EXT_HDR)
		tpg_desc_tbl_off = 8;
	else
		tpg_desc_tbl_off = 4;

	/* Walk every target port group descriptor in the response */
	for (k = tpg_desc_tbl_off, desc = buff + tpg_desc_tbl_off;
	     k < len;
	     k += off, desc += off) {
		u16 group_id = get_unaligned_be16(&desc[2]);

		spin_lock_irqsave(&port_group_lock, flags);
		tmp_pg = alua_find_get_pg(pg->device_id_str, pg->device_id_len,
					  group_id);
		spin_unlock_irqrestore(&port_group_lock, flags);
		if (tmp_pg) {
			if (spin_trylock_irqsave(&tmp_pg->lock, flags)) {
				if ((tmp_pg == pg) ||
				    !(tmp_pg->flags & ALUA_PG_RUNNING)) {
					struct alua_dh_data *h;

					tmp_pg->state = desc[0] & 0x0f;
					tmp_pg->pref = desc[0] >> 7;
					rcu_read_lock();
					list_for_each_entry_rcu(h,
						&tmp_pg->dh_list, node) {
						if (!h->sdev)
							continue;
						h->sdev->access_state = desc[0];
					}
					rcu_read_unlock();
				}
				if (tmp_pg == pg)
					tmp_pg->valid_states = desc[1];
				spin_unlock_irqrestore(&tmp_pg->lock, flags);
			}
			kref_put(&tmp_pg->kref, release_port_group);
		}
		/* Descriptor size: 8-byte header plus 4 bytes per port */
		off = 8 + (desc[7] * 4);
	}

 skip_rtpg:
	spin_lock_irqsave(&pg->lock, flags);
	if (transitioning_sense)
		pg->state = SCSI_ACCESS_STATE_TRANSITIONING;

	if (group_id_old != pg->group_id || state_old != pg->state ||
	    pref_old != pg->pref || valid_states_old != pg->valid_states)
		sdev_printk(KERN_INFO, sdev,
			    "%s: port group %02x state %c %s supports %c%c%c%c%c%c%c\n",
			    ALUA_DH_NAME, pg->group_id,
			    print_alua_state(pg->state),
			    pg->pref ? "preferred" : "non-preferred",
			    pg->valid_states&TPGS_SUPPORT_TRANSITION?'T':'t',
			    pg->valid_states&TPGS_SUPPORT_OFFLINE?'O':'o',
			    pg->valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l',
			    pg->valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u',
			    pg->valid_states&TPGS_SUPPORT_STANDBY?'S':'s',
			    pg->valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n',
			    pg->valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a');

	switch (pg->state) {
	case SCSI_ACCESS_STATE_TRANSITIONING:
		if (time_before(jiffies, pg->expiry)) {
			/* State transition, retry */
			pg->interval = ALUA_RTPG_RETRY_DELAY;
			err = SCSI_DH_RETRY;
		} else {
			struct alua_dh_data *h;

			/* Transitioning time exceeded, set port to standby */
			err = SCSI_DH_IO;
			pg->state = SCSI_ACCESS_STATE_STANDBY;
			pg->expiry = 0;
			rcu_read_lock();
			list_for_each_entry_rcu(h, &pg->dh_list, node) {
				if (!h->sdev)
					continue;
				h->sdev->access_state =
					(pg->state & SCSI_ACCESS_STATE_MASK);
				if (pg->pref)
					h->sdev->access_state |=
						SCSI_ACCESS_STATE_PREFERRED;
			}
			rcu_read_unlock();
		}
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		/* Path unusable */
		err = SCSI_DH_DEV_OFFLINED;
		pg->expiry = 0;
		break;
	default:
		/* Useable path if active */
		err = SCSI_DH_OK;
		pg->expiry = 0;
		break;
	}
	spin_unlock_irqrestore(&pg->lock, flags);
	kfree(buff);
	return err;
}

/*
 * alua_stpg - Issue a SET TARGET PORT GROUP command
 *
 * Issue a SET TARGET PORT GROUP command and evaluate the
 * response.
 * Returns SCSI_DH_RETRY per default to trigger
 * a re-evaluation of the target group state or SCSI_DH_OK
 * if no further action needs to be taken.
 */
static unsigned alua_stpg(struct scsi_device *sdev, struct alua_port_group *pg)
{
	int retval;
	struct scsi_sense_hdr sense_hdr;

	if (!(pg->tpgs & TPGS_MODE_EXPLICIT)) {
		/* Only implicit ALUA supported, retry */
		return SCSI_DH_RETRY;
	}
	switch (pg->state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
		/* Already where we want to be */
		return SCSI_DH_OK;
	case SCSI_ACCESS_STATE_ACTIVE:
		/* Optionally accept non-optimized paths when allowed */
		if ((pg->flags & ALUA_OPTIMIZE_STPG) &&
		    !pg->pref &&
		    (pg->tpgs & TPGS_MODE_IMPLICIT))
			return SCSI_DH_OK;
		break;
	case SCSI_ACCESS_STATE_STANDBY:
	case SCSI_ACCESS_STATE_UNAVAILABLE:
		break;
	case SCSI_ACCESS_STATE_OFFLINE:
		return SCSI_DH_IO;
	case SCSI_ACCESS_STATE_TRANSITIONING:
		break;
	default:
		sdev_printk(KERN_INFO, sdev,
			    "%s: stpg failed, unhandled TPGS state %d",
			    ALUA_DH_NAME, pg->state);
		return SCSI_DH_NOSYS;
	}
	retval = submit_stpg(sdev, pg->group_id, &sense_hdr);

	if (retval) {
		if (retval < 0 || !scsi_sense_valid(&sense_hdr)) {
			sdev_printk(KERN_INFO, sdev,
				    "%s: stpg failed, result %d",
				    ALUA_DH_NAME, retval);
			if (retval < 0)
				return SCSI_DH_DEV_TEMP_BUSY;
		} else {
			sdev_printk(KERN_INFO, sdev, "%s: stpg failed\n",
				    ALUA_DH_NAME);
			scsi_print_sense_hdr(sdev, ALUA_DH_NAME, &sense_hdr);
		}
	}
	/* Retry RTPG */
	return SCSI_DH_RETRY;
}

/*
 * alua_rtpg_select_sdev - pick another device for an RTPG retry
 *
 * Disables the current rtpg_sdev and selects a different, not yet
 * disabled device from the port group's dh_list to retry the RTPG on.
 *
 * The caller must call scsi_device_put() on the returned pointer if it is not
 * NULL.
 */
static struct scsi_device * __must_check
alua_rtpg_select_sdev(struct alua_port_group *pg)
{
	struct alua_dh_data *h;
	struct scsi_device *sdev = NULL, *prev_sdev;

	lockdep_assert_held(&pg->lock);
	if (WARN_ON(!pg->rtpg_sdev))
		return NULL;

	/*
	 * RCU protection isn't necessary for dh_list here
	 * as we hold pg->lock, but for access to h->pg.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(h, &pg->dh_list, node) {
		if (!h->sdev)
			continue;
		if (h->sdev == pg->rtpg_sdev) {
			h->disabled = true;
			continue;
		}
		if (rcu_dereference(h->pg) == pg &&
		    !h->disabled &&
		    !scsi_device_get(h->sdev)) {
			sdev = h->sdev;
			break;
		}
	}
	rcu_read_unlock();

	if (!sdev) {
		pr_warn("%s: no device found for rtpg\n",
			(pg->device_id_len ?
			 (char *)pg->device_id_str : "(nameless PG)"));
		return NULL;
	}

	sdev_printk(KERN_INFO, sdev, "rtpg retry on different device\n");

	prev_sdev = pg->rtpg_sdev;
	pg->rtpg_sdev = sdev;

	return prev_sdev;
}

/*
 * alua_rtpg_work - deferred RTPG/STPG state machine
 *
 * Runs on kaluad_wq; issues RTPG (and STPG when requested), reschedules
 * itself on retry conditions, and finally invokes any queued activation
 * callbacks with the resulting status.
 */
static void alua_rtpg_work(struct work_struct *work)
{
	struct alua_port_group *pg =
		container_of(work, struct alua_port_group, rtpg_work.work);
	struct scsi_device *sdev, *prev_sdev = NULL;
	LIST_HEAD(qdata_list);
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata, *tmp;
	struct alua_dh_data *h;
	unsigned long flags;

	spin_lock_irqsave(&pg->lock, flags);
	sdev = pg->rtpg_sdev;
	if (!sdev) {
		WARN_ON(pg->flags & ALUA_PG_RUN_RTPG);
		WARN_ON(pg->flags & ALUA_PG_RUN_STPG);
		spin_unlock_irqrestore(&pg->lock, flags);
		kref_put(&pg->kref, release_port_group);
		return;
	}
	pg->flags |= ALUA_PG_RUNNING;
	if (pg->flags & ALUA_PG_RUN_RTPG) {
		int state = pg->state;

		pg->flags &= ~ALUA_PG_RUN_RTPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		if (state == SCSI_ACCESS_STATE_TRANSITIONING) {
			/* Poll with TUR while the array is transitioning */
			if (alua_tur(sdev) == SCSI_DH_RETRY) {
				spin_lock_irqsave(&pg->lock, flags);
				pg->flags &= ~ALUA_PG_RUNNING;
				pg->flags |= ALUA_PG_RUN_RTPG;
				if (!pg->interval)
					pg->interval = ALUA_RTPG_RETRY_DELAY;
				spin_unlock_irqrestore(&pg->lock, flags);
				queue_delayed_work(kaluad_wq, &pg->rtpg_work,
						   pg->interval * HZ);
				return;
			}
			/* Send RTPG on failure or if TUR indicates SUCCESS */
		}
		err = alua_rtpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);

		/* If RTPG failed on the current device, try using another */
		if (err == SCSI_DH_RES_TEMP_UNAVAIL &&
		    (prev_sdev = alua_rtpg_select_sdev(pg)))
			err = SCSI_DH_IMM_RETRY;

		if (err == SCSI_DH_RETRY || err == SCSI_DH_IMM_RETRY ||
		    pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags &= ~ALUA_PG_RUNNING;
			if (err == SCSI_DH_IMM_RETRY)
				pg->interval = 0;
			else if (!pg->interval && !(pg->flags & ALUA_PG_RUN_RTPG))
				pg->interval = ALUA_RTPG_RETRY_DELAY;
			pg->flags |= ALUA_PG_RUN_RTPG;
			spin_unlock_irqrestore(&pg->lock, flags);
			goto queue_rtpg;
		}
		if (err != SCSI_DH_OK)
			pg->flags &= ~ALUA_PG_RUN_STPG;
	}
	if (pg->flags & ALUA_PG_RUN_STPG) {
		pg->flags &= ~ALUA_PG_RUN_STPG;
		spin_unlock_irqrestore(&pg->lock, flags);
		err = alua_stpg(sdev, pg);
		spin_lock_irqsave(&pg->lock, flags);
		if (err == SCSI_DH_RETRY || pg->flags & ALUA_PG_RUN_RTPG) {
			pg->flags |= ALUA_PG_RUN_RTPG;
			pg->interval = 0;
			pg->flags &= ~ALUA_PG_RUNNING;
			spin_unlock_irqrestore(&pg->lock, flags);
			goto queue_rtpg;
		}
	}

	list_splice_init(&pg->rtpg_list, &qdata_list);
	/*
	 * We went through an RTPG, for good or bad.
	 * Re-enable all devices for the next attempt.
	 */
	list_for_each_entry(h, &pg->dh_list, node)
		h->disabled = false;
	pg->rtpg_sdev = NULL;
	spin_unlock_irqrestore(&pg->lock, flags);

	if (prev_sdev)
		scsi_device_put(prev_sdev);

	/* Complete all queued activation requests with the final status */
	list_for_each_entry_safe(qdata, tmp, &qdata_list, entry) {
		list_del(&qdata->entry);
		if (qdata->callback_fn)
			qdata->callback_fn(qdata->callback_data, err);
		kfree(qdata);
	}
	spin_lock_irqsave(&pg->lock, flags);
	pg->flags &= ~ALUA_PG_RUNNING;
	spin_unlock_irqrestore(&pg->lock, flags);
	scsi_device_put(sdev);
	kref_put(&pg->kref, release_port_group);
	return;

queue_rtpg:
	if (prev_sdev)
		scsi_device_put(prev_sdev);
	queue_delayed_work(kaluad_wq, &pg->rtpg_work, pg->interval * HZ);
}

/**
 * alua_rtpg_queue() - cause RTPG to be submitted asynchronously
 * @pg: ALUA port group associated with @sdev.
 * @sdev: SCSI device for which to submit an RTPG.
 * @qdata: Information about the callback to invoke after the RTPG.
 * @force: Whether or not to submit an RTPG if a work item that will submit an
 *         RTPG already has been scheduled.
 *
 * Returns true if and only if alua_rtpg_work() will be called asynchronously.
 * That function is responsible for calling @qdata->fn().
 *
 * Context: may be called from atomic context (alua_check()) only if the caller
 *	holds an sdev reference.
 */
static bool alua_rtpg_queue(struct alua_port_group *pg,
			    struct scsi_device *sdev,
			    struct alua_queue_data *qdata, bool force)
{
	int start_queue = 0;
	unsigned long flags;

	if (WARN_ON_ONCE(!pg) || scsi_device_get(sdev))
		return false;

	spin_lock_irqsave(&pg->lock, flags);
	if (qdata) {
		/* An activation request always implies a forced STPG */
		list_add_tail(&qdata->entry, &pg->rtpg_list);
		pg->flags |= ALUA_PG_RUN_STPG;
		force = true;
	}
	if (pg->rtpg_sdev == NULL) {
		struct alua_dh_data *h = sdev->handler_data;

		rcu_read_lock();
		/* Only claim the slot if sdev still belongs to this group */
		if (h && rcu_dereference(h->pg) == pg) {
			pg->interval = 0;
			pg->flags |= ALUA_PG_RUN_RTPG;
			kref_get(&pg->kref);
			pg->rtpg_sdev = sdev;
			start_queue = 1;
		}
		rcu_read_unlock();
	} else if (!(pg->flags & ALUA_PG_RUN_RTPG) && force) {
		pg->flags |= ALUA_PG_RUN_RTPG;
		/* Do not queue if the worker is already running */
		if (!(pg->flags & ALUA_PG_RUNNING)) {
			kref_get(&pg->kref);
			start_queue = 1;
		}
	}

	spin_unlock_irqrestore(&pg->lock, flags);

	if (start_queue) {
		if (queue_delayed_work(kaluad_wq, &pg->rtpg_work,
				msecs_to_jiffies(ALUA_RTPG_DELAY_MSECS)))
			sdev = NULL;	/* reference handed to the worker */
		else
			kref_put(&pg->kref, release_port_group);
	}
	if (sdev)
		scsi_device_put(sdev);

	return true;
}

/*
 * alua_initialize - Initialize ALUA state
 * @sdev: the device to be initialized
 *
 * For the prep_fn to work correctly we have
 * to initialize the ALUA state for the device.
 */
static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h)
{
	int err = SCSI_DH_DEV_UNSUPP, tpgs;

	mutex_lock(&h->init_mutex);
	h->disabled = false;
	tpgs = alua_check_tpgs(sdev);
	if (tpgs != TPGS_MODE_NONE)
		err = alua_check_vpd(sdev, h, tpgs);
	h->init_error = err;
	mutex_unlock(&h->init_mutex);
	return err;
}
/*
 * alua_set_params - set/unset the optimize flag
 * @sdev: device on the path to be activated
 * params - parameters in the following format
 *      "no_of_params\0param1\0param2\0param3\0...\0"
 * For example, to set the flag pass the following parameters
 * from multipath.conf
 *     hardware_handler        "2 alua 1"
 */
static int alua_set_params(struct scsi_device *sdev, const char *params)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg = NULL;
	unsigned int optimize = 0, argc;
	const char *p = params;
	int result = SCSI_DH_OK;
	unsigned long flags;

	/* Exactly one parameter is accepted: the optimize flag (0 or 1) */
	if ((sscanf(params, "%u", &argc) != 1) || (argc != 1))
		return -EINVAL;

	while (*p++)
		;
	if ((sscanf(p, "%u", &optimize) != 1) || (optimize > 1))
		return -EINVAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg) {
		rcu_read_unlock();
		return -ENXIO;
	}
	spin_lock_irqsave(&pg->lock, flags);
	if (optimize)
		pg->flags |= ALUA_OPTIMIZE_STPG;
	else
		pg->flags &= ~ALUA_OPTIMIZE_STPG;
	spin_unlock_irqrestore(&pg->lock, flags);
	rcu_read_unlock();

	return result;
}

/*
 * alua_activate - activate a path
 * @sdev: device on the path to be activated
 *
 * We're currently switching the port group to be activated only and
 * let the array figure out the rest.
 * There may be other arrays which require us to switch all port groups
 * based on a certain policy. But until we actually encounter them it
 * should be okay.
 */
static int alua_activate(struct scsi_device *sdev,
			activate_complete fn, void *data)
{
	struct alua_dh_data *h = sdev->handler_data;
	int err = SCSI_DH_OK;
	struct alua_queue_data *qdata;
	struct alua_port_group *pg;

	qdata = kzalloc(sizeof(*qdata), GFP_KERNEL);
	if (!qdata) {
		err = SCSI_DH_RES_TEMP_UNAVAIL;
		goto out;
	}
	qdata->callback_fn = fn;
	qdata->callback_data = data;

	mutex_lock(&h->init_mutex);
	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		kfree(qdata);
		err = h->init_error;
		mutex_unlock(&h->init_mutex);
		goto out;
	}
	rcu_read_unlock();
	mutex_unlock(&h->init_mutex);

	if (alua_rtpg_queue(pg, sdev, qdata, true)) {
		/* Worker owns qdata now and will invoke the callback */
		fn = NULL;
	} else {
		kfree(qdata);
		err = SCSI_DH_DEV_OFFLINED;
	}
	kref_put(&pg->kref, release_port_group);
out:
	if (fn)
		fn(data, err);
	return 0;
}

/*
 * alua_check - check path status
 * @sdev: device on the path to be checked
 *
 * Check the device status
 */
static void alua_check(struct scsi_device *sdev, bool force)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (!pg || !kref_get_unless_zero(&pg->kref)) {
		rcu_read_unlock();
		return;
	}
	rcu_read_unlock();
	alua_rtpg_queue(pg, sdev, NULL, force);
	kref_put(&pg->kref, release_port_group);
}

/*
 * alua_prep_fn - request callback
 *
 * Fail I/O to all paths not in state
 * active/optimized or active/non-optimized.
 */
static blk_status_t alua_prep_fn(struct scsi_device *sdev, struct request *req)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;
	unsigned char state = SCSI_ACCESS_STATE_OPTIMAL;

	rcu_read_lock();
	pg = rcu_dereference(h->pg);
	if (pg)
		state = pg->state;
	rcu_read_unlock();

	switch (state) {
	case SCSI_ACCESS_STATE_OPTIMAL:
	case SCSI_ACCESS_STATE_ACTIVE:
	case SCSI_ACCESS_STATE_LBA:
	case SCSI_ACCESS_STATE_TRANSITIONING:
		return BLK_STS_OK;
	default:
		req->rq_flags |= RQF_QUIET;
		return BLK_STS_IOERR;
	}
}

/* Re-run ALUA discovery after a rescan of the device */
static void alua_rescan(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;

	alua_initialize(sdev, h);
}

/*
 * alua_bus_attach - Attach device handler
 * @sdev: device to be attached to
 */
static int alua_bus_attach(struct scsi_device *sdev)
{
	struct alua_dh_data *h;
	int err;

	h = kzalloc(sizeof(*h) , GFP_KERNEL);
	if (!h)
		return SCSI_DH_NOMEM;
	spin_lock_init(&h->pg_lock);
	rcu_assign_pointer(h->pg, NULL);
	h->init_error = SCSI_DH_OK;
	h->sdev = sdev;
	INIT_LIST_HEAD(&h->node);

	mutex_init(&h->init_mutex);
	err = alua_initialize(sdev, h);
	if (err != SCSI_DH_OK && err != SCSI_DH_DEV_OFFLINED)
		goto failed;

	sdev->handler_data = h;
	return SCSI_DH_OK;
failed:
	kfree(h);
	return err;
}

/*
 * alua_bus_detach - Detach device handler
 * @sdev: device to be detached from
 */
static void alua_bus_detach(struct scsi_device *sdev)
{
	struct alua_dh_data *h = sdev->handler_data;
	struct alua_port_group *pg;

	spin_lock(&h->pg_lock);
	pg = rcu_dereference_protected(h->pg, lockdep_is_held(&h->pg_lock));
	rcu_assign_pointer(h->pg, NULL);
	spin_unlock(&h->pg_lock);
	if (pg) {
		spin_lock_irq(&pg->lock);
		list_del_rcu(&h->node);
		spin_unlock_irq(&pg->lock);
		kref_put(&pg->kref, release_port_group);
	}
	sdev->handler_data = NULL;
	/* Wait for readers of h->pg before freeing h */
	synchronize_rcu();
	kfree(h);
}

static struct scsi_device_handler alua_dh = {
	.name = ALUA_DH_NAME,
	.module = THIS_MODULE,
	.attach = alua_bus_attach,
	.detach = alua_bus_detach,
	.prep_fn = alua_prep_fn,
	.check_sense = alua_check_sense,
	.activate = alua_activate,
	.rescan = alua_rescan,
	.set_params = alua_set_params,
};

static int __init alua_init(void)
{
	int r;

	kaluad_wq = alloc_workqueue("kaluad", WQ_MEM_RECLAIM, 0);
	if (!kaluad_wq)
		return -ENOMEM;

	r = scsi_register_device_handler(&alua_dh);
	if (r != 0) {
		printk(KERN_ERR "%s: Failed to register scsi device handler",
			ALUA_DH_NAME);
		destroy_workqueue(kaluad_wq);
	}
	return r;
}

static void __exit alua_exit(void)
{
	scsi_unregister_device_handler(&alua_dh);
	destroy_workqueue(kaluad_wq);
}

module_init(alua_init);
module_exit(alua_exit);

MODULE_DESCRIPTION("DM Multipath ALUA support");
MODULE_AUTHOR("Hannes Reinecke <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ALUA_DH_VER);
/*
 * Copyright 2021 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "cgrp.h"
#include "chan.h"
#include "chid.h"
#include "runl.h"
#include "priv.h"

#include <core/gpuobj.h>
#include <subdev/mmu.h>

/* Drop a reference on an engine context; frees it (and its HW object) when
 * the last reference goes away.  *pectx is always NULLed on exit if non-NULL.
 */
static void
nvkm_cgrp_ectx_put(struct nvkm_cgrp *cgrp, struct nvkm_ectx **pectx)
{
	struct nvkm_ectx *ectx = *pectx;

	if (ectx) {
		struct nvkm_engn *engn = ectx->engn;

		if (refcount_dec_and_test(&ectx->refs)) {
			CGRP_TRACE(cgrp, "dtor ectx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_object_del(&ectx->object);
			list_del(&ectx->head);
			kfree(ectx);
		}

		*pectx = NULL;
	}
}

/* Find-or-create the per-engine context for this channel group.  An existing
 * context is shared via its refcount; a fresh one is linked onto cgrp->ectxs
 * before HW allocation so the error path can unwind through _ectx_put().
 */
static int
nvkm_cgrp_ectx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_ectx **pectx,
		   struct nvkm_chan *chan, struct nvkm_client *client)
{
	struct nvkm_engine *engine = engn->engine;
	struct nvkm_oclass cclass = {
		.client = client,
		.engine = engine,
	};
	struct nvkm_ectx *ectx;
	int ret = 0;

	/* Look for an existing context for this engine in the channel group. */
	ectx = nvkm_list_find(ectx, &cgrp->ectxs, head, ectx->engn == engn);
	if (ectx) {
		refcount_inc(&ectx->refs);
		*pectx = ectx;
		return 0;
	}

	/* Nope - create a fresh one. */
	CGRP_TRACE(cgrp, "ctor ectx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(ectx = *pectx = kzalloc(sizeof(*ectx), GFP_KERNEL)))
		return -ENOMEM;

	ectx->engn = engn;
	refcount_set(&ectx->refs, 1);
	refcount_set(&ectx->uses, 0);
	list_add_tail(&ectx->head, &cgrp->ectxs);

	/* Allocate the HW structures. */
	if (engine->func->fifo.cclass)
		ret = engine->func->fifo.cclass(chan, &cclass, &ectx->object);
	else if (engine->func->cclass)
		ret = nvkm_object_new_(engine->func->cclass, &cclass, NULL, 0, &ectx->object);

	if (ret)
		nvkm_cgrp_ectx_put(cgrp, pectx);

	return ret;
}

/* Drop a reference on a VEID sub-context.  On the final put the VMA, instance
 * object, parent ectx reference and VMM engref are all released, in that
 * order, before the vctx itself is freed.  *pvctx is NULLed if non-NULL.
 */
void
nvkm_cgrp_vctx_put(struct nvkm_cgrp *cgrp, struct nvkm_vctx **pvctx)
{
	struct nvkm_vctx *vctx = *pvctx;

	if (vctx) {
		struct nvkm_engn *engn = vctx->ectx->engn;

		if (refcount_dec_and_test(&vctx->refs)) {
			CGRP_TRACE(cgrp, "dtor vctx %d[%s]", engn->id, engn->engine->subdev.name);
			nvkm_vmm_put(vctx->vmm, &vctx->vma);
			nvkm_gpuobj_del(&vctx->inst);

			nvkm_cgrp_ectx_put(cgrp, &vctx->ectx);
			if (vctx->vmm) {
				atomic_dec(&vctx->vmm->engref[engn->engine->subdev.type]);
				nvkm_vmm_unref(&vctx->vmm);
			}
			list_del(&vctx->head);
			kfree(vctx);
		}

		*pvctx = NULL;
	}
}

/* Find-or-create the sub-context for this engine + channel VMM pairing.
 * Takes a reference on the underlying ectx first; any failure unwinds via
 * nvkm_cgrp_vctx_put()/nvkm_cgrp_ectx_put().
 */
int
nvkm_cgrp_vctx_get(struct nvkm_cgrp *cgrp, struct nvkm_engn *engn, struct nvkm_chan *chan,
		   struct nvkm_vctx **pvctx, struct nvkm_client *client)
{
	struct nvkm_ectx *ectx;
	struct nvkm_vctx *vctx;
	int ret;

	/* Look for an existing sub-context for this engine+VEID in the channel group. */
	vctx = nvkm_list_find(vctx, &cgrp->vctxs, head,
			      vctx->ectx->engn == engn && vctx->vmm == chan->vmm);
	if (vctx) {
		refcount_inc(&vctx->refs);
		*pvctx = vctx;
		return 0;
	}

	/* Nope - create a fresh one.  But, context first. */
	ret = nvkm_cgrp_ectx_get(cgrp, engn, &ectx, chan, client);
	if (ret) {
		CGRP_ERROR(cgrp, "ectx %d[%s]: %d", engn->id, engn->engine->subdev.name, ret);
		return ret;
	}

	/* Now, create the sub-context. */
	CGRP_TRACE(cgrp, "ctor vctx %d[%s]", engn->id, engn->engine->subdev.name);
	if (!(vctx = *pvctx = kzalloc(sizeof(*vctx), GFP_KERNEL))) {
		nvkm_cgrp_ectx_put(cgrp, &ectx);
		return -ENOMEM;
	}

	vctx->ectx = ectx;
	vctx->vmm = nvkm_vmm_ref(chan->vmm);
	refcount_set(&vctx->refs, 1);
	list_add_tail(&vctx->head, &cgrp->vctxs);

	/* MMU on some GPUs needs to know engine usage for TLB invalidation. */
	if (vctx->vmm)
		atomic_inc(&vctx->vmm->engref[engn->engine->subdev.type]);

	/* Allocate the HW structures. */
	if (engn->func->ctor2) {
		ret = engn->func->ctor2(engn, vctx, chan);
	} else if (engn->func->bind) {
		ret = nvkm_object_bind(vctx->ectx->object, NULL, 0, &vctx->inst);
		if (ret == 0 && engn->func->ctor)
			ret = engn->func->ctor(engn, vctx);
	}

	if (ret)
		nvkm_cgrp_vctx_put(cgrp, pvctx);

	return ret;
}

/* Final destructor, called when the last kref on a channel group is dropped. */
static void
nvkm_cgrp_del(struct kref *kref)
{
	struct nvkm_cgrp *cgrp = container_of(kref, typeof(*cgrp), kref);
	struct nvkm_runl *runl = cgrp->runl;

	if (runl->cgid)
		nvkm_chid_put(runl->cgid, cgrp->id, &cgrp->lock);

	mutex_destroy(&cgrp->mutex);
	nvkm_vmm_unref(&cgrp->vmm);
	kfree(cgrp);
}

/* Drop a channel group reference and clear the caller's pointer. */
void
nvkm_cgrp_unref(struct nvkm_cgrp **pcgrp)
{
	struct nvkm_cgrp *cgrp = *pcgrp;

	if (!cgrp)
		return;

	kref_put(&cgrp->kref, nvkm_cgrp_del);
	*pcgrp = NULL;
}

/* Take an additional reference; NULL-safe, returns the same pointer. */
struct nvkm_cgrp *
nvkm_cgrp_ref(struct nvkm_cgrp *cgrp)
{
	if (cgrp)
		kref_get(&cgrp->kref);

	return cgrp;
}

/* Release a cgrp obtained with its lock held (counterpart of a *_get that
 * returned with cgrp->lock taken); drops the lock, not the refcount.
 */
void
nvkm_cgrp_put(struct nvkm_cgrp **pcgrp, unsigned long irqflags)
{
	struct nvkm_cgrp *cgrp = *pcgrp;

	if (!cgrp)
		return;

	*pcgrp = NULL;
	spin_unlock_irqrestore(&cgrp->lock, irqflags);
}

/* Allocate and initialise a new channel group on @runl; on success *pcgrp
 * holds one reference.  Fails with -ENOSPC if no channel-group ID is free.
 */
int
nvkm_cgrp_new(struct nvkm_runl *runl, const char *name, struct nvkm_vmm *vmm, bool hw,
	      struct nvkm_cgrp **pcgrp)
{
	struct nvkm_cgrp *cgrp;

	if (!(cgrp = *pcgrp = kmalloc(sizeof(*cgrp), GFP_KERNEL)))
		return -ENOMEM;

	cgrp->func = runl->fifo->func->cgrp.func;
	strscpy(cgrp->name, name, sizeof(cgrp->name));
	cgrp->runl = runl;
	cgrp->vmm = nvkm_vmm_ref(vmm);
	cgrp->hw = hw;
	cgrp->id = -1;
	kref_init(&cgrp->kref);
	INIT_LIST_HEAD(&cgrp->chans);
	cgrp->chan_nr = 0;
	spin_lock_init(&cgrp->lock);
	INIT_LIST_HEAD(&cgrp->ectxs);
	INIT_LIST_HEAD(&cgrp->vctxs);
	mutex_init(&cgrp->mutex);
	atomic_set(&cgrp->rc, NVKM_CGRP_RC_NONE);

	if (runl->cgid) {
		cgrp->id = nvkm_chid_get(runl->cgid, cgrp);
		if (cgrp->id < 0) {
			RUNL_ERROR(runl, "!cgids");
			nvkm_cgrp_unref(pcgrp);
			return -ENOSPC;
		}
	}

	return 0;
}
// SPDX-License-Identifier: GPL-2.0+
/* Broadcom BCM54140 Quad SGMII/QSGMII Copper/Fiber Gigabit PHY
 *
 * Copyright (c) 2020 Michael Walle <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/brcmphy.h>
#include <linux/hwmon.h>
#include <linux/module.h>
#include <linux/phy.h>

#include "bcm-phy-lib.h"

/* RDB per-port registers */
#define BCM54140_RDB_ISR	0x00a	/* interrupt status */
#define BCM54140_RDB_IMR	0x00b	/* interrupt mask */
#define  BCM54140_RDB_INT_LINK	BIT(1)	/* link status changed */
#define  BCM54140_RDB_INT_SPEED	BIT(2)	/* link speed change */
#define  BCM54140_RDB_INT_DUPLEX BIT(3)	/* duplex mode changed */
#define BCM54140_RDB_SPARE1	0x012	/* spare control 1 */
#define  BCM54140_RDB_SPARE1_LSLM BIT(2)	/* link speed LED mode */
#define BCM54140_RDB_SPARE2	0x014	/* spare control 2 */
#define  BCM54140_RDB_SPARE2_WS_RTRY_DIS BIT(8)	/* wirespeed retry disable */
#define  BCM54140_RDB_SPARE2_WS_RTRY_LIMIT GENMASK(4, 2)	/* retry limit */
#define BCM54140_RDB_SPARE3	0x015	/* spare control 3 */
#define  BCM54140_RDB_SPARE3_BIT0 BIT(0)
#define BCM54140_RDB_LED_CTRL	0x019	/* LED control */
#define  BCM54140_RDB_LED_CTRL_ACTLINK0	BIT(4)
#define  BCM54140_RDB_LED_CTRL_ACTLINK1	BIT(8)
#define BCM54140_RDB_C_APWR	0x01a	/* auto power down control */
#define  BCM54140_RDB_C_APWR_SINGLE_PULSE	BIT(8)	/* single pulse */
#define  BCM54140_RDB_C_APWR_APD_MODE_DIS	0	/* ADP disable */
#define  BCM54140_RDB_C_APWR_APD_MODE_EN	1	/* ADP enable */
#define  BCM54140_RDB_C_APWR_APD_MODE_DIS2	2	/* ADP disable */
#define  BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG	3	/* ADP enable w/ aneg */
#define  BCM54140_RDB_C_APWR_APD_MODE_MASK	GENMASK(6, 5)
#define  BCM54140_RDB_C_APWR_SLP_TIM_MASK BIT(4)	/* sleep timer */
#define  BCM54140_RDB_C_APWR_SLP_TIM_2_7	0	/* 2.7s */
#define  BCM54140_RDB_C_APWR_SLP_TIM_5_4	1	/* 5.4s */
#define BCM54140_RDB_C_PWR	0x02a	/* copper power control */
#define  BCM54140_RDB_C_PWR_ISOLATE	BIT(5)	/* super isolate mode */
#define BCM54140_RDB_C_MISC_CTRL	0x02f	/* misc copper control */
#define  BCM54140_RDB_C_MISC_CTRL_WS_EN BIT(4)	/* wirespeed enable */

/* RDB global registers */
#define BCM54140_RDB_TOP_IMR	0x82d	/* interrupt mask */
#define  BCM54140_RDB_TOP_IMR_PORT0	BIT(4)
#define  BCM54140_RDB_TOP_IMR_PORT1	BIT(5)
#define  BCM54140_RDB_TOP_IMR_PORT2	BIT(6)
#define  BCM54140_RDB_TOP_IMR_PORT3	BIT(7)
#define BCM54140_RDB_MON_CTRL	0x831	/* monitor control */
#define  BCM54140_RDB_MON_CTRL_V_MODE	BIT(3)	/* voltage mode */
#define  BCM54140_RDB_MON_CTRL_SEL_MASK	GENMASK(2, 1)
#define  BCM54140_RDB_MON_CTRL_SEL_TEMP	0	/* measure temperature */
#define  BCM54140_RDB_MON_CTRL_SEL_1V0	1	/* measure AVDDL 1.0V */
#define  BCM54140_RDB_MON_CTRL_SEL_3V3	2	/* measure AVDDH 3.3V */
#define  BCM54140_RDB_MON_CTRL_SEL_RR	3	/* measure all round-robin */
#define  BCM54140_RDB_MON_CTRL_PWR_DOWN	BIT(0)	/* power-down monitor */
#define BCM54140_RDB_MON_TEMP_VAL	0x832	/* temperature value */
#define BCM54140_RDB_MON_TEMP_MAX	0x833	/* temperature high thresh */
#define BCM54140_RDB_MON_TEMP_MIN	0x834	/* temperature low thresh */
#define  BCM54140_RDB_MON_TEMP_DATA_MASK GENMASK(9, 0)
#define BCM54140_RDB_MON_1V0_VAL	0x835	/* AVDDL 1.0V value */
#define BCM54140_RDB_MON_1V0_MAX	0x836	/* AVDDL 1.0V high thresh */
#define BCM54140_RDB_MON_1V0_MIN	0x837	/* AVDDL 1.0V low thresh */
#define  BCM54140_RDB_MON_1V0_DATA_MASK	GENMASK(10, 0)
#define BCM54140_RDB_MON_3V3_VAL	0x838	/* AVDDH 3.3V value */
#define BCM54140_RDB_MON_3V3_MAX	0x839	/* AVDDH 3.3V high thresh */
#define BCM54140_RDB_MON_3V3_MIN	0x83a	/* AVDDH 3.3V low thresh */
#define  BCM54140_RDB_MON_3V3_DATA_MASK	GENMASK(11, 0)
#define BCM54140_RDB_MON_ISR	0x83b	/* interrupt status */
#define  BCM54140_RDB_MON_ISR_3V3	BIT(2)	/* AVDDH 3.3V alarm */
#define  BCM54140_RDB_MON_ISR_1V0	BIT(1)	/* AVDDL 1.0V alarm */
#define  BCM54140_RDB_MON_ISR_TEMP	BIT(0)	/* temperature alarm */

/* According to the datasheet the formula is:
 *   T = 413.35 - (0.49055 * bits[9:0])
 */
#define BCM54140_HWMON_TO_TEMP(v) (413350L - (v) * 491)
#define BCM54140_HWMON_FROM_TEMP(v) DIV_ROUND_CLOSEST_ULL(413350L - (v), 491)

/* According to the datasheet the formula is:
 *   U = bits[11:0] / 1024 * 220 / 0.2
 *
 * Normalized:
 *   U = bits[11:0] / 4096 * 2514
 */
#define BCM54140_HWMON_TO_IN_1V0(v) ((v) * 2514 >> 11)
#define BCM54140_HWMON_FROM_IN_1V0(v) DIV_ROUND_CLOSEST_ULL(((v) << 11), 2514)

/* According to the datasheet the formula is:
 *   U = bits[10:0] / 1024 * 880 / 0.7
 *
 * Normalized:
 *   U = bits[10:0] / 2048 * 4400
 */
#define BCM54140_HWMON_TO_IN_3V3(v) ((v) * 4400 >> 12)
#define BCM54140_HWMON_FROM_IN_3V3(v) DIV_ROUND_CLOSEST_ULL(((v) << 12), 4400)

/* Channel 0 is AVDDL (1.0V), channel 1 is AVDDH (3.3V). */
#define BCM54140_HWMON_TO_IN(ch, v) ((ch) ? BCM54140_HWMON_TO_IN_3V3(v) \
					  : BCM54140_HWMON_TO_IN_1V0(v))
#define BCM54140_HWMON_FROM_IN(ch, v) ((ch) ? BCM54140_HWMON_FROM_IN_3V3(v) \
					    : BCM54140_HWMON_FROM_IN_1V0(v))
#define BCM54140_HWMON_IN_MASK(ch) ((ch) ? BCM54140_RDB_MON_3V3_DATA_MASK \
					 : BCM54140_RDB_MON_1V0_DATA_MASK)
#define BCM54140_HWMON_IN_VAL_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_VAL \
					    : BCM54140_RDB_MON_1V0_VAL)
#define BCM54140_HWMON_IN_MIN_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_MIN \
					    : BCM54140_RDB_MON_1V0_MIN)
#define BCM54140_HWMON_IN_MAX_REG(ch) ((ch) ? BCM54140_RDB_MON_3V3_MAX \
					    : BCM54140_RDB_MON_1V0_MAX)
#define BCM54140_HWMON_IN_ALARM_BIT(ch) ((ch) ? BCM54140_RDB_MON_ISR_3V3 \
					      : BCM54140_RDB_MON_ISR_1V0)

/* This PHY has two different PHY IDs depending on its MODE_SEL pin. This
 * pin chooses between 4x SGMII and QSGMII mode:
 *   AE02_5009 4x SGMII
 *   AE02_5019 QSGMII
 */
#define BCM54140_PHY_ID_MASK	0xffffffe8

#define BCM54140_PHY_ID_REV(phy_id)	((phy_id) & 0x7)
#define BCM54140_REV_B0		1

#define BCM54140_DEFAULT_DOWNSHIFT 5
#define BCM54140_MAX_DOWNSHIFT 9

enum bcm54140_global_phy {
	BCM54140_BASE_ADDR = 0,
};

struct bcm54140_priv {
	int port;	/* port index within the quad PHY (0..3) */
	int base_addr;	/* MDIO address of port 0 (global register access) */
#if IS_ENABLED(CONFIG_HWMON)
	/* protect the alarm bits */
	struct mutex alarm_lock;
	u16 alarm;	/* latched alarm bits from BCM54140_RDB_MON_ISR */
#endif
};

#if IS_ENABLED(CONFIG_HWMON)
/* hwmon: limits are writable, values/labels/alarms are read-only. */
static umode_t bcm54140_hwmon_is_visible(const void *data,
					 enum hwmon_sensor_types type,
					 u32 attr, int channel)
{
	switch (type) {
	case hwmon_in:
		switch (attr) {
		case hwmon_in_min:
		case hwmon_in_max:
			return 0644;
		case hwmon_in_label:
		case hwmon_in_input:
		case hwmon_in_alarm:
			return 0444;
		default:
			return 0;
		}
	case hwmon_temp:
		switch (attr) {
		case hwmon_temp_min:
		case hwmon_temp_max:
			return 0644;
		case hwmon_temp_input:
		case hwmon_temp_alarm:
			return 0444;
		default:
			return 0;
		}
	default:
		return 0;
	}
}

/* Read one alarm bit.  The ISR register clears on read, so all bits are
 * latched into priv->alarm and only the requested bit is consumed here.
 */
static int bcm54140_hwmon_read_alarm(struct device *dev, unsigned int bit,
				     long *val)
{
	struct phy_device *phydev = dev_get_drvdata(dev);
	struct bcm54140_priv *priv = phydev->priv;
	int tmp, ret = 0;

	mutex_lock(&priv->alarm_lock);

	/* latch any alarm bits */
	tmp = bcm_phy_read_rdb(phydev, BCM54140_RDB_MON_ISR);
	if (tmp < 0) {
		ret = tmp;
		goto out;
	}
	priv->alarm |= tmp;

	*val = !!(priv->alarm & bit);
	priv->alarm &= ~bit;

out:
	mutex_unlock(&priv->alarm_lock);
	return ret;
}

/* hwmon read path for the temperature channel (value, limits, alarm). */
static int bcm54140_hwmon_read_temp(struct device *dev, u32 attr, long *val)
{
	struct phy_device *phydev = dev_get_drvdata(dev);
	u16 reg;
	int tmp;

	switch (attr) {
	case hwmon_temp_input:
		reg = BCM54140_RDB_MON_TEMP_VAL;
		break;
	case hwmon_temp_min:
		reg = BCM54140_RDB_MON_TEMP_MIN;
		break;
	case hwmon_temp_max:
		reg = BCM54140_RDB_MON_TEMP_MAX;
		break;
	case hwmon_temp_alarm:
		return bcm54140_hwmon_read_alarm(dev, BCM54140_RDB_MON_ISR_TEMP,
						 val);
	default:
		return -EOPNOTSUPP;
	}

	tmp = bcm_phy_read_rdb(phydev, reg);
	if (tmp < 0)
		return tmp;

	*val = BCM54140_HWMON_TO_TEMP(tmp & BCM54140_RDB_MON_TEMP_DATA_MASK);

	return 0;
}

/* hwmon read path for the voltage channels (value, limits, alarm). */
static int bcm54140_hwmon_read_in(struct device *dev, u32 attr,
				  int channel, long *val)
{
	struct phy_device *phydev = dev_get_drvdata(dev);
	u16 bit, reg;
	int tmp;

	switch (attr) {
	case hwmon_in_input:
		reg = BCM54140_HWMON_IN_VAL_REG(channel);
		break;
	case hwmon_in_min:
		reg = BCM54140_HWMON_IN_MIN_REG(channel);
		break;
	case hwmon_in_max:
		reg = BCM54140_HWMON_IN_MAX_REG(channel);
		break;
	case hwmon_in_alarm:
		bit = BCM54140_HWMON_IN_ALARM_BIT(channel);
		return bcm54140_hwmon_read_alarm(dev, bit, val);
	default:
		return -EOPNOTSUPP;
	}

	tmp = bcm_phy_read_rdb(phydev, reg);
	if (tmp < 0)
		return tmp;

	tmp &= BCM54140_HWMON_IN_MASK(channel);
	*val = BCM54140_HWMON_TO_IN(channel, tmp);

	return 0;
}

/* hwmon .read dispatcher. */
static int bcm54140_hwmon_read(struct device *dev,
			       enum hwmon_sensor_types type, u32 attr,
			       int channel, long *val)
{
	switch (type) {
	case hwmon_temp:
		return bcm54140_hwmon_read_temp(dev, attr, val);
	case hwmon_in:
		return bcm54140_hwmon_read_in(dev, attr, channel, val);
	default:
		return -EOPNOTSUPP;
	}
}

static const char *const bcm54140_hwmon_in_labels[] = {
	"AVDDL",
	"AVDDH",
};

/* hwmon .read_string dispatcher (voltage channel labels only). */
static int bcm54140_hwmon_read_string(struct device *dev,
				      enum hwmon_sensor_types type, u32 attr,
				      int channel, const char **str)
{
	switch (type) {
	case hwmon_in:
		switch (attr) {
		case hwmon_in_label:
			*str = bcm54140_hwmon_in_labels[channel];
			return 0;
		default:
			return -EOPNOTSUPP;
		}
	default:
		return -EOPNOTSUPP;
	}
}

/* Write a temperature limit; the value is clamped to the representable
 * register range before conversion.
 */
static int bcm54140_hwmon_write_temp(struct device *dev, u32 attr,
				     int channel, long val)
{
	struct phy_device *phydev = dev_get_drvdata(dev);
	u16 mask = BCM54140_RDB_MON_TEMP_DATA_MASK;
	u16 reg;

	val = clamp_val(val, BCM54140_HWMON_TO_TEMP(mask),
			BCM54140_HWMON_TO_TEMP(0));

	switch (attr) {
	case hwmon_temp_min:
		reg = BCM54140_RDB_MON_TEMP_MIN;
		break;
	case hwmon_temp_max:
		reg = BCM54140_RDB_MON_TEMP_MAX;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return bcm_phy_modify_rdb(phydev, reg, mask,
				  BCM54140_HWMON_FROM_TEMP(val));
}

/* Write a voltage limit, clamped to the channel's representable range. */
static int bcm54140_hwmon_write_in(struct device *dev, u32 attr,
				   int channel, long val)
{
	struct phy_device *phydev = dev_get_drvdata(dev);
	u16 mask = BCM54140_HWMON_IN_MASK(channel);
	u16 reg;

	val = clamp_val(val, 0, BCM54140_HWMON_TO_IN(channel, mask));

	switch (attr) {
	case hwmon_in_min:
		reg = BCM54140_HWMON_IN_MIN_REG(channel);
		break;
	case hwmon_in_max:
		reg = BCM54140_HWMON_IN_MAX_REG(channel);
		break;
	default:
		return -EOPNOTSUPP;
	}

	return bcm_phy_modify_rdb(phydev, reg, mask,
				  BCM54140_HWMON_FROM_IN(channel, val));
}

/* hwmon .write dispatcher. */
static int bcm54140_hwmon_write(struct device *dev,
				enum hwmon_sensor_types type, u32 attr,
				int channel, long val)
{
	switch (type) {
	case hwmon_temp:
		return bcm54140_hwmon_write_temp(dev, attr, channel, val);
	case hwmon_in:
		return bcm54140_hwmon_write_in(dev, attr, channel, val);
	default:
		return -EOPNOTSUPP;
	}
}

static const struct hwmon_channel_info * const bcm54140_hwmon_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_INPUT | HWMON_T_MIN | HWMON_T_MAX |
			   HWMON_T_ALARM),
	HWMON_CHANNEL_INFO(in,
			   HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
			   HWMON_I_ALARM | HWMON_I_LABEL,
			   HWMON_I_INPUT | HWMON_I_MIN | HWMON_I_MAX |
			   HWMON_I_ALARM | HWMON_I_LABEL),
	NULL
};

static const struct hwmon_ops bcm54140_hwmon_ops = {
	.is_visible = bcm54140_hwmon_is_visible,
	.read = bcm54140_hwmon_read,
	.read_string = bcm54140_hwmon_read_string,
	.write = bcm54140_hwmon_write,
};

static const struct hwmon_chip_info bcm54140_chip_info = {
	.ops = &bcm54140_hwmon_ops,
	.info = bcm54140_hwmon_info,
};

/* Power up the monitor block and select round-robin sampling of all
 * sensors in 3.3V voltage mode.
 */
static int bcm54140_enable_monitoring(struct phy_device *phydev)
{
	u16 mask, set;

	/* 3.3V voltage mode */
	set = BCM54140_RDB_MON_CTRL_V_MODE;

	/* select round-robin */
	mask = BCM54140_RDB_MON_CTRL_SEL_MASK;
	set |= FIELD_PREP(BCM54140_RDB_MON_CTRL_SEL_MASK,
			  BCM54140_RDB_MON_CTRL_SEL_RR);

	/* remove power-down bit */
	mask |= BCM54140_RDB_MON_CTRL_PWR_DOWN;

	return bcm_phy_modify_rdb(phydev, BCM54140_RDB_MON_CTRL, mask, set);
}

/* One-time probe work for the whole quad PHY package: enable the monitor
 * and register a single hwmon device.
 */
static int bcm54140_probe_once(struct phy_device *phydev)
{
	struct device *hwmon;
	int ret;

	/* enable hardware monitoring */
	ret = bcm54140_enable_monitoring(phydev);
	if (ret)
		return ret;

	hwmon = devm_hwmon_device_register_with_info(&phydev->mdio.dev,
						     "BCM54140", phydev,
						     &bcm54140_chip_info,
						     NULL);
	return PTR_ERR_OR_ZERO(hwmon);
}
#endif

/* Read a global RDB register via the package base PHY (port 0); the MDIO
 * bus lock serializes the two-step address/data access.
 */
static int bcm54140_base_read_rdb(struct phy_device *phydev, u16 rdb)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
				  MII_BCM54XX_RDB_ADDR, rdb);
	if (ret < 0)
		goto out;

	ret = __phy_package_read(phydev, BCM54140_BASE_ADDR,
				 MII_BCM54XX_RDB_DATA);

out:
	phy_unlock_mdio_bus(phydev);
	return ret;
}

/* Write a global RDB register via the package base PHY (port 0). */
static int bcm54140_base_write_rdb(struct phy_device *phydev,
				   u16 rdb, u16 val)
{
	int ret;

	phy_lock_mdio_bus(phydev);
	ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
				  MII_BCM54XX_RDB_ADDR, rdb);
	if (ret < 0)
		goto out;

	ret = __phy_package_write(phydev, BCM54140_BASE_ADDR,
				  MII_BCM54XX_RDB_DATA, val);

out:
	phy_unlock_mdio_bus(phydev);
	return ret;
}

/* Under some circumstances a core PLL may not lock, this will then prevent
 * a successful link establishment. Restart the PLL after the voltages are
 * stable to workaround this issue.
 */
static int bcm54140_b0_workaround(struct phy_device *phydev)
{
	int spare3;
	int ret;

	spare3 = bcm_phy_read_rdb(phydev, BCM54140_RDB_SPARE3);
	if (spare3 < 0)
		return spare3;

	spare3 &= ~BCM54140_RDB_SPARE3_BIT0;

	ret = bcm_phy_write_rdb(phydev, BCM54140_RDB_SPARE3, spare3);
	if (ret)
		return ret;

	/* Toggle power-down to restart the PLL. */
	ret = phy_modify(phydev, MII_BMCR, 0, BMCR_PDOWN);
	if (ret)
		return ret;

	ret = phy_modify(phydev, MII_BMCR, BMCR_PDOWN, 0);
	if (ret)
		return ret;

	spare3 |= BCM54140_RDB_SPARE3_BIT0;

	return bcm_phy_write_rdb(phydev, BCM54140_RDB_SPARE3, spare3);
}

/* The BCM54140 is a quad PHY where only the first port has access to the
 * global register. Thus we need to find out its PHY address.
 *
 */
static int bcm54140_get_base_addr_and_port(struct phy_device *phydev)
{
	struct bcm54140_priv *priv = phydev->priv;
	struct mii_bus *bus = phydev->mdio.bus;
	int addr, min_addr, max_addr;
	int step = 1;
	u32 phy_id;
	int tmp;

	min_addr = phydev->mdio.addr;
	max_addr = phydev->mdio.addr;
	addr = phydev->mdio.addr;

	/* We scan forward and backwards and look for PHYs which have the
	 * same phy_id like we do. Step 1 will scan forward, step 2
	 * backwards. Once we are finished, we have a min_addr and
	 * max_addr which resembles the range of PHY addresses of the same
	 * type of PHY. There is one caveat; there may be many PHYs of
	 * the same type, but we know that each PHY takes exactly 4
	 * consecutive addresses. Therefore we can deduce our offset
	 * to the base address of this quad PHY.
	 */
	while (1) {
		if (step == 3) {
			break;
		} else if (step == 1) {
			max_addr = addr;
			addr++;
		} else {
			min_addr = addr;
			addr--;
		}

		if (addr < 0 || addr >= PHY_MAX_ADDR) {
			addr = phydev->mdio.addr;
			step++;
			continue;
		}

		/* read the PHY id */
		tmp = mdiobus_read(bus, addr, MII_PHYSID1);
		if (tmp < 0)
			return tmp;
		phy_id = tmp << 16;
		tmp = mdiobus_read(bus, addr, MII_PHYSID2);
		if (tmp < 0)
			return tmp;
		phy_id |= tmp;

		/* see if it is still the same PHY */
		if ((phy_id & phydev->drv->phy_id_mask) !=
		    (phydev->drv->phy_id & phydev->drv->phy_id_mask)) {
			addr = phydev->mdio.addr;
			step++;
		}
	}

	/* The range we get should be a multiple of four. Please note that both
	 * the min_addr and max_addr are inclusive. So we have to add one if we
	 * subtract them.
	 */
	if ((max_addr - min_addr + 1) % 4) {
		dev_err(&phydev->mdio.dev,
			"Detected Quad PHY IDs %d..%d doesn't make sense.\n",
			min_addr, max_addr);
		return -EINVAL;
	}

	priv->port = (phydev->mdio.addr - min_addr) % 4;
	priv->base_addr = phydev->mdio.addr - priv->port;

	return 0;
}

/* Per-port probe: allocate priv data, discover our port/base address and
 * join the quad-PHY package; hwmon is set up once per package.
 */
static int bcm54140_probe(struct phy_device *phydev)
{
	struct bcm54140_priv *priv;
	int ret;

	priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	phydev->priv = priv;

	ret = bcm54140_get_base_addr_and_port(phydev);
	if (ret)
		return ret;

	devm_phy_package_join(&phydev->mdio.dev, phydev, priv->base_addr, 0);

#if IS_ENABLED(CONFIG_HWMON)
	mutex_init(&priv->alarm_lock);

	if (phy_package_init_once(phydev)) {
		ret = bcm54140_probe_once(phydev);
		if (ret)
			return ret;
	}
#endif

	phydev_dbg(phydev, "probed (port %d, base PHY address %d)\n",
		   priv->port, priv->base_addr);

	return 0;
}

/* Apply errata, unmask link/speed/duplex interrupts, set up the LEDs and
 * take the port out of super isolate mode.
 */
static int bcm54140_config_init(struct phy_device *phydev)
{
	u16 reg = 0xffff;
	int ret;

	/* Apply hardware errata */
	if (BCM54140_PHY_ID_REV(phydev->phy_id) == BCM54140_REV_B0) {
		ret = bcm54140_b0_workaround(phydev);
		if (ret)
			return ret;
	}

	/* Unmask events we are interested in. */
	reg &= ~(BCM54140_RDB_INT_DUPLEX |
		 BCM54140_RDB_INT_SPEED |
		 BCM54140_RDB_INT_LINK);
	ret = bcm_phy_write_rdb(phydev, BCM54140_RDB_IMR, reg);
	if (ret)
		return ret;

	/* LED1=LINKSPD[1], LED2=LINKSPD[2], LED3=LINK/ACTIVITY */
	ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_SPARE1,
				 0, BCM54140_RDB_SPARE1_LSLM);
	if (ret)
		return ret;

	ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_LED_CTRL,
				 0, BCM54140_RDB_LED_CTRL_ACTLINK0);
	if (ret)
		return ret;

	/* disable super isolate mode */
	return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_PWR,
				  BCM54140_RDB_C_PWR_ISOLATE, 0);
}

/* Threaded interrupt handler; only triggers the PHY state machine when an
 * unmasked interrupt bit is pending.
 */
static irqreturn_t bcm54140_handle_interrupt(struct phy_device *phydev)
{
	int irq_status, irq_mask;

	irq_status = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
	if (irq_status < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}

	irq_mask = bcm_phy_read_rdb(phydev, BCM54140_RDB_IMR);
	if (irq_mask < 0) {
		phy_error(phydev);
		return IRQ_NONE;
	}
	irq_mask = ~irq_mask;

	if (!(irq_status & irq_mask))
		return IRQ_NONE;

	phy_trigger_machine(phydev);

	return IRQ_HANDLED;
}

/* Acknowledge pending interrupts; the ISR register clears on read. */
static int bcm54140_ack_intr(struct phy_device *phydev)
{
	int reg;

	/* clear pending interrupts */
	reg = bcm_phy_read_rdb(phydev, BCM54140_RDB_ISR);
	if (reg < 0)
		return reg;

	return 0;
}

/* Enable or disable this port's interrupt in the global TOP_IMR register,
 * acking any stale interrupt on the correct side of the mask update.
 */
static int bcm54140_config_intr(struct phy_device *phydev)
{
	struct bcm54140_priv *priv = phydev->priv;
	static const u16 port_to_imr_bit[] = {
		BCM54140_RDB_TOP_IMR_PORT0, BCM54140_RDB_TOP_IMR_PORT1,
		BCM54140_RDB_TOP_IMR_PORT2, BCM54140_RDB_TOP_IMR_PORT3,
	};
	int reg, err;

	if (priv->port >= ARRAY_SIZE(port_to_imr_bit))
		return -EINVAL;

	reg = bcm54140_base_read_rdb(phydev, BCM54140_RDB_TOP_IMR);
	if (reg < 0)
		return reg;

	if (phydev->interrupts == PHY_INTERRUPT_ENABLED) {
		err = bcm54140_ack_intr(phydev);
		if (err)
			return err;

		reg &= ~port_to_imr_bit[priv->port];
		err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
	} else {
		reg |= port_to_imr_bit[priv->port];
		err = bcm54140_base_write_rdb(phydev, BCM54140_RDB_TOP_IMR, reg);
		if (err)
			return err;

		err = bcm54140_ack_intr(phydev);
	}

	return err;
}

/* Report the downshift (wirespeed) retry setting: 0 = disabled, 1 = retry
 * limit disabled, otherwise limit field + 2.
 */
static int bcm54140_get_downshift(struct phy_device *phydev, u8 *data)
{
	int val;

	val = bcm_phy_read_rdb(phydev, BCM54140_RDB_C_MISC_CTRL);
	if (val < 0)
		return val;

	if (!(val & BCM54140_RDB_C_MISC_CTRL_WS_EN)) {
		*data = DOWNSHIFT_DEV_DISABLE;
		return 0;
	}

	val = bcm_phy_read_rdb(phydev, BCM54140_RDB_SPARE2);
	if (val < 0)
		return val;

	if (val & BCM54140_RDB_SPARE2_WS_RTRY_DIS)
		*data = 1;
	else
		*data = FIELD_GET(BCM54140_RDB_SPARE2_WS_RTRY_LIMIT, val) + 2;

	return 0;
}

/* Configure downshift: cnt==0 disables wirespeed, cnt==1 disables the retry
 * limit, 2..BCM54140_MAX_DOWNSHIFT programs the retry limit field (cnt - 2).
 */
static int bcm54140_set_downshift(struct phy_device *phydev, u8 cnt)
{
	u16 mask, set;
	int ret;

	if (cnt > BCM54140_MAX_DOWNSHIFT && cnt != DOWNSHIFT_DEV_DEFAULT_COUNT)
		return -EINVAL;

	if (!cnt)
		return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_MISC_CTRL,
					  BCM54140_RDB_C_MISC_CTRL_WS_EN, 0);

	if (cnt == DOWNSHIFT_DEV_DEFAULT_COUNT)
		cnt = BCM54140_DEFAULT_DOWNSHIFT;

	if (cnt == 1) {
		mask = 0;
		set = BCM54140_RDB_SPARE2_WS_RTRY_DIS;
	} else {
		mask = BCM54140_RDB_SPARE2_WS_RTRY_DIS;
		mask |= BCM54140_RDB_SPARE2_WS_RTRY_LIMIT;
		set = FIELD_PREP(BCM54140_RDB_SPARE2_WS_RTRY_LIMIT, cnt - 2);
	}
	ret = bcm_phy_modify_rdb(phydev, BCM54140_RDB_SPARE2,
				 mask, set);
	if (ret)
		return ret;

	return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_MISC_CTRL,
				  0, BCM54140_RDB_C_MISC_CTRL_WS_EN);
}

/* Report the energy-detect power-down (auto power-down) configuration as an
 * ethtool EDPD tx interval (ms), or ETHTOOL_PHY_EDPD_DISABLE.
 */
static int bcm54140_get_edpd(struct phy_device *phydev, u16 *tx_interval)
{
	int val;

	val = bcm_phy_read_rdb(phydev, BCM54140_RDB_C_APWR);
	if (val < 0)
		return val;

	switch (FIELD_GET(BCM54140_RDB_C_APWR_APD_MODE_MASK, val)) {
	case BCM54140_RDB_C_APWR_APD_MODE_DIS:
	case BCM54140_RDB_C_APWR_APD_MODE_DIS2:
		*tx_interval = ETHTOOL_PHY_EDPD_DISABLE;
		break;
	case BCM54140_RDB_C_APWR_APD_MODE_EN:
	case BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG:
		switch (FIELD_GET(BCM54140_RDB_C_APWR_SLP_TIM_MASK, val)) {
		case BCM54140_RDB_C_APWR_SLP_TIM_2_7:
			*tx_interval = 2700;
			break;
		case BCM54140_RDB_C_APWR_SLP_TIM_5_4:
			*tx_interval = 5400;
			break;
		}
	}

	return 0;
}

/* Configure auto power-down from an ethtool EDPD tx interval; only the two
 * hardware sleep timers (2.7s / 5.4s) are representable.
 */
static int bcm54140_set_edpd(struct phy_device *phydev, u16 tx_interval)
{
	u16 mask, set;

	mask = BCM54140_RDB_C_APWR_APD_MODE_MASK;
	if (tx_interval == ETHTOOL_PHY_EDPD_DISABLE)
		set = FIELD_PREP(BCM54140_RDB_C_APWR_APD_MODE_MASK,
				 BCM54140_RDB_C_APWR_APD_MODE_DIS);
	else
		set = FIELD_PREP(BCM54140_RDB_C_APWR_APD_MODE_MASK,
				 BCM54140_RDB_C_APWR_APD_MODE_EN_ANEG);

	/* enable single pulse mode */
	set |= BCM54140_RDB_C_APWR_SINGLE_PULSE;

	/* set sleep timer */
	mask |= BCM54140_RDB_C_APWR_SLP_TIM_MASK;
	switch (tx_interval) {
	case ETHTOOL_PHY_EDPD_DFLT_TX_MSECS:
	case ETHTOOL_PHY_EDPD_DISABLE:
	case 2700:
		set |= BCM54140_RDB_C_APWR_SLP_TIM_2_7;
		break;
	case 5400:
		set |= BCM54140_RDB_C_APWR_SLP_TIM_5_4;
		break;
	default:
		return -EINVAL;
	}

	return bcm_phy_modify_rdb(phydev, BCM54140_RDB_C_APWR, mask, set);
}

/* ethtool get_tunable dispatcher. */
static int bcm54140_get_tunable(struct phy_device *phydev,
				struct ethtool_tunable *tuna, void *data)
{
	switch (tuna->id) {
	case ETHTOOL_PHY_DOWNSHIFT:
		return bcm54140_get_downshift(phydev, data);
	case ETHTOOL_PHY_EDPD:
		return bcm54140_get_edpd(phydev, data);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool set_tunable dispatcher. */
static int bcm54140_set_tunable(struct phy_device *phydev,
				struct ethtool_tunable *tuna, const void *data)
{
	switch (tuna->id) {
	case ETHTOOL_PHY_DOWNSHIFT:
		return bcm54140_set_downshift(phydev, *(const u8 *)data);
	case ETHTOOL_PHY_EDPD:
		return bcm54140_set_edpd(phydev, *(const u16 *)data);
	default:
		return -EOPNOTSUPP;
	}
}

static struct phy_driver bcm54140_drivers[] = {
	{
		.phy_id		= PHY_ID_BCM54140,
		.phy_id_mask	= BCM54140_PHY_ID_MASK,
		.name		= "Broadcom BCM54140",
		.flags		= PHY_POLL_CABLE_TEST,
		.features	= PHY_GBIT_FEATURES,
		.config_init	= bcm54140_config_init,
		.handle_interrupt = bcm54140_handle_interrupt,
		.config_intr	= bcm54140_config_intr,
		.probe		= bcm54140_probe,
		.suspend	= genphy_suspend,
		.resume		= genphy_resume,
		.soft_reset	= genphy_soft_reset,
		.get_tunable	= bcm54140_get_tunable,
		.set_tunable	= bcm54140_set_tunable,
		.cable_test_start = bcm_phy_cable_test_start_rdb,
		.cable_test_get_status = bcm_phy_cable_test_get_status_rdb,
	},
};
module_phy_driver(bcm54140_drivers);

static struct mdio_device_id __maybe_unused bcm54140_tbl[] = {
	{ PHY_ID_BCM54140, BCM54140_PHY_ID_MASK },
	{ }
};

MODULE_AUTHOR("Michael Walle");
MODULE_DESCRIPTION("Broadcom BCM54140 PHY driver");
MODULE_DEVICE_TABLE(mdio, bcm54140_tbl);
MODULE_LICENSE("GPL");
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * ispstat.h
 *
 * TI OMAP3 ISP - Statistics core
 *
 * Copyright (C) 2010 Nokia Corporation
 * Copyright (C) 2009 Texas Instruments, Inc
 *
 * Contacts: David Cohen <[email protected]>
 *	     Laurent Pinchart <[email protected]>
 *	     Sakari Ailus <[email protected]>
 */

#ifndef OMAP3_ISP_STAT_H
#define OMAP3_ISP_STAT_H

#include <linux/types.h>
#include <linux/omap3isp.h>
#include <media/v4l2-event.h>

#include "isp.h"
#include "ispvideo.h"

#define STAT_MAX_BUFS		5
#define STAT_NEVENTS		8

#define STAT_BUF_DONE		0	/* Buffer is ready */
#define STAT_NO_BUF		1	/* An error has occurred */
#define STAT_BUF_WAITING_DMA	2	/* Histogram only: DMA is running */

struct dma_chan;
struct ispstat;

/* One DMA-able statistics buffer and its bookkeeping metadata. */
struct ispstat_buffer {
	struct sg_table sgt;		/* scatter-gather table for DMA */
	void *virt_addr;		/* CPU-visible mapping */
	dma_addr_t dma_addr;		/* device address */
	struct timespec64 ts;		/* capture timestamp */
	u32 buf_size;			/* valid data size in bytes */
	u32 frame_number;		/* frame the data belongs to */
	u16 config_counter;		/* config generation that produced it */
	u8 empty;			/* non-zero if buffer holds no data */
};

/* Per-module operations implemented by each statistics engine
 * (H3A AEWB, H3A AF, histogram).
 */
struct ispstat_ops {
	/*
	 * Validate new params configuration.
	 * new_conf->buf_size value must be changed to the exact buffer size
	 * necessary for the new configuration if it's smaller.
	 */
	int (*validate_params)(struct ispstat *stat, void *new_conf);

	/*
	 * Save new params configuration.
	 * stat->priv->buf_size value must be set to the exact buffer size for
	 * the new configuration.
	 * stat->update is set to 1 if new configuration is different than
	 * current one.
	 */
	void (*set_params)(struct ispstat *stat, void *new_conf);

	/* Apply stored configuration. */
	void (*setup_regs)(struct ispstat *stat, void *priv);

	/* Enable/Disable module. */
	void (*enable)(struct ispstat *stat, int enable);

	/* Verify is module is busy. */
	int (*busy)(struct ispstat *stat);

	/* Used for specific operations during generic buf process task. */
	int (*buf_process)(struct ispstat *stat);
};

enum ispstat_state_t {
	ISPSTAT_DISABLED = 0,
	ISPSTAT_DISABLING,
	ISPSTAT_ENABLED,
	ISPSTAT_ENABLING,
	ISPSTAT_SUSPENDED,
};

/* State of one ISP statistics module, exposed as a V4L2 subdev. */
struct ispstat {
	struct v4l2_subdev subdev;
	struct media_pad pad;	/* sink pad */

	/* Control */
	unsigned configured:1;		/* a valid configuration was applied */
	unsigned update:1;		/* new config pending register write */
	unsigned buf_processing:1;	/* buffer handling in progress */
	unsigned sbl_ovl_recover:1;	/* SBL overflow recovery requested */
	u8 inc_config;			/* pending config increments */
	atomic_t buf_err;		/* error flagged on current buffer */
	enum ispstat_state_t state;	/* enabling/disabling state */
	struct isp_device *isp;
	void *priv;		/* pointer to priv config struct */
	void *recover_priv;	/* pointer to recover priv configuration */
	struct mutex ioctl_lock; /* serialize private ioctl */

	const struct ispstat_ops *ops;

	/* Buffer */
	u8 wait_acc_frames;	/* frames to accumulate before buf is ready */
	u16 config_counter;	/* generation counter for configurations */
	u32 frame_number;
	u32 buf_size;		/* size in use */
	u32 buf_alloc_size;	/* allocated size of each buffer */
	struct dma_chan *dma_ch;
	unsigned long event_type;	/* V4L2 event emitted on buffer done */
	struct ispstat_buffer *buf;		/* buffer pool */
	struct ispstat_buffer *active_buf;	/* buffer being filled by HW */
	struct ispstat_buffer *locked_buf;	/* buffer held by userspace */
};

struct ispstat_generic_config {
	/*
	 * Fields must be in the same order as in:
	 *  - omap3isp_h3a_aewb_config
	 *  - omap3isp_h3a_af_config
	 *  - omap3isp_hist_config
	 */
	u32 buf_size;
	u16 config_counter;
};

int omap3isp_stat_config(struct ispstat *stat, void *new_conf);
int omap3isp_stat_request_statistics(struct ispstat *stat,
				     struct omap3isp_stat_data *data);
int omap3isp_stat_request_statistics_time32(struct ispstat *stat,
					    struct omap3isp_stat_data_time32 *data);
int omap3isp_stat_init(struct ispstat *stat, const char *name,
		       const struct v4l2_subdev_ops *sd_ops);
void omap3isp_stat_cleanup(struct ispstat *stat);
int omap3isp_stat_subscribe_event(struct v4l2_subdev *subdev,
				  struct v4l2_fh *fh,
				  struct v4l2_event_subscription *sub);
int omap3isp_stat_unsubscribe_event(struct v4l2_subdev *subdev,
				    struct v4l2_fh *fh,
				    struct v4l2_event_subscription *sub);
int omap3isp_stat_s_stream(struct v4l2_subdev *subdev, int enable);

int omap3isp_stat_busy(struct ispstat *stat);
int omap3isp_stat_pcr_busy(struct ispstat *stat);
void omap3isp_stat_suspend(struct ispstat *stat);
void omap3isp_stat_resume(struct ispstat *stat);
int omap3isp_stat_enable(struct ispstat *stat, u8 enable);
void omap3isp_stat_sbl_overflow(struct ispstat *stat);
void omap3isp_stat_isr(struct ispstat *stat);
void omap3isp_stat_isr_frame_sync(struct ispstat *stat);
void omap3isp_stat_dma_isr(struct ispstat *stat);
int omap3isp_stat_register_entities(struct ispstat *stat,
				    struct v4l2_device *vdev);
void omap3isp_stat_unregister_entities(struct ispstat *stat);

#endif /* OMAP3_ISP_STAT_H */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * IPVS         An implementation of the IP virtual server support for the
 *              LINUX operating system.  IPVS is now implemented as a module
 *              over the Netfilter framework. IPVS can be used to build a
 *              high-performance and highly available server based on a
 *              cluster of servers.
 *
 * Authors:     Wensong Zhang <[email protected]>
 *              Peter Kese <[email protected]>
 *
 * Changes:
 */

#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <asm/string.h>
#include <linux/kmod.h>
#include <linux/sysctl.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(ip_vs_scheduler_err);

/*
 *  IPVS scheduler list
 */
static LIST_HEAD(ip_vs_schedulers);

/* semaphore for schedulers */
static DEFINE_MUTEX(ip_vs_sched_mutex);

/*
 *  Bind a service with a scheduler
 *
 *  Runs the scheduler's init_service() hook first; the scheduler is only
 *  published (via RCU) to readers once initialization succeeded.
 */
int ip_vs_bind_scheduler(struct ip_vs_service *svc,
			 struct ip_vs_scheduler *scheduler)
{
	int ret;

	if (scheduler->init_service) {
		ret = scheduler->init_service(svc);
		if (ret) {
			pr_err("%s(): init error\n", __func__);
			return ret;
		}
	}
	rcu_assign_pointer(svc->scheduler, scheduler);
	return 0;
}

/*
 *  Unbind a service with its scheduler
 *
 *  Only runs the done_service() teardown hook; clearing svc->scheduler
 *  itself is intentionally left to the caller (see comment below).
 */
void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
			    struct ip_vs_scheduler *sched)
{
	struct ip_vs_scheduler *cur_sched;

	cur_sched = rcu_dereference_protected(svc->scheduler, 1);
	/* This check proves that old 'sched' was installed */
	if (!cur_sched)
		return;

	if (sched->done_service)
		sched->done_service(svc);
	/* svc->scheduler can be set to NULL only by caller */
}

/*
 *  Get scheduler in the scheduler list by name
 *
 *  On a hit, returns the scheduler with its backing module's refcount
 *  raised (the caller must drop it with ip_vs_scheduler_put()).
 */
static struct ip_vs_scheduler *ip_vs_sched_getbyname(const char *sched_name)
{
	struct ip_vs_scheduler *sched;

	IP_VS_DBG(2, "%s(): sched_name \"%s\"\n", __func__, sched_name);

	mutex_lock(&ip_vs_sched_mutex);

	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
		/*
		 * Test and get the modules atomically
		 */
		if (sched->module && !try_module_get(sched->module)) {
			/*
			 * This scheduler is just deleted
			 */
			continue;
		}
		if (strcmp(sched_name, sched->name) == 0) {
			/* HIT */
			mutex_unlock(&ip_vs_sched_mutex);
			return sched;
		}
		/* not a match: drop the reference taken above */
		module_put(sched->module);
	}

	mutex_unlock(&ip_vs_sched_mutex);
	return NULL;
}

/*
 *  Lookup scheduler and try to load it if it doesn't exist
 */
struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name)
{
	struct ip_vs_scheduler *sched;

	/*
	 *  Search for the scheduler by sched_name
	 */
	sched = ip_vs_sched_getbyname(sched_name);

	/*
	 *  If scheduler not found, load the module and search again
	 */
	if (sched == NULL) {
		request_module("ip_vs_%s", sched_name);
		sched = ip_vs_sched_getbyname(sched_name);
	}

	return sched;
}

/* Release the module reference taken by ip_vs_scheduler_get(). */
void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
{
	if (scheduler)
		module_put(scheduler->module);
}

/*
 *  Common error output helper for schedulers
 *
 *  Rate-limited; formats the service identity (fwmark, or af/proto/addr/port)
 *  together with the current scheduler name.
 */
void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
{
	struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
	char *sched_name = sched ? sched->name : "none";

	if (svc->fwmark) {
		IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
			     sched_name, svc->fwmark, svc->fwmark, msg);
#ifdef CONFIG_IP_VS_IPV6
	} else if (svc->af == AF_INET6) {
		IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
			     sched_name, ip_vs_proto_name(svc->protocol),
			     &svc->addr.in6, ntohs(svc->port), msg);
#endif
	} else {
		IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
			     sched_name, ip_vs_proto_name(svc->protocol),
			     &svc->addr.ip, ntohs(svc->port), msg);
	}
}

/*
 *  Register a scheduler in the scheduler list
 *
 *  Rejects NULL/unnamed schedulers, double registration (non-empty n_list)
 *  and duplicate names.  Holds an ip_vs use count for as long as the
 *  scheduler stays registered.
 */
int register_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
	struct ip_vs_scheduler *sched;

	if (!scheduler) {
		pr_err("%s(): NULL arg\n", __func__);
		return -EINVAL;
	}

	if (!scheduler->name) {
		pr_err("%s(): NULL scheduler_name\n", __func__);
		return -EINVAL;
	}

	/* increase the module use count */
	if (!ip_vs_use_count_inc())
		return -ENOENT;

	mutex_lock(&ip_vs_sched_mutex);

	if (!list_empty(&scheduler->n_list)) {
		mutex_unlock(&ip_vs_sched_mutex);
		ip_vs_use_count_dec();
		pr_err("%s(): [%s] scheduler already linked\n",
		       __func__, scheduler->name);
		return -EINVAL;
	}

	/*
	 *  Make sure that the scheduler with this name doesn't exist
	 *  in the scheduler list.
	 */
	list_for_each_entry(sched, &ip_vs_schedulers, n_list) {
		if (strcmp(scheduler->name, sched->name) == 0) {
			mutex_unlock(&ip_vs_sched_mutex);
			ip_vs_use_count_dec();
			pr_err("%s(): [%s] scheduler already existed "
			       "in the system\n", __func__, scheduler->name);
			return -EINVAL;
		}
	}
	/*
	 *	Add it into the d-linked scheduler list
	 */
	list_add(&scheduler->n_list, &ip_vs_schedulers);
	mutex_unlock(&ip_vs_sched_mutex);

	pr_info("[%s] scheduler registered.\n", scheduler->name);

	return 0;
}

/*
 *  Unregister a scheduler from the scheduler list
 */
int unregister_ip_vs_scheduler(struct ip_vs_scheduler *scheduler)
{
	if (!scheduler) {
		pr_err("%s(): NULL arg\n", __func__);
		return -EINVAL;
	}

	mutex_lock(&ip_vs_sched_mutex);
	if (list_empty(&scheduler->n_list)) {
		mutex_unlock(&ip_vs_sched_mutex);
		pr_err("%s(): [%s] scheduler is not in the list. failed\n",
		       __func__, scheduler->name);
		return -EINVAL;
	}

	/*
	 *	Remove it from the d-linked scheduler list
	 */
	list_del(&scheduler->n_list);
	mutex_unlock(&ip_vs_sched_mutex);

	/* decrease the module use count */
	ip_vs_use_count_dec();

	pr_info("[%s] scheduler unregistered.\n", scheduler->name);

	return 0;
}
// SPDX-License-Identifier: GPL-2.0
/* Author: Dmitry Safonov <[email protected]> */
/*
 * TCP-AO selftest: exercises key mismatches (wrong password, wrong
 * snd/rcv ids, wrong address, wrong maclen) between a server and a
 * client thread, checking both MIB counters and per-socket AO counters.
 * Built on the shared aolib.h test framework; the two threads advance
 * in lockstep via synchronize_threads().
 */
#include <inttypes.h>
#include "aolib.h"

/* True when the current test expects the given fault kind. */
#define fault(type)	(inj == FAULT_ ## type)

/*
 * Add an AO key with an explicit (possibly wrong) MAC length to @sk.
 * Returns 0 on success, negative errno otherwise; also verifies the
 * key was actually installed via getsockopt.
 */
static inline int test_add_key_maclen(int sk, const char *key, uint8_t maclen,
				      union tcp_addr in_addr, uint8_t prefix,
				      uint8_t sndid, uint8_t rcvid)
{
	struct tcp_ao_add tmp = {};
	int err;

	if (prefix > DEFAULT_TEST_PREFIX)
		prefix = DEFAULT_TEST_PREFIX;

	err = test_prepare_key(&tmp, DEFAULT_TEST_ALGO, in_addr, false, false,
			       prefix, 0, sndid, rcvid, maclen,
			       0, strlen(key), key);
	if (err)
		return err;

	err = setsockopt(sk, IPPROTO_TCP, TCP_AO_ADD_KEY, &tmp, sizeof(tmp));
	if (err < 0)
		return -errno;

	return test_verify_socket_key(sk, &tmp);
}

/*
 * Server side of one scenario: listen, optionally install an AO key
 * (@pwd may be NULL for a non-AO server), then either accept or expect
 * a timeout depending on @inj.  Afterwards compare the named netstat
 * counter (@cnt_name) and the socket AO counters against expectations.
 */
static void try_accept(const char *tst_name, unsigned int port, const char *pwd,
		       union tcp_addr addr, uint8_t prefix,
		       uint8_t sndid, uint8_t rcvid, uint8_t maclen,
		       const char *cnt_name, test_cnt cnt_expected,
		       fault_t inj)
{
	struct tcp_ao_counters ao_cnt1, ao_cnt2;
	uint64_t before_cnt = 0, after_cnt = 0; /* silence GCC */
	int lsk, err, sk = 0;
	time_t timeout;

	lsk = test_listen_socket(this_ip_addr, port, 1);

	if (pwd && test_add_key_maclen(lsk, pwd, maclen, addr, prefix, sndid, rcvid))
		test_error("setsockopt(TCP_AO_ADD_KEY)");

	if (cnt_name)
		before_cnt = netstat_get_one(cnt_name, NULL);
	if (pwd && test_get_tcp_ao_counters(lsk, &ao_cnt1))
		test_error("test_get_tcp_ao_counters()");

	synchronize_threads(); /* preparations done */

	/* use the short timeout when the connection is expected to fail */
	timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC;
	err = test_wait_fd(lsk, timeout, 0);
	if (err == -ETIMEDOUT) {
		if (!fault(TIMEOUT))
			test_fail("timed out for accept()");
	} else if (err < 0) {
		test_error("test_wait_fd()");
	} else {
		if (fault(TIMEOUT))
			test_fail("ready to accept");

		sk = accept(lsk, NULL, NULL);
		if (sk < 0) {
			test_error("accept()");
		} else {
			if (fault(TIMEOUT))
				test_fail("%s: accepted", tst_name);
		}
	}

	synchronize_threads(); /* before counter checks */
	if (pwd && test_get_tcp_ao_counters(lsk, &ao_cnt2))
		test_error("test_get_tcp_ao_counters()");

	close(lsk);
	if (pwd)
		test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected);

	if (!cnt_name)
		goto out;

	after_cnt = netstat_get_one(cnt_name, NULL);

	if (after_cnt <= before_cnt) {
		test_fail("%s: %s counter did not increase: %" PRIu64 " <= %" PRIu64,
			  tst_name, cnt_name, after_cnt, before_cnt);
	} else {
		test_ok("%s: counter %s increased %" PRIu64 " => %" PRIu64,
			tst_name, cnt_name, before_cnt, after_cnt);
	}

out:
	synchronize_threads(); /* close() */
	if (sk > 0)
		close(sk);
}

/* Server thread: runs the accept side of every scenario in order. */
static void *server_fn(void *arg)
{
	union tcp_addr wrong_addr, network_addr;
	unsigned int port = test_server_port;

	if (inet_pton(TEST_FAMILY, TEST_WRONG_IP, &wrong_addr) != 1)
		test_error("Can't convert ip address %s", TEST_WRONG_IP);

	try_accept("Non-AO server + AO client", port++, NULL,
		   this_ip_dest, -1, 100, 100, 0,
		   "TCPAOKeyNotFound", 0, FAULT_TIMEOUT);

	try_accept("AO server + Non-AO client", port++, DEFAULT_TEST_PASSWORD,
		   this_ip_dest, -1, 100, 100, 0,
		   "TCPAORequired", TEST_CNT_AO_REQUIRED, FAULT_TIMEOUT);

	try_accept("Wrong password", port++, "something that is not DEFAULT_TEST_PASSWORD",
		   this_ip_dest, -1, 100, 100, 0,
		   "TCPAOBad", TEST_CNT_BAD, FAULT_TIMEOUT);

	try_accept("Wrong rcv id", port++, DEFAULT_TEST_PASSWORD,
		   this_ip_dest, -1, 100, 101, 0,
		   "TCPAOKeyNotFound", TEST_CNT_AO_KEY_NOT_FOUND, FAULT_TIMEOUT);

	try_accept("Wrong snd id", port++, DEFAULT_TEST_PASSWORD,
		   this_ip_dest, -1, 101, 100, 0,
		   "TCPAOGood", TEST_CNT_GOOD, FAULT_TIMEOUT);

	try_accept("Different maclen", port++, DEFAULT_TEST_PASSWORD,
		   this_ip_dest, -1, 100, 100, 8,
		   "TCPAOBad", TEST_CNT_BAD, FAULT_TIMEOUT);

	try_accept("Server: Wrong addr", port++, DEFAULT_TEST_PASSWORD,
		   wrong_addr, -1, 100, 100, 0,
		   "TCPAOKeyNotFound", TEST_CNT_AO_KEY_NOT_FOUND, FAULT_TIMEOUT);

	try_accept("Client: Wrong addr", port++, NULL,
		   this_ip_dest, -1, 100, 100, 0, NULL, 0, FAULT_TIMEOUT);

	try_accept("rcv id != snd id", port++, DEFAULT_TEST_PASSWORD,
		   this_ip_dest, -1, 200, 100, 0,
		   "TCPAOGood", TEST_CNT_GOOD, 0);

	if (inet_pton(TEST_FAMILY, TEST_NETWORK, &network_addr) != 1)
		test_error("Can't convert ip address %s", TEST_NETWORK);

	try_accept("Server: prefix match", port++, DEFAULT_TEST_PASSWORD,
		   network_addr, 16, 100, 100, 0,
		   "TCPAOGood", TEST_CNT_GOOD, 0);

	try_accept("Client: prefix match", port++, DEFAULT_TEST_PASSWORD,
		   this_ip_dest, -1, 100, 100, 0,
		   "TCPAOGood", TEST_CNT_GOOD, 0);

	/* client exits */
	synchronize_threads();
	return NULL;
}

/*
 * Client side of one scenario: create a socket, optionally install an
 * AO key, then connect.  @inj selects which failure mode (if any) is
 * the expected outcome; on success the AO counters are compared.
 */
static void try_connect(const char *tst_name, unsigned int port,
			const char *pwd, union tcp_addr addr, uint8_t prefix,
			uint8_t sndid, uint8_t rcvid,
			test_cnt cnt_expected, fault_t inj)
{
	struct tcp_ao_counters ao_cnt1, ao_cnt2;
	time_t timeout;
	int sk, ret;

	sk = socket(test_family, SOCK_STREAM, IPPROTO_TCP);
	if (sk < 0)
		test_error("socket()");

	if (pwd && test_add_key(sk, pwd, addr, prefix, sndid, rcvid))
		test_error("setsockopt(TCP_AO_ADD_KEY)");

	if (pwd && test_get_tcp_ao_counters(sk, &ao_cnt1))
		test_error("test_get_tcp_ao_counters()");

	synchronize_threads(); /* preparations done */

	/* use the short timeout when the connection is expected to fail */
	timeout = fault(TIMEOUT) ? TEST_RETRANSMIT_SEC : TEST_TIMEOUT_SEC;
	ret = _test_connect_socket(sk, this_ip_dest, port, timeout);

	synchronize_threads(); /* before counter checks */
	if (ret < 0) {
		if (fault(KEYREJECT) && ret == -EKEYREJECTED) {
			test_ok("%s: connect() was prevented", tst_name);
		} else if (ret == -ETIMEDOUT && fault(TIMEOUT)) {
			test_ok("%s", tst_name);
		} else if (ret == -ECONNREFUSED &&
				(fault(TIMEOUT) || fault(KEYREJECT))) {
			test_ok("%s: refused to connect", tst_name);
		} else {
			test_error("%s: connect() returned %d", tst_name, ret);
		}
		goto out;
	}

	if (fault(TIMEOUT) || fault(KEYREJECT))
		test_fail("%s: connected", tst_name);
	else
		test_ok("%s: connected", tst_name);
	if (pwd && ret > 0) {
		if (test_get_tcp_ao_counters(sk, &ao_cnt2))
			test_error("test_get_tcp_ao_counters()");
		test_tcp_ao_counters_cmp(tst_name, &ao_cnt1, &ao_cnt2, cnt_expected);
	}
out:
	synchronize_threads(); /* close() */
	if (ret > 0)
		close(sk);
}

/*
 * Client thread: mirrors server_fn() scenario-for-scenario.  The
 * trace_*_expect() calls arm expectations for the tcp tracepoints
 * that each failing handshake should emit.
 */
static void *client_fn(void *arg)
{
	union tcp_addr wrong_addr, network_addr, addr_any = {};
	unsigned int port = test_server_port;

	if (inet_pton(TEST_FAMILY, TEST_WRONG_IP, &wrong_addr) != 1)
		test_error("Can't convert ip address %s", TEST_WRONG_IP);

	trace_ao_event_expect(TCP_AO_KEY_NOT_FOUND, this_ip_addr, this_ip_dest,
			      -1, port, 0, 0, 1, 0, 0, 0, 100, 100, -1);
	try_connect("Non-AO server + AO client", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	trace_hash_event_expect(TCP_HASH_AO_REQUIRED, this_ip_addr,
				this_ip_dest, -1, port, 0, 0, 1, 0, 0, 0);
	try_connect("AO server + Non-AO client", port++, NULL,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	trace_ao_event_expect(TCP_AO_MISMATCH, this_ip_addr, this_ip_dest,
			      -1, port, 0, 0, 1, 0, 0, 0, 100, 100, -1);
	try_connect("Wrong password", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	trace_ao_event_expect(TCP_AO_KEY_NOT_FOUND, this_ip_addr, this_ip_dest,
			      -1, port, 0, 0, 1, 0, 0, 0, 100, 100, -1);
	try_connect("Wrong rcv id", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	trace_ao_event_sk_expect(TCP_AO_SYNACK_NO_KEY, this_ip_dest, addr_any,
				 port, 0, 100, 100);
	try_connect("Wrong snd id", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	trace_ao_event_expect(TCP_AO_WRONG_MACLEN, this_ip_addr, this_ip_dest,
			      -1, port, 0, 0, 1, 0, 0, 0, 100, 100, -1);
	try_connect("Different maclen", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	trace_ao_event_expect(TCP_AO_KEY_NOT_FOUND, this_ip_addr, this_ip_dest,
			      -1, port, 0, 0, 1, 0, 0, 0, 100, 100, -1);
	try_connect("Server: Wrong addr", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, 0, FAULT_TIMEOUT);

	try_connect("Client: Wrong addr", port++, DEFAULT_TEST_PASSWORD,
		    wrong_addr, -1, 100, 100, 0, FAULT_KEYREJECT);

	try_connect("rcv id != snd id", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 200, TEST_CNT_GOOD, 0);

	if (inet_pton(TEST_FAMILY, TEST_NETWORK, &network_addr) != 1)
		test_error("Can't convert ip address %s", TEST_NETWORK);

	try_connect("Server: prefix match", port++, DEFAULT_TEST_PASSWORD,
		    this_ip_dest, -1, 100, 100, TEST_CNT_GOOD, 0);

	try_connect("Client: prefix match", port++, DEFAULT_TEST_PASSWORD,
		    network_addr, 16, 100, 100, TEST_CNT_GOOD, 0);

	return NULL;
}

int main(int argc, char *argv[])
{
	/* 22 = total number of test results expected across both threads */
	test_init(22, server_fn, client_fn);
	return 0;
}
/*
 * Driver giving user-space access to the kernel's xenbus connection
 * to xenstore.
 *
 * Copyright (c) 2005, Christian Limpach
 * Copyright (c) 2005, Rusty Russell, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Changes:
 * 2008-10-07  Alex Zeffertt    Replaced /proc/xen/xenbus with xenfs filesystem
 *                              and /proc/xen compatibility mount point.
 *                              Turned xenfs into a loadable module.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/uio.h>
#include <linux/notifier.h>
#include <linux/wait.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
#include <linux/init.h>
#include <linux/namei.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/miscdevice.h>
#include <linux/workqueue.h>

#include <xen/xenbus.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#include "xenbus.h"

/* Bumped on (re)connect so stale transactions can be detected. */
unsigned int xb_dev_generation_id;

/*
 * An element of a list of outstanding transactions, for which we're
 * still waiting a reply.
 */
struct xenbus_transaction_holder {
	struct list_head list;
	struct xenbus_transaction handle;
	unsigned int generation_id;
};

/*
 * A buffer of data on the queue.
 */
struct read_buffer {
	struct list_head list;
	unsigned int cons;	/* bytes of msg[] already read by userspace */
	unsigned int len;
	char msg[] __counted_by(len);
};

struct xenbus_file_priv {
	/*
	 * msgbuffer_mutex is held while partial requests are built up
	 * and complete requests are acted on.  It therefore protects
	 * the "transactions" and "watches" lists, and the partial
	 * request length and buffer.
	 *
	 * reply_mutex protects the reply being built up to return to
	 * usermode.  It nests inside msgbuffer_mutex but may be held
	 * alone during a watch callback.
	 */
	struct mutex msgbuffer_mutex;

	/* In-progress transactions */
	struct list_head transactions;

	/* Active watches. */
	struct list_head watches;

	/* Partial request. */
	unsigned int len;
	union {
		struct xsd_sockmsg msg;
		char buffer[XENSTORE_PAYLOAD_MAX];
	} u;

	/* Response queue. */
	struct mutex reply_mutex;
	struct list_head read_buffers;
	wait_queue_head_t read_waitq;

	struct kref kref;

	struct work_struct wq;
};

/* Read out any raw xenbus messages queued up. */
static ssize_t xenbus_file_read(struct file *filp,
			       char __user *ubuf,
			       size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	struct read_buffer *rb;
	ssize_t i;
	int ret;

	mutex_lock(&u->reply_mutex);
again:
	while (list_empty(&u->read_buffers)) {
		mutex_unlock(&u->reply_mutex);
		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		ret = wait_event_interruptible(u->read_waitq,
					       !list_empty(&u->read_buffers));
		if (ret)
			return ret;
		mutex_lock(&u->reply_mutex);
	}

	rb = list_entry(u->read_buffers.next, struct read_buffer, list);
	i = 0;
	while (i < len) {
		size_t sz = min_t(size_t, len - i, rb->len - rb->cons);

		ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);

		/* copy_to_user() returns bytes NOT copied; advance by what
		 * was actually transferred before reporting any fault. */
		i += sz - ret;
		rb->cons += sz - ret;

		if (ret != 0) {
			if (i == 0)
				i = -EFAULT;
			goto out;
		}

		/* Clear out buffer if it has been consumed */
		if (rb->cons == rb->len) {
			list_del(&rb->list);
			kfree(rb);
			if (list_empty(&u->read_buffers))
				break;
			rb = list_entry(u->read_buffers.next,
					struct read_buffer, list);
		}
	}
	if (i == 0)
		goto again;

out:
	mutex_unlock(&u->reply_mutex);
	return i;
}

/*
 * Add a buffer to the queue.  Caller must hold the appropriate lock
 * if the queue is not local.  (Commonly the caller will build up
 * multiple queued buffers on a temporary local list, and then add it
 * to the appropriate list under lock once all the buffers have been
 * successfully allocated.)
 */
static int queue_reply(struct list_head *queue, const void *data, size_t len)
{
	struct read_buffer *rb;

	if (len == 0)
		return 0;
	if (len > XENSTORE_PAYLOAD_MAX)
		return -EINVAL;

	rb = kmalloc(struct_size(rb, msg, len), GFP_KERNEL);
	if (rb == NULL)
		return -ENOMEM;

	rb->cons = 0;
	rb->len = len;

	memcpy(rb->msg, data, len);

	list_add_tail(&rb->list, queue);
	return 0;
}

/*
 * Free all the read_buffer s on a list.
 * Caller must have sole reference to list.
 */
static void queue_cleanup(struct list_head *list)
{
	struct read_buffer *rb;

	while (!list_empty(list)) {
		rb = list_entry(list->next, struct read_buffer, list);
		list_del(list->next);
		kfree(rb);
	}
}

/* One registered watch, tied back to the file that created it. */
struct watch_adapter {
	struct list_head list;
	struct xenbus_watch watch;
	struct xenbus_file_priv *dev_data;
	char *token;
};

static void free_watch_adapter(struct watch_adapter *watch)
{
	kfree(watch->watch.node);
	kfree(watch->token);
	kfree(watch);
}

/* Allocate a watch_adapter with duplicated @path/@token, NULL on OOM. */
static struct watch_adapter *alloc_watch_adapter(const char *path,
						 const char *token)
{
	struct watch_adapter *watch;

	watch = kzalloc(sizeof(*watch), GFP_KERNEL);
	if (watch == NULL)
		goto out_fail;

	watch->watch.node = kstrdup(path, GFP_KERNEL);
	if (watch->watch.node == NULL)
		goto out_free;

	watch->token = kstrdup(token, GFP_KERNEL);
	if (watch->token == NULL)
		goto out_free;

	return watch;

out_free:
	free_watch_adapter(watch);

out_fail:
	return NULL;
}

/*
 * Watch callback: queue an XS_WATCH_EVENT message (header + path +
 * token) onto the owning file's read queue, all-or-nothing via a
 * staging list.
 */
static void watch_fired(struct xenbus_watch *watch,
			const char *path,
			const char *token)
{
	struct watch_adapter *adap;
	struct xsd_sockmsg hdr;
	const char *token_caller;
	int path_len, tok_len, body_len;
	int ret;
	LIST_HEAD(staging_q);

	adap = container_of(watch, struct watch_adapter, watch);

	token_caller = adap->token;

	path_len = strlen(path) + 1;
	tok_len = strlen(token_caller) + 1;
	body_len = path_len + tok_len;

	hdr.type = XS_WATCH_EVENT;
	hdr.len = body_len;

	mutex_lock(&adap->dev_data->reply_mutex);

	ret = queue_reply(&staging_q, &hdr, sizeof(hdr));
	if (!ret)
		ret = queue_reply(&staging_q, path, path_len);
	if (!ret)
		ret = queue_reply(&staging_q, token_caller, tok_len);

	if (!ret) {
		/* success: pass reply list onto watcher */
		list_splice_tail(&staging_q, &adap->dev_data->read_buffers);
		wake_up(&adap->dev_data->read_waitq);
	} else
		queue_cleanup(&staging_q);

	mutex_unlock(&adap->dev_data->reply_mutex);
}

/*
 * Final teardown of a file's private data, run from a workqueue:
 * abort outstanding transactions, unregister watches, drop queued
 * replies, free the structure.
 */
static void xenbus_worker(struct work_struct *wq)
{
	struct xenbus_file_priv *u;
	struct xenbus_transaction_holder *trans, *tmp;
	struct watch_adapter *watch, *tmp_watch;
	struct read_buffer *rb, *tmp_rb;

	u = container_of(wq, struct xenbus_file_priv, wq);

	/*
	 * No need for locking here because there are no other users,
	 * by definition.
	 */

	list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
		xenbus_transaction_end(trans->handle, 1);
		list_del(&trans->list);
		kfree(trans);
	}

	list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
		unregister_xenbus_watch(&watch->watch);
		list_del(&watch->list);
		free_watch_adapter(watch);
	}

	list_for_each_entry_safe(rb, tmp_rb, &u->read_buffers, list) {
		list_del(&rb->list);
		kfree(rb);
	}
	kfree(u);
}

static void xenbus_file_free(struct kref *kref)
{
	struct xenbus_file_priv *u;

	/*
	 * We might be called in xenbus_thread().
	 * Use workqueue to avoid deadlock.
	 */
	u = container_of(kref, struct xenbus_file_priv, kref);
	schedule_work(&u->wq);
}

/* Find an in-progress transaction by id; caller holds msgbuffer_mutex. */
static struct xenbus_transaction_holder *xenbus_get_transaction(
	struct xenbus_file_priv *u, uint32_t tx_id)
{
	struct xenbus_transaction_holder *trans;

	list_for_each_entry(trans, &u->transactions, list)
		if (trans->handle.id == tx_id)
			return trans;

	return NULL;
}

/*
 * Completion path for requests issued via xenbus_dev_request_and_reply():
 * record transaction start/end bookkeeping, then queue the reply
 * (header + body) for userspace and drop the reference taken at submit.
 */
void xenbus_dev_queue_reply(struct xb_req_data *req)
{
	struct xenbus_file_priv *u = req->par;
	struct xenbus_transaction_holder *trans = NULL;
	int rc;
	LIST_HEAD(staging_q);

	xs_request_exit(req);

	mutex_lock(&u->msgbuffer_mutex);

	if (req->type == XS_TRANSACTION_START) {
		/* the placeholder holder was added with id 0 at submit time */
		trans = xenbus_get_transaction(u, 0);
		if (WARN_ON(!trans))
			goto out;
		if (req->msg.type == XS_ERROR) {
			list_del(&trans->list);
			kfree(trans);
		} else {
			rc = kstrtou32(req->body, 10, &trans->handle.id);
			if (WARN_ON(rc))
				goto out;
		}
	} else if (req->type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, req->msg.tx_id);
		if (WARN_ON(!trans))
			goto out;
		list_del(&trans->list);
		kfree(trans);
	}

	mutex_unlock(&u->msgbuffer_mutex);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&staging_q, &req->msg, sizeof(req->msg));
	if (!rc)
		rc = queue_reply(&staging_q, req->body, req->msg.len);
	if (!rc) {
		list_splice_tail(&staging_q, &u->read_buffers);
		wake_up(&u->read_waitq);
	} else {
		queue_cleanup(&staging_q);
	}
	mutex_unlock(&u->reply_mutex);

	kfree(req->body);
	kfree(req);

	kref_put(&u->kref, xenbus_file_free);

	return;

 out:
	mutex_unlock(&u->msgbuffer_mutex);
}

/*
 * Synthesize a short reply (e.g. "OK" or an errno name) of type
 * @msg_type for userspace, reusing the tx_id of the current request.
 */
static int xenbus_command_reply(struct xenbus_file_priv *u,
				unsigned int msg_type, const char *reply)
{
	struct {
		struct xsd_sockmsg hdr;
		char body[16];
	} msg;
	int rc;

	msg.hdr = u->u.msg;
	msg.hdr.type = msg_type;
	msg.hdr.len = strlen(reply) + 1;
	if (msg.hdr.len > sizeof(msg.body))
		return -E2BIG;
	memcpy(&msg.body, reply, msg.hdr.len);

	mutex_lock(&u->reply_mutex);
	rc = queue_reply(&u->read_buffers, &msg, sizeof(msg.hdr) + msg.hdr.len);
	wake_up(&u->read_waitq);
	mutex_unlock(&u->reply_mutex);

	if (!rc)
		kref_put(&u->kref, xenbus_file_free);

	return rc;
}

/*
 * Validate and forward a (possibly transactional) request to xenstore.
 * Transaction starts get a placeholder holder (id fixed up in
 * xenbus_dev_queue_reply()); ends of stale transactions (from before a
 * reconnect, detected via generation_id) are answered locally.
 */
static int xenbus_write_transaction(unsigned msg_type,
				    struct xenbus_file_priv *u)
{
	int rc;
	struct xenbus_transaction_holder *trans = NULL;
	struct {
		struct xsd_sockmsg hdr;
		char body[];
	} *msg = (void *)u->u.buffer;

	if (msg_type == XS_TRANSACTION_START) {
		trans = kzalloc(sizeof(*trans), GFP_KERNEL);
		if (!trans) {
			rc = -ENOMEM;
			goto out;
		}
		trans->generation_id = xb_dev_generation_id;
		list_add(&trans->list, &u->transactions);
	} else if (msg->hdr.tx_id != 0 &&
		   !xenbus_get_transaction(u, msg->hdr.tx_id))
		return xenbus_command_reply(u, XS_ERROR, "ENOENT");
	else if (msg_type == XS_TRANSACTION_END &&
		 !(msg->hdr.len == 2 &&
		   (!strcmp(msg->body, "T") || !strcmp(msg->body, "F"))))
		return xenbus_command_reply(u, XS_ERROR, "EINVAL");
	else if (msg_type == XS_TRANSACTION_END) {
		trans = xenbus_get_transaction(u, msg->hdr.tx_id);
		if (trans && trans->generation_id != xb_dev_generation_id) {
			list_del(&trans->list);
			kfree(trans);
			if (!strcmp(msg->body, "T"))
				return xenbus_command_reply(u, XS_ERROR,
							    "EAGAIN");
			else
				return xenbus_command_reply(u,
							    XS_TRANSACTION_END,
							    "OK");
		}
	}

	rc = xenbus_dev_request_and_reply(&msg->hdr, u);
	if (rc && trans) {
		list_del(&trans->list);
		kfree(trans);
	}

out:
	return rc;
}

/*
 * Handle XS_WATCH/XS_UNWATCH: parse the NUL-separated <path><token>
 * body, (un)register the watch, and synthesize an "OK" reply.
 */
static int xenbus_write_watch(unsigned msg_type, struct xenbus_file_priv *u)
{
	struct watch_adapter *watch;
	char *path, *token;
	int err, rc;

	path = u->u.buffer + sizeof(u->u.msg);
	token = memchr(path, 0, u->u.msg.len);
	if (token == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}
	token++;
	if (memchr(token, 0, u->u.msg.len - (token - path)) == NULL) {
		rc = xenbus_command_reply(u, XS_ERROR, "EINVAL");
		goto out;
	}

	if (msg_type == XS_WATCH) {
		watch = alloc_watch_adapter(path, token);
		if (watch == NULL) {
			rc = -ENOMEM;
			goto out;
		}

		watch->watch.callback = watch_fired;
		watch->dev_data = u;

		err = register_xenbus_watch(&watch->watch);
		if (err) {
			free_watch_adapter(watch);
			rc = err;
			goto out;
		}
		list_add(&watch->list, &u->watches);
	} else {
		list_for_each_entry(watch, &u->watches, list) {
			if (!strcmp(watch->token, token) &&
			    !strcmp(watch->watch.node, path)) {
				unregister_xenbus_watch(&watch->watch);
				list_del(&watch->list);
				free_watch_adapter(watch);
				break;
			}
		}
	}

	/* Success.  Synthesize a reply to say all is OK. */
	rc = xenbus_command_reply(u, msg_type, "OK");

out:
	return rc;
}

static ssize_t xenbus_file_write(struct file *filp,
				const char __user *ubuf,
				size_t len, loff_t *ppos)
{
	struct xenbus_file_priv *u = filp->private_data;
	uint32_t msg_type;
	int rc = len;
	int ret;

	/*
	 * We're expecting usermode to be writing properly formed
	 * xenbus messages.  If they write an incomplete message we
	 * buffer it up.  Once it is complete, we act on it.
	 */

	/*
	 * Make sure concurrent writers can't stomp all over each
	 * other's messages and make a mess of our partial message
	 * buffer.  We don't make any attempt to stop multiple
	 * writers from making a mess of each other's incomplete
	 * messages; we're just trying to guarantee our own internal
	 * consistency and make sure that single writes are handled
	 * atomically.
	 */
	mutex_lock(&u->msgbuffer_mutex);

	/* Get this out of the way early to avoid confusion */
	if (len == 0)
		goto out;

	/* Can't write a xenbus message larger we can buffer */
	if (len > sizeof(u->u.buffer) - u->len) {
		/* On error, dump existing buffer */
		u->len = 0;
		rc = -EINVAL;
		goto out;
	}

	ret = copy_from_user(u->u.buffer + u->len, ubuf, len);

	if (ret != 0) {
		rc = -EFAULT;
		goto out;
	}

	/* Deal with a partial copy. */
	len -= ret;
	rc = len;

	u->len += len;

	/* Return if we haven't got a full message yet */
	if (u->len < sizeof(u->u.msg))
		goto out;	/* not even the header yet */

	/* If we're expecting a message that's larger than we can
	   possibly send, dump what we have and return an error. */
	if ((sizeof(u->u.msg) + u->u.msg.len) > sizeof(u->u.buffer)) {
		rc = -E2BIG;
		u->len = 0;
		goto out;
	}

	if (u->len < (sizeof(u->u.msg) + u->u.msg.len))
		goto out;	/* incomplete data portion */

	/*
	 * OK, now we have a complete message.  Do something with it.
	 */

	kref_get(&u->kref);

	msg_type = u->u.msg.type;

	switch (msg_type) {
	case XS_WATCH:
	case XS_UNWATCH:
		/* (Un)Ask for some path to be watched for changes */
		ret = xenbus_write_watch(msg_type, u);
		break;

	default:
		/* Send out a transaction */
		ret = xenbus_write_transaction(msg_type, u);
		break;
	}
	if (ret != 0) {
		rc = ret;
		kref_put(&u->kref, xenbus_file_free);
	}

	/* Buffered message consumed */
	u->len = 0;

 out:
	mutex_unlock(&u->msgbuffer_mutex);
	return rc;
}

static int xenbus_file_open(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u;

	if (xen_store_evtchn == 0)
		return -ENOENT;

	stream_open(inode, filp);

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	kref_init(&u->kref);

	INIT_LIST_HEAD(&u->transactions);
	INIT_LIST_HEAD(&u->watches);
	INIT_LIST_HEAD(&u->read_buffers);
	init_waitqueue_head(&u->read_waitq);
	INIT_WORK(&u->wq, xenbus_worker);

	mutex_init(&u->reply_mutex);
	mutex_init(&u->msgbuffer_mutex);

	filp->private_data = u;

	return 0;
}

static int xenbus_file_release(struct inode *inode, struct file *filp)
{
	struct xenbus_file_priv *u = filp->private_data;

	kref_put(&u->kref, xenbus_file_free);

	return 0;
}

static __poll_t xenbus_file_poll(struct file *file, poll_table *wait)
{
	struct xenbus_file_priv *u = file->private_data;

	poll_wait(file, &u->read_waitq, wait);
	if (!list_empty(&u->read_buffers))
		return EPOLLIN | EPOLLRDNORM;
	return 0;
}

const struct file_operations xen_xenbus_fops = {
	.read = xenbus_file_read,
	.write = xenbus_file_write,
	.open = xenbus_file_open,
	.release = xenbus_file_release,
	.poll = xenbus_file_poll,
};
EXPORT_SYMBOL_GPL(xen_xenbus_fops);

static struct miscdevice xenbus_dev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "xen/xenbus",
	.fops = &xen_xenbus_fops,
};

static int __init xenbus_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	err = misc_register(&xenbus_dev);
	if (err)
		pr_err("Could not register xenbus frontend device\n");
	return err;
}
device_initcall(xenbus_init);
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2019 Arm Limited
 * Original author: Dave Martin <[email protected]>
 */

#ifndef ASSEMBLER_H
#define ASSEMBLER_H

#define NT_GNU_PROPERTY_TYPE_0	5
#define GNU_PROPERTY_AARCH64_FEATURE_1_AND	0xc0000000

/* Bits for GNU_PROPERTY_AARCH64_FEATURE_1_BTI */
#define GNU_PROPERTY_AARCH64_FEATURE_1_BTI	(1U << 0)
#define GNU_PROPERTY_AARCH64_FEATURE_1_PAC	(1U << 1)

/*
 * Open a global function definition.  Defines a matching one-shot
 * `endfn` macro (note the nesting) that emits the .size/.type
 * directives and then purges itself, so each startfn must be closed
 * by exactly one endfn.
 */
.macro startfn name:req
	.globl \name
\name:
	.macro endfn
		.size \name, . - \name
		.type \name, @function
		.purgem endfn
	.endm
.endm

/*
 * Emit a .note.gnu.property section advertising BTI/PAC support
 * (feature bits only when built with BTI), using numeric local labels
 * for the note's self-describing lengths.
 */
.macro emit_aarch64_feature_1_and
	.pushsection .note.gnu.property, "a"
	.align	3
	.long	2f - 1f
	.long	6f - 3f
	.long	NT_GNU_PROPERTY_TYPE_0
1:	.string	"GNU"
2:
	.align	3
3:	.long	GNU_PROPERTY_AARCH64_FEATURE_1_AND
	.long	5f - 4f
4:
#if BTI
	.long	GNU_PROPERTY_AARCH64_FEATURE_1_PAC | \
		GNU_PROPERTY_AARCH64_FEATURE_1_BTI
#else
	.long	0
#endif
5:
	.align	3
6:
	.popsection
.endm

/*
 * Raw HINT encodings below stand in for the PAC/BTI mnemonics;
 * presumably so the file assembles with toolchains that predate
 * those mnemonics (HINT is a NOP on cores without the extension) —
 * encodings per the Arm ARM HINT space.
 */
.macro paciasp
	hint	0x19
.endm

.macro autiasp
	hint	0x1d
.endm

.macro __bti_
	hint	0x20
.endm

.macro __bti_c
	hint	0x22
.endm

.macro __bti_j
	hint	0x24
.endm

.macro __bti_jc
	hint	0x26
.endm

/* bti [c|j|jc] — dispatches to one of the __bti_* encodings above. */
.macro bti what=
	__bti_\what
.endm

#endif /* ! ASSEMBLER_H */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_XOR_ALTIVEC_H
#define _ASM_POWERPC_XOR_ALTIVEC_H

#ifdef CONFIG_ALTIVEC
/*
 * AltiVec-accelerated XOR of 2-5 equally sized buffers into p1
 * (used by RAID/xor block code); @bytes is the length of each buffer.
 * All buffers are restrict-qualified: they must not alias.
 */
void xor_altivec_2(unsigned long bytes, unsigned long * __restrict p1,
		   const unsigned long * __restrict p2);
void xor_altivec_3(unsigned long bytes, unsigned long * __restrict p1,
		   const unsigned long * __restrict p2,
		   const unsigned long * __restrict p3);
void xor_altivec_4(unsigned long bytes, unsigned long * __restrict p1,
		   const unsigned long * __restrict p2,
		   const unsigned long * __restrict p3,
		   const unsigned long * __restrict p4);
void xor_altivec_5(unsigned long bytes, unsigned long * __restrict p1,
		   const unsigned long * __restrict p2,
		   const unsigned long * __restrict p3,
		   const unsigned long * __restrict p4,
		   const unsigned long * __restrict p5);
#endif
#endif /* _ASM_POWERPC_XOR_ALTIVEC_H */
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright(c) 2009-2012 Realtek Corporation.*/

#ifndef __RTL8723E_TRX_H__
#define __RTL8723E_TRX_H__

/* Hardware TX/RX descriptor geometry, in bytes. */
#define TX_DESC_SIZE				64
#define TX_DESC_AGGR_SUBFRAME_SIZE		32

#define RX_DESC_SIZE				32
#define RX_DRV_INFO_SIZE_UNIT			8

/* Byte offset of the "next descriptor" field inside a TX descriptor. */
#define TX_DESC_NEXT_DESC_OFFSET		40
#define USB_HWDESC_HEADER_LEN			32
#define CRCLENGTH				4

/*
 * The descriptor is an array of little-endian 32-bit words (__le32).
 * Each accessor below read-modify-writes one bitfield of one dword via
 * le32p_replace_bits()/le32_get_bits(), so descriptors stay LE on any
 * host endianness.  (__pdesc + N) addresses dword N of the descriptor.
 */

/* ---- TX descriptor dword 0 ---- */
static inline void set_tx_desc_pkt_size(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, GENMASK(15, 0));
}

static inline void set_tx_desc_offset(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, GENMASK(23, 16));
}

static inline void set_tx_desc_bmc(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(24));
}

static inline void set_tx_desc_htc(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(25));
}

static inline void set_tx_desc_last_seg(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(26));
}

static inline void set_tx_desc_first_seg(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(27));
}

static inline void set_tx_desc_linip(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(28));
}

/* OWN bit: 1 = descriptor owned by hardware, 0 = owned by driver. */
static inline void set_tx_desc_own(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(31));
}

static inline u32 get_tx_desc_own(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, BIT(31));
}

/* ---- TX descriptor dword 1 ---- */
static inline void set_tx_desc_macid(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 1), __val, GENMASK(4, 0));
}

static inline void set_tx_desc_agg_break(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 1), __val, BIT(5));
}

static inline void set_tx_desc_rdg_enable(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 1), __val, BIT(7));
}

static inline void set_tx_desc_queue_sel(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 1), __val, GENMASK(12, 8));
}

static inline void set_tx_desc_rate_id(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 1), __val, GENMASK(19, 16));
}

static inline void set_tx_desc_sec_type(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 1), __val, GENMASK(23, 22));
}

/* ---- TX descriptor dword 2 ---- */
static inline void set_tx_desc_more_frag(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 2), __val, BIT(17));
}

static inline void set_tx_desc_ampdu_density(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 2), __val, GENMASK(22, 20));
}

/* ---- TX descriptor dword 3 ---- */
static inline void set_tx_desc_seq(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 3), __val, GENMASK(27, 16));
}

static inline void set_tx_desc_pkt_id(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 3), __val, GENMASK(31, 28));
}

/* For RTL8723 */
static inline void set_tx_desc_hwseq_en_8723(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 3), __val, BIT(31));
}

static inline void set_tx_desc_hwseq_sel_8723(__le32 *__txdesc, u32 __value)
{
	le32p_replace_bits((__txdesc + 4), __value, GENMASK(7, 6));
}

/* ---- TX descriptor dword 4 (rate/RTS control) ---- */
static inline void set_tx_desc_rts_rate(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, GENMASK(4, 0));
}

static inline void set_tx_desc_use_rate(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(8));
}

static inline void set_tx_desc_disable_fb(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(10));
}

static inline void set_tx_desc_cts2self(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(11));
}

static inline void set_tx_desc_rts_enable(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(12));
}

static inline void set_tx_desc_hw_rts_enable(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(13));
}

static inline void set_tx_desc_tx_sub_carrier(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, GENMASK(21, 20));
}

static inline void set_tx_desc_data_bw(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(25));
}

static inline void set_tx_desc_rts_short(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(26));
}

static inline void set_tx_desc_rts_bw(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, BIT(27));
}

static inline void set_tx_desc_rts_sc(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, GENMASK(29, 28));
}

static inline void set_tx_desc_rts_stbc(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 4), __val, GENMASK(31, 30));
}

/* ---- TX descriptor dword 5 ---- */
static inline void set_tx_desc_tx_rate(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 5), __val, GENMASK(5, 0));
}

static inline void set_tx_desc_data_shortgi(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 5), __val, BIT(6));
}

static inline void set_tx_desc_data_rate_fb_limit(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 5), __val, GENMASK(12, 8));
}

static inline void set_tx_desc_rts_rate_fb_limit(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 5), __val, GENMASK(16, 13));
}

/* ---- TX descriptor dword 6 ---- */
static inline void set_tx_desc_max_agg_num(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 6), __val, GENMASK(15, 11));
}

/* ---- TX descriptor dword 7 ---- */
static inline void set_tx_desc_tx_buffer_size(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits((__pdesc + 7), __val, GENMASK(15, 0));
}

/* ---- TX descriptor dwords 8/10: DMA addresses (full 32-bit words) ---- */
static inline void set_tx_desc_tx_buffer_address(__le32 *__pdesc, u32 __val)
{
	*(__pdesc + 8) = cpu_to_le32(__val);
}

static inline u32 get_tx_desc_tx_buffer_address(__le32 *__pdesc)
{
	return le32_to_cpu(*(__pdesc + 8));
}

static inline void set_tx_desc_next_desc_address(__le32 *__pdesc, u32 __val)
{
	*(__pdesc + 10) = cpu_to_le32(__val);
}

/* ---- RX descriptor dword 0 ---- */
static inline u32 get_rx_desc_pkt_len(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, GENMASK(13, 0));
}

static inline u32 get_rx_desc_crc32(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, BIT(14));
}

static inline u32 get_rx_desc_icv(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, BIT(15));
}

static inline u32 get_rx_desc_drv_info_size(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, GENMASK(19, 16));
}

static inline u32 get_rx_desc_shift(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, GENMASK(25, 24));
}

static inline u32 get_rx_desc_physt(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, BIT(26));
}

static inline u32 get_rx_desc_swdec(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, BIT(27));
}

static inline u32 get_rx_desc_own(__le32 *__pdesc)
{
	return le32_get_bits(*__pdesc, BIT(31));
}

static inline void set_rx_desc_pkt_len(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, GENMASK(13, 0));
}

/* EOR: end-of-ring marker on the last RX descriptor. */
static inline void set_rx_desc_eor(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(30));
}

static inline void set_rx_desc_own(__le32 *__pdesc, u32 __val)
{
	le32p_replace_bits(__pdesc, __val, BIT(31));
}

/* ---- RX descriptor dword 1 ---- */
static inline u32 get_rx_desc_paggr(__le32 *__pdesc)
{
	return le32_get_bits(*(__pdesc + 1), BIT(14));
}

static inline u32 get_rx_desc_faggr(__le32 *__pdesc)
{
	return le32_get_bits(*(__pdesc + 1), BIT(15));
}

/* ---- RX descriptor dword 3 (rate info) ---- */
static inline u32 get_rx_desc_rxmcs(__le32 *__pdesc)
{
	return le32_get_bits(*(__pdesc + 3), GENMASK(5, 0));
}

static inline u32 get_rx_desc_rxht(__le32 *__pdesc)
{
	return le32_get_bits(*(__pdesc + 3), BIT(6));
}

static inline u32 get_rx_desc_splcp(__le32 *__pdesc)
{
	return le32_get_bits(*(__pdesc + 3), BIT(8));
}

static inline u32 get_rx_desc_bw(__le32 *__pdesc)
{
	return le32_get_bits(*(__pdesc + 3), BIT(9));
}

/* ---- RX descriptor dwords 5/6: TSF timestamp and buffer address ---- */
static inline u32 get_rx_desc_tsfl(__le32 *__pdesc)
{
	return le32_to_cpu(*(__pdesc + 5));
}

static inline u32 get_rx_desc_buff_addr(__le32 *__pdesc)
{
	return le32_to_cpu(*(__pdesc + 6));
}

static inline void set_rx_desc_buff_addr(__le32 *__pdesc, u32 __val)
{
	*(__pdesc + 6) = cpu_to_le32(__val);
}

/*
 * Zero a TX descriptor but never past the next-descriptor pointer, so the
 * ring linkage survives the clear.
 */
static inline void clear_pci_tx_desc_content(__le32 *__pdesc, u32 _size)
{
	if (_size > TX_DESC_NEXT_DESC_OFFSET)
		memset(__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET);
	else
		memset(__pdesc, 0, _size);
}

/* PHY status report appended by hardware after the RX descriptor. */
struct rx_fwinfo_8723e {
	u8 gain_trsw[4];
	u8 pwdb_all;
	u8 cfosho[4];
	u8 cfotail[4];
	s8 rxevm[2];
	s8 rxsnr[4];
	u8 pdsnr[2];
	u8 csi_current[2];
	u8 csi_target[2];
	u8 sigevm;
	u8 max_ex_pwr;
	u8 ex_intf_flag:1;
	u8 sgi_en:1;
	u8 rxsc:2;
	u8 reserve:4;
} __packed;

/*
 * Bitfield view of the TX descriptor.  NOTE(review): C bitfield layout is
 * implementation-defined; the accessors above are the portable interface —
 * presumably this struct documents the little-endian hardware layout only.
 */
struct tx_desc_8723e {
	u32 pktsize:16;
	u32 offset:8;
	u32 bmc:1;
	u32 htc:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 linip:1;
	u32 noacm:1;
	u32 gf:1;
	u32 own:1;

	u32 macid:5;
	u32 agg_en:1;
	u32 bk:1;
	u32 rdg_en:1;
	u32 queuesel:5;
	u32 rd_nav_ext:1;
	u32 lsig_txop_en:1;
	u32 pifs:1;
	u32 rateid:4;
	u32 nav_usehdr:1;
	u32 en_descid:1;
	u32 sectype:2;
	u32 pktoffset:8;

	u32 rts_rc:6;
	u32 data_rc:6;
	u32 rsvd0:2;
	u32 bar_retryht:2;
	u32 rsvd1:1;
	u32 morefrag:1;
	u32 raw:1;
	u32 ccx:1;
	u32 ampdudensity:3;
	u32 rsvd2:1;
	u32 ant_sela:1;
	u32 ant_selb:1;
	u32 txant_cck:2;
	u32 txant_l:2;
	u32 txant_ht:2;

	u32 nextheadpage:8;
	u32 tailpage:8;
	u32 seq:12;
	u32 pktid:4;

	u32 rtsrate:5;
	u32 apdcfe:1;
	u32 qos:1;
	u32 hwseq_enable:1;
	u32 userrate:1;
	u32 dis_rtsfb:1;
	u32 dis_datafb:1;
	u32 cts2self:1;
	u32 rts_en:1;
	u32 hwrts_en:1;
	u32 portid:1;
	u32 rsvd3:3;
	u32 waitdcts:1;
	u32 cts2ap_en:1;
	u32 txsc:2;
	u32 stbc:2;
	u32 txshort:1;
	u32 txbw:1;
	u32 rtsshort:1;
	u32 rtsbw:1;
	u32 rtssc:2;
	u32 rtsstbc:2;

	u32 txrate:6;
	u32 shortgi:1;
	u32 ccxt:1;
	u32 txrate_fb_lmt:5;
	u32 rtsrate_fb_lmt:4;
	u32 retrylmt_en:1;
	u32 txretrylmt:6;
	u32 usb_txaggnum:8;

	u32 txagca:5;
	u32 txagcb:5;
	u32 usemaxlen:1;
	u32 maxaggnum:5;
	u32 mcsg1maxlen:4;
	u32 mcsg2maxlen:4;
	u32 mcsg3maxlen:4;
	u32 mcs7sgimaxlen:4;

	u32 txbuffersize:16;
	u32 mcsg4maxlen:4;
	u32 mcsg5maxlen:4;
	u32 mcsg6maxlen:4;
	u32 mcsg15sgimaxlen:4;

	u32 txbuffaddr;
	u32 txbufferaddr64;
	u32 nextdescaddress;
	u32 nextdescaddress64;

	u32 reserve_pass_pcie_mm_limit[4];
} __packed;

/* Bitfield view of the RX descriptor (see NOTE on tx_desc_8723e). */
struct rx_desc_8723e {
	u32 length:14;
	u32 crc32:1;
	u32 icverror:1;
	u32 drv_infosize:4;
	u32 security:3;
	u32 qos:1;
	u32 shift:2;
	u32 phystatus:1;
	u32 swdec:1;
	u32 lastseg:1;
	u32 firstseg:1;
	u32 eor:1;
	u32 own:1;

	u32 macid:5;
	u32 tid:4;
	u32 hwrsvd:5;
	u32 paggr:1;
	u32 faggr:1;
	u32 a1_fit:4;
	u32 a2_fit:4;
	u32 pam:1;
	u32 pwr:1;
	u32 moredata:1;
	u32 morefrag:1;
	u32 type:2;
	u32 mc:1;
	u32 bc:1;

	u32 seq:12;
	u32 frag:4;
	u32 nextpktlen:14;
	u32 nextind:1;
	u32 rsvd:1;

	u32 rxmcs:6;
	u32 rxht:1;
	u32 amsdu:1;
	u32 splcp:1;
	u32 bandwidth:1;
	u32 htc:1;
	u32 tcpchk_rpt:1;
	u32 ipcchk_rpt:1;
	u32 tcpchk_valid:1;
	u32 hwpcerr:1;
	u32 hwpcind:1;
	u32 iv0:16;

	u32 iv1;
	u32 tsfl;
	u32 bufferaddress;
	u32 bufferaddress64;
} __packed;

/* Driver entry points implemented in trx.c. */
void rtl8723e_tx_fill_desc(struct ieee80211_hw *hw,
			   struct ieee80211_hdr *hdr, u8 *pdesc,
			   u8 *txbd, struct ieee80211_tx_info *info,
			   struct ieee80211_sta *sta,
			   struct sk_buff *skb, u8 hw_queue,
			   struct rtl_tcb_desc *ptcb_desc);
bool rtl8723e_rx_query_desc(struct ieee80211_hw *hw,
			    struct rtl_stats *status,
			    struct ieee80211_rx_status *rx_status,
			    u8 *pdesc, struct sk_buff *skb);
void rtl8723e_set_desc(struct ieee80211_hw *hw, u8 *pdesc, bool istx,
		       u8 desc_name, u8 *val);
u64 rtl8723e_get_desc(struct ieee80211_hw *hw,
		      u8 *pdesc, bool istx, u8 desc_name);
bool rtl8723e_is_tx_desc_closed(struct ieee80211_hw *hw,
				u8 hw_queue, u16 index);
void rtl8723e_tx_polling(struct ieee80211_hw *hw, u8 hw_queue);
void rtl8723e_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
			      struct sk_buff *skb);

#endif
/* mpiutil.ac - Utility functions for MPI
 * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include "mpi-internal.h"

/****************
 * Allocate an MPI with room for @nlimbs limbs, value zero.
 *
 * Historical note: sizing by limb count is awkward (limbs are large on
 * e.g. alpha while callers usually think in bits or bytes), but
 * mpi_alloc() is used in too many places to change its unit now.
 *
 * Returns the new MPI, or NULL on allocation failure.
 */
MPI mpi_alloc(unsigned nlimbs)
{
	MPI mpi = kmalloc(sizeof(*mpi), GFP_KERNEL);

	if (!mpi)
		return NULL;

	mpi->d = nlimbs ? mpi_alloc_limb_space(nlimbs) : NULL;
	if (nlimbs && !mpi->d) {
		kfree(mpi);
		return NULL;
	}

	mpi->alloced = nlimbs;
	mpi->nlimbs = 0;
	mpi->sign = 0;
	mpi->flags = 0;
	mpi->nbits = 0;
	return mpi;
}
EXPORT_SYMBOL_GPL(mpi_alloc);

/* Allocate uninitialized storage for @nlimbs limbs; NULL when nlimbs == 0. */
mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
{
	if (!nlimbs)
		return NULL;

	return kmalloc(nlimbs * sizeof(mpi_limb_t), GFP_KERNEL);
}

/*
 * Release limb storage.  The buffer is scrubbed before freeing since it
 * may have held secret key material; NULL is tolerated.
 */
void mpi_free_limb_space(mpi_ptr_t a)
{
	if (a)
		kfree_sensitive(a);
}

/* Hand ownership of the limb array @ap (capacity @nlimbs) to @a. */
void mpi_assign_limb_space(MPI a, mpi_ptr_t ap, unsigned nlimbs)
{
	mpi_free_limb_space(a->d);
	a->d = ap;
	a->alloced = nlimbs;
}

/****************
 * Grow the limb array of @a to @nlimbs; the added limbs read as zero
 * (kcalloc zero-fills the new buffer before the old limbs are copied in).
 * A request that fits the current allocation is a no-op.
 *
 * Returns 0 on success, -ENOMEM on allocation failure (@a unchanged).
 */
int mpi_resize(MPI a, unsigned nlimbs)
{
	mpi_ptr_t new_array;

	if (nlimbs <= a->alloced)
		return 0;	/* no need to do it */

	new_array = kcalloc(nlimbs, sizeof(mpi_limb_t), GFP_KERNEL);
	if (!new_array)
		return -ENOMEM;

	if (a->d) {
		memcpy(new_array, a->d, a->alloced * sizeof(mpi_limb_t));
		/* old limbs may hold secrets — scrub on free */
		kfree_sensitive(a->d);
	}
	a->d = new_array;
	a->alloced = nlimbs;
	return 0;
}

/* Free an MPI and its limb storage; NULL is a no-op. */
void mpi_free(MPI a)
{
	if (!a)
		return;

	/* flag bit 2 marks limb space that must always be scrubbed */
	if (a->flags & 4)
		kfree_sensitive(a->d);
	else
		mpi_free_limb_space(a->d);

	if (a->flags & ~7)
		pr_info("invalid flag value in mpi\n");
	kfree(a);
}
EXPORT_SYMBOL_GPL(mpi_free);

/****************
 * Deep-copy @a without interpreting its value.  The immutable (16) and
 * constant (32) flags are cleared on the copy.
 *
 * Returns the copy, or NULL when @a is NULL or allocation fails.
 */
MPI mpi_copy(MPI a)
{
	MPI b;

	if (!a)
		return NULL;

	b = mpi_alloc(a->nlimbs);
	if (!b)
		return NULL;

	b->nlimbs = a->nlimbs;
	b->sign = a->sign;
	b->flags = a->flags & ~(16 | 32); /* reset immutable/constant flags */
	if (a->nlimbs)
		memcpy(b->d, a->d, a->nlimbs * sizeof(mpi_limb_t));
	return b;
}

MODULE_DESCRIPTION("Multiprecision maths library");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 MediaTek Inc.
 *
 * Author: Chunfeng Yun <[email protected]>
 */

#include <linux/dma-mapping.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/pm_wakeirq.h>
#include <linux/reset.h>

#include "mtu3.h"
#include "mtu3_dr.h"
#include "mtu3_debug.h"

/* u2-port0 should be powered on and enabled; */
/*
 * Poll IPPC status registers until the requested clocks (@ex_clks plus the
 * always-required sys125/syspll/ref clocks) and the u2 MAC reset are stable.
 * Returns 0 on success or the readl_poll_timeout() error.
 */
int ssusb_check_clocks(struct ssusb_mtk *ssusb, u32 ex_clks)
{
	void __iomem *ibase = ssusb->ippc_base;
	u32 value, check_val;
	int ret;

	check_val = ex_clks | SSUSB_SYS125_RST_B_STS | SSUSB_SYSPLL_STABLE |
			SSUSB_REF_RST_B_STS;

	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS1, value,
			(check_val == (value & check_val)), 100, 20000);
	if (ret) {
		dev_err(ssusb->dev, "clks of sts1 are not stable!\n");
		return ret;
	}

	ret = readl_poll_timeout(ibase + U3D_SSUSB_IP_PW_STS2, value,
			(value & SSUSB_U2_MAC_SYS_RST_B_STS), 100, 10000);
	if (ret) {
		dev_err(ssusb->dev, "mac2 clock is not stable\n");
		return ret;
	}

	return 0;
}

/*
 * Wait for the IP to report sleep state before cutting clocks/phys on
 * suspend.  Gadget mode may veto the check; on success a short delay works
 * around a wrong wakeup-signal latch on some SoCs.
 */
static int wait_for_ip_sleep(struct ssusb_mtk *ssusb)
{
	bool sleep_check = true;
	u32 value;
	int ret;

	if (!ssusb->is_host)
		sleep_check = ssusb_gadget_ip_sleep_check(ssusb);

	if (!sleep_check)
		return 0;

	/* wait for ip enter sleep mode */
	ret = readl_poll_timeout(ssusb->ippc_base + U3D_SSUSB_IP_PW_STS1, value,
				 (value & SSUSB_IP_SLEEP_STS), 100, 100000);
	if (ret) {
		dev_err(ssusb->dev, "ip sleep failed!!!\n");
		ret = -EBUSY;
	} else {
		/* workaround: avoid wrong wakeup signal latch for some soc */
		usleep_range(100, 200);
	}

	return ret;
}

/* Init all PHYs; on failure unwind the ones already initialized. */
static int ssusb_phy_init(struct ssusb_mtk *ssusb)
{
	int i;
	int ret;

	for (i = 0; i < ssusb->num_phys; i++) {
		ret = phy_init(ssusb->phys[i]);
		if (ret)
			goto exit_phy;
	}
	return 0;

exit_phy:
	for (; i > 0; i--)
		phy_exit(ssusb->phys[i - 1]);

	return ret;
}

/* Exit all PHYs; always returns 0. */
static int ssusb_phy_exit(struct ssusb_mtk *ssusb)
{
	int i;

	for (i = 0; i < ssusb->num_phys; i++)
		phy_exit(ssusb->phys[i]);

	return 0;
}

/* Power on all PHYs; on failure unwind the ones already powered. */
static int ssusb_phy_power_on(struct ssusb_mtk *ssusb)
{
	int i;
	int ret;

	for (i = 0; i < ssusb->num_phys; i++) {
		ret = phy_power_on(ssusb->phys[i]);
		if (ret)
			goto power_off_phy;
	}
	return 0;

power_off_phy:
	for (; i > 0; i--)
		phy_power_off(ssusb->phys[i - 1]);

	return ret;
}

static void ssusb_phy_power_off(struct ssusb_mtk *ssusb)
{
	unsigned int i;

	for (i = 0; i < ssusb->num_phys; i++)
		phy_power_off(ssusb->phys[i]);
}

/*
 * Bring up shared resources in order: vusb33 regulator -> bulk clocks ->
 * PHY init -> PHY power; each failure unwinds everything acquired so far.
 */
static int ssusb_rscs_init(struct ssusb_mtk *ssusb)
{
	int ret = 0;

	ret = regulator_enable(ssusb->vusb33);
	if (ret) {
		dev_err(ssusb->dev, "failed to enable vusb33\n");
		goto vusb33_err;
	}

	ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, ssusb->clks);
	if (ret)
		goto clks_err;

	ret = ssusb_phy_init(ssusb);
	if (ret) {
		dev_err(ssusb->dev, "failed to init phy\n");
		goto phy_init_err;
	}

	ret = ssusb_phy_power_on(ssusb);
	if (ret) {
		dev_err(ssusb->dev, "failed to power on phy\n");
		goto phy_err;
	}

	return 0;

phy_err:
	ssusb_phy_exit(ssusb);
phy_init_err:
	clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
clks_err:
	regulator_disable(ssusb->vusb33);
vusb33_err:
	return ret;
}

/* Tear down everything acquired by ssusb_rscs_init(). */
static void ssusb_rscs_exit(struct ssusb_mtk *ssusb)
{
	clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
	regulator_disable(ssusb->vusb33);
	ssusb_phy_power_off(ssusb);
	ssusb_phy_exit(ssusb);
}

/* Pulse the whole-IP software reset, then power down the device part. */
static void ssusb_ip_sw_reset(struct ssusb_mtk *ssusb)
{
	/* reset whole ip (xhci & u3d) */
	mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);
	udelay(1);
	mtu3_clrbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL0, SSUSB_IP_SW_RST);

	/*
	 * device ip may be powered on in firmware/BROM stage before entering
	 * kernel stage;
	 * power down device ip, otherwise ip-sleep will fail when working as
	 * host only mode
	 */
	mtu3_setbits(ssusb->ippc_base, U3D_SSUSB_IP_PW_CTRL2, SSUSB_IP_DEV_PDN);
}

/*
 * Decide whether USB3 dual-role is possible: requires u3 port0 enabled and
 * at least one u3 port on both the device and xHCI sides of the IP.
 */
static void ssusb_u3_drd_check(struct ssusb_mtk *ssusb)
{
	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
	u32 dev_u3p_num;
	u32 host_u3p_num;
	u32 value;

	/* u3 port0 is disabled */
	if (ssusb->u3p_dis_msk & BIT(0)) {
		otg_sx->is_u3_drd = false;
		goto out;
	}

	value = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_DEV_CAP);
	dev_u3p_num = SSUSB_IP_DEV_U3_PORT_NUM(value);

	value = mtu3_readl(ssusb->ippc_base, U3D_SSUSB_IP_XHCI_CAP);
	host_u3p_num = SSUSB_IP_XHCI_U3_PORT_NUM(value);

	otg_sx->is_u3_drd = !!(dev_u3p_num && host_u3p_num);

out:
	dev_info(ssusb->dev, "usb3-drd: %d\n", otg_sx->is_u3_drd);
}

/*
 * Gather all DT/platform resources: regulators, clocks, phys, ippc regs,
 * optional wakeup irq, dr_mode and the various mode-dependent properties.
 * Host-only and dual-role properties are skipped for peripheral-only mode.
 */
static int get_ssusb_rscs(struct platform_device *pdev, struct ssusb_mtk *ssusb)
{
	struct device_node *node = pdev->dev.of_node;
	struct otg_switch_mtk *otg_sx = &ssusb->otg_switch;
	struct clk_bulk_data *clks = ssusb->clks;
	struct device *dev = &pdev->dev;
	int i;
	int ret;

	ssusb->vusb33 = devm_regulator_get(dev, "vusb33");
	if (IS_ERR(ssusb->vusb33)) {
		dev_err(dev, "failed to get vusb33\n");
		return PTR_ERR(ssusb->vusb33);
	}

	clks[0].id = "sys_ck";
	clks[1].id = "ref_ck";
	clks[2].id = "mcu_ck";
	clks[3].id = "dma_ck";
	clks[4].id = "xhci_ck";
	clks[5].id = "frmcnt_ck";
	ret = devm_clk_bulk_get_optional(dev, BULK_CLKS_CNT, clks);
	if (ret)
		return ret;

	ssusb->num_phys = of_count_phandle_with_args(node,
			"phys", "#phy-cells");
	if (ssusb->num_phys > 0) {
		ssusb->phys = devm_kcalloc(dev, ssusb->num_phys,
					sizeof(*ssusb->phys), GFP_KERNEL);
		if (!ssusb->phys)
			return -ENOMEM;
	} else {
		ssusb->num_phys = 0;
	}

	for (i = 0; i < ssusb->num_phys; i++) {
		ssusb->phys[i] = devm_of_phy_get_by_index(dev, node, i);
		if (IS_ERR(ssusb->phys[i])) {
			dev_err(dev, "failed to get phy-%d\n", i);
			return PTR_ERR(ssusb->phys[i]);
		}
	}

	ssusb->ippc_base = devm_platform_ioremap_resource_byname(pdev, "ippc");
	if (IS_ERR(ssusb->ippc_base))
		return PTR_ERR(ssusb->ippc_base);

	/* optional wakeup irq; only defer on probe-defer, ignore absence */
	ssusb->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
	if (ssusb->wakeup_irq == -EPROBE_DEFER)
		return ssusb->wakeup_irq;

	ssusb->dr_mode = usb_get_dr_mode(dev);
	if (ssusb->dr_mode == USB_DR_MODE_UNKNOWN)
		ssusb->dr_mode = USB_DR_MODE_OTG;

	of_property_read_u32(node, "mediatek,u3p-dis-msk", &ssusb->u3p_dis_msk);

	if (ssusb->dr_mode == USB_DR_MODE_PERIPHERAL)
		goto out;

	/* if host role is supported */
	ret = ssusb_wakeup_of_property_parse(ssusb, node);
	if (ret) {
		dev_err(dev, "failed to parse uwk property\n");
		return ret;
	}

	/* optional property, ignore the error if it does not exist */
	of_property_read_u32(node, "mediatek,u2p-dis-msk",
			     &ssusb->u2p_dis_msk);

	otg_sx->vbus = devm_regulator_get(dev, "vbus");
	if (IS_ERR(otg_sx->vbus)) {
		dev_err(dev, "failed to get vbus\n");
		return PTR_ERR(otg_sx->vbus);
	}

	if (ssusb->dr_mode == USB_DR_MODE_HOST)
		goto out;

	/* if dual-role mode is supported */
	otg_sx->manual_drd_enabled =
		of_property_read_bool(node, "enable-manual-drd");
	otg_sx->role_sw_used = of_property_read_bool(node, "usb-role-switch");

	/* can't disable port0 when use dual-role mode */
	ssusb->u2p_dis_msk &= ~0x1;

	if (otg_sx->role_sw_used || otg_sx->manual_drd_enabled)
		goto out;

	if (of_property_present(node, "extcon")) {
		otg_sx->edev = extcon_get_edev_by_phandle(ssusb->dev, 0);
		if (IS_ERR(otg_sx->edev)) {
			return dev_err_probe(dev, PTR_ERR(otg_sx->edev),
					     "couldn't get extcon device\n");
		}
	}

out:
	dev_info(dev, "dr_mode: %d, drd: %s\n", ssusb->dr_mode,
		 otg_sx->manual_drd_enabled ? "manual" : "auto");
	dev_info(dev, "u2p_dis_msk: %x, u3p_dis_msk: %x\n",
		 ssusb->u2p_dis_msk, ssusb->u3p_dis_msk);

	return 0;
}

/*
 * Probe: DMA setup, resource acquisition, runtime-PM bring-up, IP reset,
 * then role-specific init (gadget / host / both for OTG).  Error paths
 * unwind in strict reverse order via the labelled gotos.
 */
static int mtu3_probe(struct platform_device *pdev)
{
	struct device_node *node = pdev->dev.of_node;
	struct device *dev = &pdev->dev;
	struct ssusb_mtk *ssusb;
	int ret = -ENOMEM;

	/* all elements are set to ZERO as default value */
	ssusb = devm_kzalloc(dev, sizeof(*ssusb), GFP_KERNEL);
	if (!ssusb)
		return -ENOMEM;

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(dev, "No suitable DMA config available\n");
		return -ENOTSUPP;
	}

	platform_set_drvdata(pdev, ssusb);
	ssusb->dev = dev;

	ret = get_ssusb_rscs(pdev, ssusb);
	if (ret)
		return ret;

	ssusb_debugfs_create_root(ssusb);

	/* enable power domain */
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 4000);
	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);
	device_init_wakeup(dev, true);

	ret = ssusb_rscs_init(ssusb);
	if (ret)
		goto comm_init_err;

	if (ssusb->wakeup_irq > 0) {
		ret = dev_pm_set_dedicated_wake_irq_reverse(dev,
							    ssusb->wakeup_irq);
		if (ret) {
			dev_err(dev, "failed to set wakeup irq %d\n",
				ssusb->wakeup_irq);
			goto comm_exit;
		}
		dev_info(dev, "wakeup irq %d\n", ssusb->wakeup_irq);
	}

	ret = device_reset_optional(dev);
	if (ret) {
		dev_err_probe(dev, ret, "failed to reset controller\n");
		goto comm_exit;
	}

	ssusb_ip_sw_reset(ssusb);
	ssusb_u3_drd_check(ssusb);

	/* Kconfig may force a single role regardless of DT dr_mode */
	if (IS_ENABLED(CONFIG_USB_MTU3_HOST))
		ssusb->dr_mode = USB_DR_MODE_HOST;
	else if (IS_ENABLED(CONFIG_USB_MTU3_GADGET))
		ssusb->dr_mode = USB_DR_MODE_PERIPHERAL;

	/* default as host */
	ssusb->is_host = !(ssusb->dr_mode == USB_DR_MODE_PERIPHERAL);

	switch (ssusb->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		ret = ssusb_gadget_init(ssusb);
		if (ret) {
			dev_err(dev, "failed to initialize gadget\n");
			goto comm_exit;
		}
		break;
	case USB_DR_MODE_HOST:
		ret = ssusb_host_init(ssusb, node);
		if (ret) {
			dev_err(dev, "failed to initialize host\n");
			goto comm_exit;
		}
		break;
	case USB_DR_MODE_OTG:
		ret = ssusb_gadget_init(ssusb);
		if (ret) {
			dev_err(dev, "failed to initialize gadget\n");
			goto comm_exit;
		}

		ret = ssusb_host_init(ssusb, node);
		if (ret) {
			dev_err(dev, "failed to initialize host\n");
			goto gadget_exit;
		}

		ret = ssusb_otg_switch_init(ssusb);
		if (ret) {
			dev_err(dev, "failed to initialize switch\n");
			goto host_exit;
		}
		break;
	default:
		dev_err(dev, "unsupported mode: %d\n", ssusb->dr_mode);
		ret = -EINVAL;
		goto comm_exit;
	}

	device_enable_async_suspend(dev);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	pm_runtime_forbid(dev);

	return 0;

host_exit:
	ssusb_host_exit(ssusb);
gadget_exit:
	ssusb_gadget_exit(ssusb);
comm_exit:
	ssusb_rscs_exit(ssusb);
comm_init_err:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	ssusb_debugfs_remove_root(ssusb);

	return ret;
}

/* Remove: role-specific teardown, then shared resources and runtime PM. */
static void mtu3_remove(struct platform_device *pdev)
{
	struct ssusb_mtk *ssusb = platform_get_drvdata(pdev);

	pm_runtime_get_sync(&pdev->dev);

	switch (ssusb->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		ssusb_gadget_exit(ssusb);
		break;
	case USB_DR_MODE_HOST:
		ssusb_host_exit(ssusb);
		break;
	case USB_DR_MODE_OTG:
		ssusb_otg_switch_exit(ssusb);
		ssusb_gadget_exit(ssusb);
		ssusb_host_exit(ssusb);
		break;
	case USB_DR_MODE_UNKNOWN:
		/*
		 * This cannot happen because with dr_mode ==
		 * USB_DR_MODE_UNKNOWN, .probe() doesn't succeed and so
		 * .remove() wouldn't be called at all. However (little
		 * surprising) the compiler isn't smart enough to see that, so
		 * we explicitly have this case item to not make the compiler
		 * wail about an unhandled enumeration value.
		 */
		break;
	}

	ssusb_rscs_exit(ssusb);
	ssusb_debugfs_remove_root(ssusb);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_put_noidle(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
}

/*
 * Resume whichever roles are active for the current dr_mode.  For OTG the
 * host side is told whether this is a "p0" (gadget-owned) resume.
 */
static int resume_ip_and_ports(struct ssusb_mtk *ssusb, pm_message_t msg)
{
	switch (ssusb->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		ssusb_gadget_resume(ssusb, msg);
		break;
	case USB_DR_MODE_HOST:
		ssusb_host_resume(ssusb, false);
		break;
	case USB_DR_MODE_OTG:
		ssusb_host_resume(ssusb, !ssusb->is_host);
		if (!ssusb->is_host)
			ssusb_gadget_resume(ssusb, msg);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * Common suspend path: quiesce active roles, wait for IP sleep, then drop
 * phys/clocks and arm the wakeup logic.  On IP-sleep failure the already-
 * suspended roles are resumed again so the device stays usable.
 */
static int mtu3_suspend_common(struct device *dev, pm_message_t msg)
{
	struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
	int ret = 0;

	dev_dbg(dev, "%s\n", __func__);

	switch (ssusb->dr_mode) {
	case USB_DR_MODE_PERIPHERAL:
		ret = ssusb_gadget_suspend(ssusb, msg);
		if (ret)
			goto err;
		break;
	case USB_DR_MODE_HOST:
		ssusb_host_suspend(ssusb);
		break;
	case USB_DR_MODE_OTG:
		if (!ssusb->is_host) {
			ret = ssusb_gadget_suspend(ssusb, msg);
			if (ret)
				goto err;
		}
		ssusb_host_suspend(ssusb);
		break;
	default:
		return -EINVAL;
	}

	ret = wait_for_ip_sleep(ssusb);
	if (ret)
		goto sleep_err;

	ssusb_phy_power_off(ssusb);
	clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
	ssusb_wakeup_set(ssusb, true);
	return 0;

sleep_err:
	resume_ip_and_ports(ssusb, msg);
err:
	return ret;
}

/* Common resume path: reverse of mtu3_suspend_common(). */
static int mtu3_resume_common(struct device *dev, pm_message_t msg)
{
	struct ssusb_mtk *ssusb = dev_get_drvdata(dev);
	int ret;

	dev_dbg(dev, "%s\n", __func__);

	ssusb_wakeup_set(ssusb, false);
	ret = clk_bulk_prepare_enable(BULK_CLKS_CNT, ssusb->clks);
	if (ret)
		goto clks_err;

	ret = ssusb_phy_power_on(ssusb);
	if (ret)
		goto phy_err;

	return resume_ip_and_ports(ssusb, msg);

phy_err:
	clk_bulk_disable_unprepare(BULK_CLKS_CNT, ssusb->clks);
clks_err:
	return ret;
}

static int __maybe_unused mtu3_suspend(struct device *dev)
{
	return mtu3_suspend_common(dev, PMSG_SUSPEND);
}

static int __maybe_unused mtu3_resume(struct device *dev)
{
	/*
	 * NOTE(review): the resume path passes PMSG_SUSPEND rather than
	 * PMSG_RESUME; harmless if callees only distinguish auto vs. system
	 * PM events (PMSG_IS_AUTO) — confirm against ssusb_gadget_resume().
	 */
	return mtu3_resume_common(dev, PMSG_SUSPEND);
}

static int __maybe_unused mtu3_runtime_suspend(struct device *dev)
{
	/* runtime PM only gates power when the device can wake us up */
	if (!device_may_wakeup(dev))
		return 0;

	return mtu3_suspend_common(dev, PMSG_AUTO_SUSPEND);
}

static int __maybe_unused mtu3_runtime_resume(struct device *dev)
{
	if (!device_may_wakeup(dev))
		return 0;

	return mtu3_resume_common(dev, PMSG_AUTO_SUSPEND);
}

static const struct dev_pm_ops mtu3_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(mtu3_suspend, mtu3_resume)
	SET_RUNTIME_PM_OPS(mtu3_runtime_suspend, mtu3_runtime_resume, NULL)
};

#define DEV_PM_OPS (IS_ENABLED(CONFIG_PM) ? &mtu3_pm_ops : NULL)

static const struct of_device_id mtu3_of_match[] = {
	{.compatible = "mediatek,mt8173-mtu3",},
	{.compatible = "mediatek,mtu3",},
	{},
};
MODULE_DEVICE_TABLE(of, mtu3_of_match);

static struct platform_driver mtu3_driver = {
	.probe = mtu3_probe,
	.remove = mtu3_remove,
	.driver = {
		.name = MTU3_DRIVER_NAME,
		.pm = DEV_PM_OPS,
		.of_match_table = mtu3_of_match,
	},
};
module_platform_driver(mtu3_driver);

MODULE_AUTHOR("Chunfeng Yun <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek USB3 DRD Controller Driver");
// SPDX-License-Identifier: GPL-2.0-only
/*
 * UFS PHY driver for Samsung SoC
 *
 * Copyright (C) 2020 Samsung Electronics Co., Ltd.
 * Author: Seungwon Jeon <[email protected]>
 * Author: Alim Akhtar <[email protected]>
 *
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/soc/samsung/exynos-pmu.h>

#include "phy-samsung-ufs.h"

#define for_each_phy_lane(phy, i) \
	for (i = 0; i < (phy)->lane_cnt; i++)
#define for_each_phy_cfg(cfg) \
	for (; (cfg)->id; (cfg)++)

#define PHY_DEF_LANE_CNT	1

/*
 * Write one calibration entry to the PMA block.  Lane 0 always gets the
 * value at off_0; lane 1 only takes TRSV (per-lane transceiver) entries,
 * written at off_1.
 */
static void samsung_ufs_phy_config(struct samsung_ufs_phy *phy,
				   const struct samsung_ufs_phy_cfg *cfg,
				   u8 lane)
{
	enum {LANE_0, LANE_1}; /* lane index */

	switch (lane) {
	case LANE_0:
		writel(cfg->val, (phy)->reg_pma + cfg->off_0);
		break;
	case LANE_1:
		if (cfg->id == PHY_TRSV_BLK)
			writel(cfg->val, (phy)->reg_pma + cfg->off_1);
		break;
	}
}

/*
 * Poll for PLL lock and then CDR lock after calibration; up to 100 ms
 * each.  Returns 0 on success or the poll-timeout error.
 */
int samsung_ufs_phy_wait_for_lock_acq(struct phy *phy, u8 lane)
{
	struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
	const unsigned int timeout_us = 100000;
	const unsigned int sleep_us = 10;
	u32 val;
	int err;

	err = readl_poll_timeout(
			ufs_phy->reg_pma + PHY_APB_ADDR(PHY_PLL_LOCK_STATUS),
			val, (val & PHY_PLL_LOCK_BIT), sleep_us, timeout_us);
	if (err) {
		dev_err(ufs_phy->dev,
			"failed to get phy pll lock acquisition %d\n", err);
		goto out;
	}

	err = readl_poll_timeout(
			ufs_phy->reg_pma + PHY_APB_ADDR(ufs_phy->drvdata->cdr_lock_status_offset),
			val, (val & PHY_CDR_LOCK_BIT), sleep_us, timeout_us);
	if (err)
		dev_err(ufs_phy->dev,
			"failed to get phy cdr lock acquisition %d\n", err);

out:
	return err;
}

/*
 * Apply the calibration table for the current state, run the optional
 * per-lane lock waits, then advance the state machine:
 * PRE_INIT -> POST_INIT -> PRE_PWR_HS -> POST_PWR_HS -> PRE_INIT.
 * The PHY must therefore be calibrated once per stage, in order.
 */
static int samsung_ufs_phy_calibrate(struct phy *phy)
{
	struct samsung_ufs_phy *ufs_phy = get_samsung_ufs_phy(phy);
	const struct samsung_ufs_phy_cfg * const *cfgs = ufs_phy->cfgs;
	const struct samsung_ufs_phy_cfg *cfg;
	int err = 0;
	int i;

	if (unlikely(ufs_phy->ufs_phy_state < CFG_PRE_INIT ||
		     ufs_phy->ufs_phy_state >= CFG_TAG_MAX)) {
		dev_err(ufs_phy->dev, "invalid phy config index %d\n",
			ufs_phy->ufs_phy_state);
		return -EINVAL;
	}

	cfg = cfgs[ufs_phy->ufs_phy_state];
	if (!cfg)
		goto out;

	for_each_phy_cfg(cfg) {
		for_each_phy_lane(ufs_phy, i) {
			samsung_ufs_phy_config(ufs_phy, cfg, i);
		}
	}

	for_each_phy_lane(ufs_phy, i) {
		if (ufs_phy->ufs_phy_state == CFG_PRE_INIT &&
		    ufs_phy->drvdata->wait_for_cal) {
			err = ufs_phy->drvdata->wait_for_cal(phy, i);
			if (err)
				goto out;
		}

		if (ufs_phy->ufs_phy_state == CFG_POST_PWR_HS &&
		    ufs_phy->drvdata->wait_for_cdr) {
			err = ufs_phy->drvdata->wait_for_cdr(phy, i);
			if (err)
				goto out;
		}
	}

	/**
	 * In Samsung ufshci, PHY need to be calibrated at different
	 * stages / state mainly before Linkstartup, after Linkstartup,
	 * before power mode change and after power mode change.
	 * Below state machine to make sure to calibrate PHY in each
	 * state. Here after configuring PHY in a given state, will
	 * change the state to next state so that next state phy
	 * calibration value can be programed
	 */
out:
	switch (ufs_phy->ufs_phy_state) {
	case CFG_PRE_INIT:
		ufs_phy->ufs_phy_state = CFG_POST_INIT;
		break;
	case CFG_POST_INIT:
		ufs_phy->ufs_phy_state = CFG_PRE_PWR_HS;
		break;
	case CFG_PRE_PWR_HS:
		ufs_phy->ufs_phy_state = CFG_POST_PWR_HS;
		break;
	case CFG_POST_PWR_HS:
		/* Change back to INIT state */
		ufs_phy->ufs_phy_state = CFG_PRE_INIT;
		break;
	default:
		dev_err(ufs_phy->dev, "wrong state for phy calibration\n");
	}

	return err;
}

/* Resolve the per-variant clock name list into a devm clk_bulk array. */
static int samsung_ufs_phy_clks_init(struct samsung_ufs_phy *phy)
{
	int i;
	const struct samsung_ufs_phy_drvdata *drvdata = phy->drvdata;
	int num_clks = drvdata->num_clks;

	phy->clks = devm_kcalloc(phy->dev, num_clks, sizeof(*phy->clks),
				 GFP_KERNEL);
	if (!phy->clks)
		return -ENOMEM;

	for (i = 0; i < num_clks; i++)
		phy->clks[i].id = drvdata->clk_list[i];

	return devm_clk_bulk_get(phy->dev, num_clks, phy->clks);
}

/* Latch the negotiated lane count and reset the calibration state machine. */
static int samsung_ufs_phy_init(struct phy *phy)
{
	struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);

	ss_phy->lane_cnt = phy->attrs.bus_width;
	ss_phy->ufs_phy_state = CFG_PRE_INIT;

	return 0;
}

/*
 * De-isolate the PHY, enable its clocks, and run the PRE_INIT calibration
 * pass if the state machine is at the start.
 */
static int samsung_ufs_phy_power_on(struct phy *phy)
{
	struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);
	int ret;

	samsung_ufs_phy_ctrl_isol(ss_phy, false);

	ret = clk_bulk_prepare_enable(ss_phy->drvdata->num_clks, ss_phy->clks);
	if (ret) {
		dev_err(ss_phy->dev, "failed to enable ufs phy clocks\n");
		return ret;
	}

	if (ss_phy->ufs_phy_state == CFG_PRE_INIT) {
		ret = samsung_ufs_phy_calibrate(phy);
		if (ret)
			dev_err(ss_phy->dev, "ufs phy calibration failed\n");
	}

	return ret;
}

/* Reverse of power_on: clocks off, then isolate the PHY via the PMU. */
static int samsung_ufs_phy_power_off(struct phy *phy)
{
	struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);

	clk_bulk_disable_unprepare(ss_phy->drvdata->num_clks, ss_phy->clks);

	samsung_ufs_phy_ctrl_isol(ss_phy, true);

	return 0;
}

/* Record the requested mode; non-positive values map to PHY_MODE_INVALID. */
static int samsung_ufs_phy_set_mode(struct phy *generic_phy,
				    enum phy_mode mode, int submode)
{
	struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(generic_phy);

	ss_phy->mode = PHY_MODE_INVALID;

	if (mode > 0)
		ss_phy->mode = mode;

	return 0;
}

/* Park the state machine at the out-of-range sentinel until next init. */
static int samsung_ufs_phy_exit(struct phy *phy)
{
	struct samsung_ufs_phy *ss_phy = get_samsung_ufs_phy(phy);

	ss_phy->ufs_phy_state = CFG_TAG_MAX;

	return 0;
}

static const struct phy_ops samsung_ufs_phy_ops = {
	.init		= samsung_ufs_phy_init,
	.exit		= samsung_ufs_phy_exit,
	.power_on	= samsung_ufs_phy_power_on,
	.power_off	= samsung_ufs_phy_power_off,
	.calibrate	= samsung_ufs_phy_calibrate,
	.set_mode	= samsung_ufs_phy_set_mode,
	.owner          = THIS_MODULE,
};

static const struct of_device_id samsung_ufs_phy_match[];

/*
 * Probe: map PMA registers, grab the PMU regmap for isolation control,
 * create the generic PHY, pull per-variant data/clocks, and register a
 * simple phy-provider.  The optional second cell of "samsung,pmu-syscon"
 * overrides the default isolation register offset.
 */
static int samsung_ufs_phy_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct samsung_ufs_phy *phy;
	struct phy *gen_phy;
	struct phy_provider *phy_provider;
	const struct samsung_ufs_phy_drvdata *drvdata;
	u32 isol_offset;
	int err = 0;

	match = of_match_node(samsung_ufs_phy_match, dev->of_node);
	if (!match) {
		err = -EINVAL;
		dev_err(dev, "failed to get match_node\n");
		goto out;
	}

	phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
	if (!phy) {
		err = -ENOMEM;
		goto out;
	}

	phy->reg_pma = devm_platform_ioremap_resource_byname(pdev, "phy-pma");
	if (IS_ERR(phy->reg_pma)) {
		err = PTR_ERR(phy->reg_pma);
		goto out;
	}

	phy->reg_pmu = exynos_get_pmu_regmap_by_phandle(dev->of_node,
							"samsung,pmu-syscon");
	if (IS_ERR(phy->reg_pmu)) {
		err = PTR_ERR(phy->reg_pmu);
		dev_err(dev, "failed syscon remap for pmu\n");
		goto out;
	}

	gen_phy = devm_phy_create(dev, NULL, &samsung_ufs_phy_ops);
	if (IS_ERR(gen_phy)) {
		err = PTR_ERR(gen_phy);
		dev_err(dev, "failed to create PHY for ufs-phy\n");
		goto out;
	}

	drvdata = match->data;
	phy->dev = dev;
	phy->drvdata = drvdata;
	phy->cfgs = drvdata->cfgs;
	memcpy(&phy->isol, &drvdata->isol, sizeof(phy->isol));

	if (!of_property_read_u32_index(dev->of_node, "samsung,pmu-syscon", 1,
					&isol_offset))
		phy->isol.offset = isol_offset;

	phy->lane_cnt = PHY_DEF_LANE_CNT;

	err = samsung_ufs_phy_clks_init(phy);
	if (err) {
		dev_err(dev, "failed to get phy clocks\n");
		goto out;
	}

	phy_set_drvdata(gen_phy, phy);

	phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
	if (IS_ERR(phy_provider)) {
		err = PTR_ERR(phy_provider);
		dev_err(dev, "failed to register phy-provider\n");
		goto out;
	}
out:
	return err;
}

static const struct of_device_id samsung_ufs_phy_match[] = {
	{
		.compatible = "google,gs101-ufs-phy",
		.data = &tensor_gs101_ufs_phy,
	}, {
		.compatible = "samsung,exynos7-ufs-phy",
		.data = &exynos7_ufs_phy,
	}, {
		.compatible = "samsung,exynosautov9-ufs-phy",
		.data = &exynosautov9_ufs_phy,
	}, {
		.compatible = "tesla,fsd-ufs-phy",
		.data = &fsd_ufs_phy,
	},
	{},
};
MODULE_DEVICE_TABLE(of, samsung_ufs_phy_match);

static struct platform_driver samsung_ufs_phy_driver = {
	.probe  = samsung_ufs_phy_probe,
	.driver = {
		.name = "samsung-ufs-phy",
		.of_match_table = samsung_ufs_phy_match,
	},
};
module_platform_driver(samsung_ufs_phy_driver);
MODULE_DESCRIPTION("Samsung SoC UFS PHY Driver");
MODULE_AUTHOR("Seungwon Jeon <[email protected]>");
MODULE_AUTHOR("Alim Akhtar <[email protected]>");
MODULE_LICENSE("GPL v2");
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2017, Gustavo Romero, IBM Corp. * * Check if thread endianness is flipped inadvertently to BE on trap * caught in TM whilst MSR.FP and MSR.VEC are zero (i.e. just after * load_fp and load_vec overflowed). * * The issue can be checked on LE machines simply by zeroing load_fp * and load_vec and then causing a trap in TM. Since the endianness * changes to BE on return from the signal handler, 'nop' is * thread as an illegal instruction in following sequence: * tbegin. * beq 1f * trap * tend. * 1: nop * * However, although the issue is also present on BE machines, it's a * bit trickier to check it on BE machines because MSR.LE bit is set * to zero which determines a BE endianness that is the native * endianness on BE machines, so nothing notably critical happens, * i.e. no illegal instruction is observed immediately after returning * from the signal handler (as it happens on LE machines). Thus to test * it on BE machines LE endianness is forced after a first trap and then * the endianness is verified on subsequent traps to determine if the * endianness "flipped back" to the native endianness (BE). */ #define _GNU_SOURCE #include <error.h> #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <htmintrin.h> #include <inttypes.h> #include <pthread.h> #include <sched.h> #include <signal.h> #include <stdbool.h> #include "tm.h" #include "utils.h" #define pr_error(error_code, format, ...) 
\ error_at_line(1, error_code, __FILE__, __LINE__, format, ##__VA_ARGS__) #define MSR_LE 1UL #define LE 1UL pthread_t t0_ping; pthread_t t1_pong; int exit_from_pong; int trap_event; int le; bool success; void trap_signal_handler(int signo, siginfo_t *si, void *uc) { ucontext_t *ucp = uc; uint64_t thread_endianness; /* Get thread endianness: extract bit LE from MSR */ thread_endianness = MSR_LE & ucp->uc_mcontext.gp_regs[PT_MSR]; /* * Little-Endian Machine */ if (le) { /* First trap event */ if (trap_event == 0) { /* Do nothing. Since it is returning from this trap * event that endianness is flipped by the bug, so just * let the process return from the signal handler and * check on the second trap event if endianness is * flipped or not. */ } /* Second trap event */ else if (trap_event == 1) { /* * Since trap was caught in TM on first trap event, if * endianness was still LE (not flipped inadvertently) * after returning from the signal handler instruction * (1) is executed (basically a 'nop'), as it's located * at address of tbegin. +4 (rollback addr). As (1) on * LE endianness does in effect nothing, instruction (2) * is then executed again as 'trap', generating a second * trap event (note that in that case 'trap' is caught * not in transacional mode). On te other hand, if after * the return from the signal handler the endianness in- * advertently flipped, instruction (1) is tread as a * branch instruction, i.e. b .+8, hence instruction (3) * and (4) are executed (tbegin.; trap;) and we get sim- * ilaly on the trap signal handler, but now in TM mode. * Either way, it's now possible to check the MSR LE bit * once in the trap handler to verify if endianness was * flipped or not after the return from the second trap * event. If endianness is flipped, the bug is present. 
* Finally, getting a trap in TM mode or not is just * worth noting because it affects the math to determine * the offset added to the NIP on return: the NIP for a * trap caught in TM is the rollback address, i.e. the * next instruction after 'tbegin.', whilst the NIP for * a trap caught in non-transactional mode is the very * same address of the 'trap' instruction that generated * the trap event. */ if (thread_endianness == LE) { /* Go to 'success', i.e. instruction (6) */ ucp->uc_mcontext.gp_regs[PT_NIP] += 16; } else { /* * Thread endianness is BE, so it flipped * inadvertently. Thus we flip back to LE and * set NIP to go to 'failure', instruction (5). */ ucp->uc_mcontext.gp_regs[PT_MSR] |= 1UL; ucp->uc_mcontext.gp_regs[PT_NIP] += 4; } } } /* * Big-Endian Machine */ else { /* First trap event */ if (trap_event == 0) { /* * Force thread endianness to be LE. Instructions (1), * (3), and (4) will be executed, generating a second * trap in TM mode. */ ucp->uc_mcontext.gp_regs[PT_MSR] |= 1UL; } /* Second trap event */ else if (trap_event == 1) { /* * Do nothing. If bug is present on return from this * second trap event endianness will flip back "automat- * ically" to BE, otherwise thread endianness will * continue to be LE, just as it was set above. */ } /* A third trap event */ else { /* * Once here it means that after returning from the sec- * ond trap event instruction (4) (trap) was executed * as LE, generating a third trap event. In that case * endianness is still LE as set on return from the * first trap event, hence no bug. Otherwise, bug * flipped back to BE on return from the second trap * event and instruction (4) was executed as 'tdi' (so * basically a 'nop') and branch to 'failure' in * instruction (5) was taken to indicate failure and we * never get here. */ /* * Flip back to BE and go to instruction (6), i.e. go to * 'success'. 
*/ ucp->uc_mcontext.gp_regs[PT_MSR] &= ~1UL; ucp->uc_mcontext.gp_regs[PT_NIP] += 8; } } trap_event++; } void usr1_signal_handler(int signo, siginfo_t *si, void *not_used) { /* Got a USR1 signal from ping(), so just tell pong() to exit */ exit_from_pong = 1; } void *ping(void *not_used) { uint64_t i; trap_event = 0; /* * Wait an amount of context switches so load_fp and load_vec overflows * and MSR_[FP|VEC|V] is 0. */ for (i = 0; i < 1024*1024*512; i++) ; asm goto( /* * [NA] means "Native Endianness", i.e. it tells how a * instruction is executed on machine's native endianness (in * other words, native endianness matches kernel endianness). * [OP] means "Opposite Endianness", i.e. on a BE machine, it * tells how a instruction is executed as a LE instruction; con- * versely, on a LE machine, it tells how a instruction is * executed as a BE instruction. When [NA] is omitted, it means * that the native interpretation of a given instruction is not * relevant for the test. Likewise when [OP] is omitted. */ " tbegin. ;" /* (0) tbegin. [NA] */ " tdi 0, 0, 0x48;" /* (1) nop [NA]; b (3) [OP] */ " trap ;" /* (2) trap [NA] */ ".long 0x1D05007C;" /* (3) tbegin. [OP] */ ".long 0x0800E07F;" /* (4) trap [OP]; nop [NA] */ " b %l[failure] ;" /* (5) b [NA]; MSR.LE flipped (bug) */ " b %l[success] ;" /* (6) b [NA]; MSR.LE did not flip (ok)*/ : : : : failure, success); failure: success = false; goto exit_from_ping; success: success = true; exit_from_ping: /* Tell pong() to exit before leaving */ pthread_kill(t1_pong, SIGUSR1); return NULL; } void *pong(void *not_used) { while (!exit_from_pong) /* * Induce context switches on ping() thread * until ping() finishes its job and signs * to exit from this loop. 
*/ sched_yield(); return NULL; } int tm_trap_test(void) { uint16_t k = 1; int cpu, rc; pthread_attr_t attr; cpu_set_t cpuset; struct sigaction trap_sa; SKIP_IF(!have_htm()); SKIP_IF(htm_is_synthetic()); trap_sa.sa_flags = SA_SIGINFO; trap_sa.sa_sigaction = trap_signal_handler; sigaction(SIGTRAP, &trap_sa, NULL); struct sigaction usr1_sa; usr1_sa.sa_flags = SA_SIGINFO; usr1_sa.sa_sigaction = usr1_signal_handler; sigaction(SIGUSR1, &usr1_sa, NULL); cpu = pick_online_cpu(); FAIL_IF(cpu < 0); // Set only one CPU in the mask. Both threads will be bound to that CPU. CPU_ZERO(&cpuset); CPU_SET(cpu, &cpuset); /* Init pthread attribute */ rc = pthread_attr_init(&attr); if (rc) pr_error(rc, "pthread_attr_init()"); /* * Bind thread ping() and pong() both to CPU 0 so they ping-pong and * speed up context switches on ping() thread, speeding up the load_fp * and load_vec overflow. */ rc = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set_t), &cpuset); if (rc) pr_error(rc, "pthread_attr_setaffinity()"); /* Figure out the machine endianness */ le = (int) *(uint8_t *)&k; printf("%s machine detected. Checking if endianness flips %s", le ? "Little-Endian" : "Big-Endian", "inadvertently on trap in TM... "); rc = fflush(0); if (rc) pr_error(rc, "fflush()"); /* Launch ping() */ rc = pthread_create(&t0_ping, &attr, ping, NULL); if (rc) pr_error(rc, "pthread_create()"); exit_from_pong = 0; /* Launch pong() */ rc = pthread_create(&t1_pong, &attr, pong, NULL); if (rc) pr_error(rc, "pthread_create()"); rc = pthread_join(t0_ping, NULL); if (rc) pr_error(rc, "pthread_join()"); rc = pthread_join(t1_pong, NULL); if (rc) pr_error(rc, "pthread_join()"); if (success) { printf("no.\n"); /* no, endianness did not flip inadvertently */ return EXIT_SUCCESS; } printf("yes!\n"); /* yes, endianness did flip inadvertently */ return EXIT_FAILURE; } int main(int argc, char **argv) { return test_harness(tm_trap_test, "tm_trap_test"); }
/*
 * Copyright (c) 2010-2011 Atheros Communications Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include "ar9003_phy.h"
#include "ar9003_rtt.h"
#include "ar9003_mci.h"

#define MAX_MEASUREMENT	MAX_IQCAL_MEASUREMENT
/* Outlier thresholds used by ar9003_hw_detect_outlier() */
#define MAX_MAG_DELTA	11
#define MAX_PHS_DELTA	10
/* Number of Tx IQ cal passes stored per measurement */
#define MAXIQCAL	3

/*
 * Per-chain, per-gain-measurement Tx IQ correction coefficients,
 * with one slot per calibration pass (MAXIQCAL).
 */
struct coeff {
	int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
	int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MAXIQCAL];
	int iqc_coeff[2];
};

enum ar9003_cal_types {
	IQ_MISMATCH_CAL = BIT(0),
};

/*
 * Program the PHY for the calibration type selected in @currCal and
 * kick it off. Only IQ_MISMATCH_CAL is supported on this family.
 */
static void ar9003_hw_setup_calibration(struct ath_hw *ah,
					struct ath9k_cal_list *currCal)
{
	struct ath_common *common = ath9k_hw_common(ah);

	/* Select calibration to run */
	switch (currCal->calData->calType) {
	case IQ_MISMATCH_CAL:
		/*
		 * Start calibration with
		 * 2^(INIT_IQCAL_LOG_COUNT_MAX+1) samples
		 */
		REG_RMW_FIELD(ah, AR_PHY_TIMING4,
			      AR_PHY_TIMING4_IQCAL_LOG_COUNT_MAX,
			      currCal->calData->calCountMax);
		REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);

		ath_dbg(common, CALIBRATE,
			"starting IQ Mismatch Calibration\n");

		/* Kick-off cal */
		REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);

		break;
	default:
		ath_err(common, "Invalid calibration type\n");
		break;
	}
}

/*
 * Generic calibration routine.
 * Recalibrate the lower PHY chips to account for temperature/environment
 * changes.
 */
static bool ar9003_hw_per_calibration(struct ath_hw *ah,
				      struct ath9k_channel *ichan,
				      u8 rxchainmask,
				      struct ath9k_cal_list *currCal)
{
	/*
	 * NOTE(review): caldata (ah->caldata) is dereferenced below without
	 * a NULL check — confirm callers guarantee it is set on this path.
	 */
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	const struct ath9k_percal_data *cur_caldata = currCal->calData;

	/* Calibration in progress. */
	if (currCal->calState == CAL_RUNNING) {
		/* Check to see if it has finished. */
		if (REG_READ(ah, AR_PHY_TIMING4) & AR_PHY_TIMING4_DO_CAL)
			return false;

		/*
		 * Accumulate cal measures for active chains
		 */
		cur_caldata->calCollect(ah);
		ah->cal_samples++;

		if (ah->cal_samples >= cur_caldata->calNumSamples) {
			unsigned int i, numChains = 0;

			/* Count Rx chains enabled in rxchainmask */
			for (i = 0; i < AR9300_MAX_CHAINS; i++) {
				if (rxchainmask & (1 << i))
					numChains++;
			}

			/*
			 * Process accumulated data
			 */
			cur_caldata->calPostProc(ah, numChains);

			/* Calibration has finished. */
			caldata->CalValid |= cur_caldata->calType;
			currCal->calState = CAL_DONE;
			/* Only here does this routine report completion */
			return true;
		} else {
			/*
			 * Set-up collection of another sub-sample until we
			 * get desired number
			 */
			ar9003_hw_setup_calibration(ah, currCal);
		}
	} else if (!(caldata->CalValid & cur_caldata->calType)) {
		/* If current cal is marked invalid in channel, kick it off */
		ath9k_hw_reset_calibration(ah, currCal);
	}

	/* Still running (or just restarted): not done yet */
	return false;
}

static int ar9003_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
			       u8 rxchainmask, bool longcal)
{
	bool iscaldone = true;
	struct ath9k_cal_list *currCal = ah->cal_list_curr;
	int ret;

	/*
	 * For given calibration:
	 * 1. Call generic cal routine
	 * 2. When this cal is done (isCalDone) if we have more cals waiting
	 *    (eg after reset), mask this to upper layers by not propagating
	 *    isCalDone if it is set to TRUE.
	 *    Instead, change isCalDone to FALSE and setup the waiting cal(s)
	 *    to be run.
*/ if (currCal && (currCal->calState == CAL_RUNNING || currCal->calState == CAL_WAITING)) { iscaldone = ar9003_hw_per_calibration(ah, chan, rxchainmask, currCal); if (iscaldone) { ah->cal_list_curr = currCal = currCal->calNext; if (currCal->calState == CAL_WAITING) { iscaldone = false; ath9k_hw_reset_calibration(ah, currCal); } } } /* * Do NF cal only at longer intervals. Get the value from * the previous NF cal and update history buffer. */ if (longcal && ath9k_hw_getnf(ah, chan)) { /* * Load the NF from history buffer of the current channel. * NF is slow time-variant, so it is OK to use a historical * value. */ ret = ath9k_hw_loadnf(ah, ah->curchan); if (ret < 0) return ret; /* start NF calibration, without updating BB NF register */ ath9k_hw_start_nfcal(ah, false); } return iscaldone; } static void ar9003_hw_iqcal_collect(struct ath_hw *ah) { int i; /* Accumulate IQ cal measures for active chains */ for (i = 0; i < AR9300_MAX_CHAINS; i++) { if (ah->txchainmask & BIT(i)) { ah->totalPowerMeasI[i] += REG_READ(ah, AR_PHY_CAL_MEAS_0(i)); ah->totalPowerMeasQ[i] += REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); ah->totalIqCorrMeas[i] += (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); ath_dbg(ath9k_hw_common(ah), CALIBRATE, "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", ah->cal_samples, i, ah->totalPowerMeasI[i], ah->totalPowerMeasQ[i], ah->totalIqCorrMeas[i]); } } } static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) { struct ath_common *common = ath9k_hw_common(ah); u32 powerMeasQ, powerMeasI, iqCorrMeas; u32 qCoffDenom, iCoffDenom; int32_t qCoff, iCoff; int iqCorrNeg, i; static const u_int32_t offset_array[3] = { AR_PHY_RX_IQCAL_CORR_B0, AR_PHY_RX_IQCAL_CORR_B1, AR_PHY_RX_IQCAL_CORR_B2, }; for (i = 0; i < numChains; i++) { powerMeasI = ah->totalPowerMeasI[i]; powerMeasQ = ah->totalPowerMeasQ[i]; iqCorrMeas = ah->totalIqCorrMeas[i]; ath_dbg(common, CALIBRATE, "Starting IQ Cal and Correction for Chain %d\n", i); ath_dbg(common, CALIBRATE, "Original: Chn %d 
iq_corr_meas = 0x%08x\n", i, ah->totalIqCorrMeas[i]); iqCorrNeg = 0; if (iqCorrMeas > 0x80000000) { iqCorrMeas = (0xffffffff - iqCorrMeas) + 1; iqCorrNeg = 1; } ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); ath_dbg(common, CALIBRATE, "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); ath_dbg(common, CALIBRATE, "iqCorrNeg is 0x%08x\n", iqCorrNeg); iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256; qCoffDenom = powerMeasQ / 64; if ((iCoffDenom != 0) && (qCoffDenom != 0)) { iCoff = iqCorrMeas / iCoffDenom; qCoff = powerMeasI / qCoffDenom - 64; ath_dbg(common, CALIBRATE, "Chn %d iCoff = 0x%08x\n", i, iCoff); ath_dbg(common, CALIBRATE, "Chn %d qCoff = 0x%08x\n", i, qCoff); /* Force bounds on iCoff */ if (iCoff >= 63) iCoff = 63; else if (iCoff <= -63) iCoff = -63; /* Negate iCoff if iqCorrNeg == 0 */ if (iqCorrNeg == 0x0) iCoff = -iCoff; /* Force bounds on qCoff */ if (qCoff >= 63) qCoff = 63; else if (qCoff <= -63) qCoff = -63; iCoff = iCoff & 0x7f; qCoff = qCoff & 0x7f; ath_dbg(common, CALIBRATE, "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", i, iCoff, qCoff); ath_dbg(common, CALIBRATE, "Register offset (0x%04x) before update = 0x%x\n", offset_array[i], REG_READ(ah, offset_array[i])); if (AR_SREV_9565(ah) && (iCoff == 63 || qCoff == 63 || iCoff == -63 || qCoff == -63)) return; REG_RMW_FIELD(ah, offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, iCoff); REG_RMW_FIELD(ah, offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, qCoff); ath_dbg(common, CALIBRATE, "Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n", offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, REG_READ(ah, offset_array[i])); ath_dbg(common, CALIBRATE, "Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n", offset_array[i], AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, REG_READ(ah, offset_array[i])); ath_dbg(common, CALIBRATE, "IQ Cal and Correction done for Chain %d\n", i); } } REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0, 
AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE); ath_dbg(common, CALIBRATE, "IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n", (unsigned) (AR_PHY_RX_IQCAL_CORR_B0), AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE, REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0)); } static const struct ath9k_percal_data iq_cal_single_sample = { IQ_MISMATCH_CAL, MIN_CAL_SAMPLES, PER_MAX_LOG_COUNT, ar9003_hw_iqcal_collect, ar9003_hw_iqcalibrate }; static void ar9003_hw_init_cal_settings(struct ath_hw *ah) { ah->iq_caldata.calData = &iq_cal_single_sample; if (AR_SREV_9300_20_OR_LATER(ah)) { ah->enabled_cals |= TX_IQ_CAL; if (AR_SREV_9485_OR_LATER(ah) && !AR_SREV_9340(ah)) ah->enabled_cals |= TX_IQ_ON_AGC_CAL; } ah->supp_cals = IQ_MISMATCH_CAL; } #define OFF_UPPER_LT 24 #define OFF_LOWER_LT 7 static bool ar9003_hw_dynamic_osdac_selection(struct ath_hw *ah, bool txiqcal_done) { struct ath_common *common = ath9k_hw_common(ah); int ch0_done, osdac_ch0, dc_off_ch0_i1, dc_off_ch0_q1, dc_off_ch0_i2, dc_off_ch0_q2, dc_off_ch0_i3, dc_off_ch0_q3; int ch1_done, osdac_ch1, dc_off_ch1_i1, dc_off_ch1_q1, dc_off_ch1_i2, dc_off_ch1_q2, dc_off_ch1_i3, dc_off_ch1_q3; int ch2_done, osdac_ch2, dc_off_ch2_i1, dc_off_ch2_q1, dc_off_ch2_i2, dc_off_ch2_q2, dc_off_ch2_i3, dc_off_ch2_q3; bool status; u32 temp, val; /* * Clear offset and IQ calibration, run AGC cal. */ REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_OFFSET_CAL); REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); if (!status) { ath_dbg(common, CALIBRATE, "AGC cal without offset cal failed to complete in 1ms"); return false; } /* * Allow only offset calibration and disable the others * (Carrier Leak calibration, TX Filter calibration and * Peak Detector offset calibration). 
*/ REG_SET_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_OFFSET_CAL); REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_FLTR_CAL); REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_PKDET_CAL); ch0_done = 0; ch1_done = 0; ch2_done = 0; while ((ch0_done == 0) || (ch1_done == 0) || (ch2_done == 0)) { osdac_ch0 = (REG_READ(ah, AR_PHY_65NM_CH0_BB1) >> 30) & 0x3; osdac_ch1 = (REG_READ(ah, AR_PHY_65NM_CH1_BB1) >> 30) & 0x3; osdac_ch2 = (REG_READ(ah, AR_PHY_65NM_CH2_BB1) >> 30) & 0x3; REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); if (!status) { ath_dbg(common, CALIBRATE, "DC offset cal failed to complete in 1ms"); return false; } REG_CLR_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); /* * High gain. */ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3, ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (1 << 8))); REG_WRITE(ah, AR_PHY_65NM_CH1_BB3, ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (1 << 8))); REG_WRITE(ah, AR_PHY_65NM_CH2_BB3, ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (1 << 8))); temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3); dc_off_ch0_i1 = (temp >> 26) & 0x1f; dc_off_ch0_q1 = (temp >> 21) & 0x1f; temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3); dc_off_ch1_i1 = (temp >> 26) & 0x1f; dc_off_ch1_q1 = (temp >> 21) & 0x1f; temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3); dc_off_ch2_i1 = (temp >> 26) & 0x1f; dc_off_ch2_q1 = (temp >> 21) & 0x1f; /* * Low gain. 
*/ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3, ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (2 << 8))); REG_WRITE(ah, AR_PHY_65NM_CH1_BB3, ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (2 << 8))); REG_WRITE(ah, AR_PHY_65NM_CH2_BB3, ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (2 << 8))); temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3); dc_off_ch0_i2 = (temp >> 26) & 0x1f; dc_off_ch0_q2 = (temp >> 21) & 0x1f; temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3); dc_off_ch1_i2 = (temp >> 26) & 0x1f; dc_off_ch1_q2 = (temp >> 21) & 0x1f; temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3); dc_off_ch2_i2 = (temp >> 26) & 0x1f; dc_off_ch2_q2 = (temp >> 21) & 0x1f; /* * Loopback. */ REG_WRITE(ah, AR_PHY_65NM_CH0_BB3, ((REG_READ(ah, AR_PHY_65NM_CH0_BB3) & 0xfffffcff) | (3 << 8))); REG_WRITE(ah, AR_PHY_65NM_CH1_BB3, ((REG_READ(ah, AR_PHY_65NM_CH1_BB3) & 0xfffffcff) | (3 << 8))); REG_WRITE(ah, AR_PHY_65NM_CH2_BB3, ((REG_READ(ah, AR_PHY_65NM_CH2_BB3) & 0xfffffcff) | (3 << 8))); temp = REG_READ(ah, AR_PHY_65NM_CH0_BB3); dc_off_ch0_i3 = (temp >> 26) & 0x1f; dc_off_ch0_q3 = (temp >> 21) & 0x1f; temp = REG_READ(ah, AR_PHY_65NM_CH1_BB3); dc_off_ch1_i3 = (temp >> 26) & 0x1f; dc_off_ch1_q3 = (temp >> 21) & 0x1f; temp = REG_READ(ah, AR_PHY_65NM_CH2_BB3); dc_off_ch2_i3 = (temp >> 26) & 0x1f; dc_off_ch2_q3 = (temp >> 21) & 0x1f; if ((dc_off_ch0_i1 > OFF_UPPER_LT) || (dc_off_ch0_i1 < OFF_LOWER_LT) || (dc_off_ch0_i2 > OFF_UPPER_LT) || (dc_off_ch0_i2 < OFF_LOWER_LT) || (dc_off_ch0_i3 > OFF_UPPER_LT) || (dc_off_ch0_i3 < OFF_LOWER_LT) || (dc_off_ch0_q1 > OFF_UPPER_LT) || (dc_off_ch0_q1 < OFF_LOWER_LT) || (dc_off_ch0_q2 > OFF_UPPER_LT) || (dc_off_ch0_q2 < OFF_LOWER_LT) || (dc_off_ch0_q3 > OFF_UPPER_LT) || (dc_off_ch0_q3 < OFF_LOWER_LT)) { if (osdac_ch0 == 3) { ch0_done = 1; } else { osdac_ch0++; val = REG_READ(ah, AR_PHY_65NM_CH0_BB1) & 0x3fffffff; val |= (osdac_ch0 << 30); REG_WRITE(ah, AR_PHY_65NM_CH0_BB1, val); ch0_done = 0; } } else { ch0_done = 1; } if ((dc_off_ch1_i1 > OFF_UPPER_LT) || (dc_off_ch1_i1 < 
OFF_LOWER_LT) || (dc_off_ch1_i2 > OFF_UPPER_LT) || (dc_off_ch1_i2 < OFF_LOWER_LT) || (dc_off_ch1_i3 > OFF_UPPER_LT) || (dc_off_ch1_i3 < OFF_LOWER_LT) || (dc_off_ch1_q1 > OFF_UPPER_LT) || (dc_off_ch1_q1 < OFF_LOWER_LT) || (dc_off_ch1_q2 > OFF_UPPER_LT) || (dc_off_ch1_q2 < OFF_LOWER_LT) || (dc_off_ch1_q3 > OFF_UPPER_LT) || (dc_off_ch1_q3 < OFF_LOWER_LT)) { if (osdac_ch1 == 3) { ch1_done = 1; } else { osdac_ch1++; val = REG_READ(ah, AR_PHY_65NM_CH1_BB1) & 0x3fffffff; val |= (osdac_ch1 << 30); REG_WRITE(ah, AR_PHY_65NM_CH1_BB1, val); ch1_done = 0; } } else { ch1_done = 1; } if ((dc_off_ch2_i1 > OFF_UPPER_LT) || (dc_off_ch2_i1 < OFF_LOWER_LT) || (dc_off_ch2_i2 > OFF_UPPER_LT) || (dc_off_ch2_i2 < OFF_LOWER_LT) || (dc_off_ch2_i3 > OFF_UPPER_LT) || (dc_off_ch2_i3 < OFF_LOWER_LT) || (dc_off_ch2_q1 > OFF_UPPER_LT) || (dc_off_ch2_q1 < OFF_LOWER_LT) || (dc_off_ch2_q2 > OFF_UPPER_LT) || (dc_off_ch2_q2 < OFF_LOWER_LT) || (dc_off_ch2_q3 > OFF_UPPER_LT) || (dc_off_ch2_q3 < OFF_LOWER_LT)) { if (osdac_ch2 == 3) { ch2_done = 1; } else { osdac_ch2++; val = REG_READ(ah, AR_PHY_65NM_CH2_BB1) & 0x3fffffff; val |= (osdac_ch2 << 30); REG_WRITE(ah, AR_PHY_65NM_CH2_BB1, val); ch2_done = 0; } } else { ch2_done = 1; } } REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_OFFSET_CAL); REG_SET_BIT(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN); /* * We don't need to check txiqcal_done here since it is always * set for AR9550. */ REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah), AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL); return true; } /* * solve 4x4 linear equation used in loopback iq cal. 
 */
static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
				   s32 sin_2phi_1, s32 cos_2phi_1,
				   s32 sin_2phi_2, s32 cos_2phi_2,
				   s32 mag_a0_d0, s32 phs_a0_d0,
				   s32 mag_a1_d0, s32 phs_a1_d0,
				   s32 solved_eq[])
{
	s32 f1 = cos_2phi_1 - cos_2phi_2,
	    f3 = sin_2phi_1 - sin_2phi_2,
	    f2;
	s32 mag_tx, phs_tx, mag_rx, phs_rx;
	/* Fixed-point scale (Q15) used throughout the IQ cal math */
	const s32 result_shift = 1 << 15;
	struct ath_common *common = ath9k_hw_common(ah);

	/* Common denominator: ((f1/8)^2 + (f3/8)^2) / 512 */
	f2 = ((f1 >> 3) * (f1 >> 3) + (f3 >> 3) * (f3 >> 3)) >> 9;

	/* Guard the division below; report failure instead of dividing by 0 */
	if (!f2) {
		ath_dbg(common, CALIBRATE, "Divide by 0\n");
		return false;
	}

	/* mag mismatch, tx */
	mag_tx = f1 * (mag_a0_d0 - mag_a1_d0) + f3 * (phs_a0_d0 - phs_a1_d0);
	/* phs mismatch, tx */
	phs_tx = f3 * (-mag_a0_d0 + mag_a1_d0) + f1 * (phs_a0_d0 - phs_a1_d0);

	mag_tx = (mag_tx / f2);
	phs_tx = (phs_tx / f2);

	/* mag mismatch, rx */
	mag_rx = mag_a0_d0 - (cos_2phi_1 * mag_tx + sin_2phi_1 * phs_tx) /
		 result_shift;
	/* phs mismatch, rx */
	phs_rx = phs_a0_d0 + (sin_2phi_1 * mag_tx - cos_2phi_1 * phs_tx) /
		 result_shift;

	/* Results, in caller-fixed order: tx mag/phs then rx mag/phs */
	solved_eq[0] = mag_tx;
	solved_eq[1] = phs_tx;
	solved_eq[2] = mag_rx;
	solved_eq[3] = phs_rx;

	return true;
}

/*
 * Approximate the magnitude sqrt(in_re^2 + in_im^2) using only
 * shifts/adds: max - max/32 + min/8 + min/4.
 */
static s32 ar9003_hw_find_mag_approx(struct ath_hw *ah, s32 in_re, s32 in_im)
{
	s32 abs_i = abs(in_re),
	    abs_q = abs(in_im),
	    max_abs, min_abs;

	if (abs_i > abs_q) {
		max_abs = abs_i;
		min_abs = abs_q;
	} else {
		max_abs = abs_q;
		min_abs = abs_i;
	}

	return max_abs - (max_abs / 32) + (min_abs / 8) + (min_abs / 4);
}

#define DELPT 32

/*
 * Unpack raw IQ cal hardware results for one chain and derive the
 * quantized Tx/Rx IQ correction coefficients (iqc_coeff[0]/[1]).
 * Returns false when the raw data would cause a divide-by-zero or
 * fails the sanity bounds below.
 */
static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah, s32 chain_idx,
				   const s32 iq_res[], s32 iqc_coeff[])
{
	s32 i2_m_q2_a0_d0, i2_p_q2_a0_d0, iq_corr_a0_d0,
	    i2_m_q2_a0_d1, i2_p_q2_a0_d1, iq_corr_a0_d1,
	    i2_m_q2_a1_d0, i2_p_q2_a1_d0, iq_corr_a1_d0,
	    i2_m_q2_a1_d1, i2_p_q2_a1_d1, iq_corr_a1_d1;
	s32 mag_a0_d0, mag_a1_d0, mag_a0_d1, mag_a1_d1,
	    phs_a0_d0, phs_a1_d0, phs_a0_d1, phs_a1_d1,
	    sin_2phi_1, cos_2phi_1, sin_2phi_2, cos_2phi_2;
	s32 mag_tx, phs_tx, mag_rx, phs_rx;
	s32 solved_eq[4], mag_corr_tx, phs_corr_tx, mag_corr_rx, phs_corr_rx,
	    q_q_coff, q_i_coff;
	/* Q15 scale, must match result_shift in ar9003_hw_solve_iq_cal() */
	const s32 res_scale = 1 << 15;
	const s32
delpt_shift = 1 << 8; s32 mag1, mag2; struct ath_common *common = ath9k_hw_common(ah); i2_m_q2_a0_d0 = iq_res[0] & 0xfff; i2_p_q2_a0_d0 = (iq_res[0] >> 12) & 0xfff; iq_corr_a0_d0 = ((iq_res[0] >> 24) & 0xff) + ((iq_res[1] & 0xf) << 8); if (i2_m_q2_a0_d0 > 0x800) i2_m_q2_a0_d0 = -((0xfff - i2_m_q2_a0_d0) + 1); if (i2_p_q2_a0_d0 > 0x800) i2_p_q2_a0_d0 = -((0xfff - i2_p_q2_a0_d0) + 1); if (iq_corr_a0_d0 > 0x800) iq_corr_a0_d0 = -((0xfff - iq_corr_a0_d0) + 1); i2_m_q2_a0_d1 = (iq_res[1] >> 4) & 0xfff; i2_p_q2_a0_d1 = (iq_res[2] & 0xfff); iq_corr_a0_d1 = (iq_res[2] >> 12) & 0xfff; if (i2_m_q2_a0_d1 > 0x800) i2_m_q2_a0_d1 = -((0xfff - i2_m_q2_a0_d1) + 1); if (iq_corr_a0_d1 > 0x800) iq_corr_a0_d1 = -((0xfff - iq_corr_a0_d1) + 1); i2_m_q2_a1_d0 = ((iq_res[2] >> 24) & 0xff) + ((iq_res[3] & 0xf) << 8); i2_p_q2_a1_d0 = (iq_res[3] >> 4) & 0xfff; iq_corr_a1_d0 = iq_res[4] & 0xfff; if (i2_m_q2_a1_d0 > 0x800) i2_m_q2_a1_d0 = -((0xfff - i2_m_q2_a1_d0) + 1); if (i2_p_q2_a1_d0 > 0x800) i2_p_q2_a1_d0 = -((0xfff - i2_p_q2_a1_d0) + 1); if (iq_corr_a1_d0 > 0x800) iq_corr_a1_d0 = -((0xfff - iq_corr_a1_d0) + 1); i2_m_q2_a1_d1 = (iq_res[4] >> 12) & 0xfff; i2_p_q2_a1_d1 = ((iq_res[4] >> 24) & 0xff) + ((iq_res[5] & 0xf) << 8); iq_corr_a1_d1 = (iq_res[5] >> 4) & 0xfff; if (i2_m_q2_a1_d1 > 0x800) i2_m_q2_a1_d1 = -((0xfff - i2_m_q2_a1_d1) + 1); if (i2_p_q2_a1_d1 > 0x800) i2_p_q2_a1_d1 = -((0xfff - i2_p_q2_a1_d1) + 1); if (iq_corr_a1_d1 > 0x800) iq_corr_a1_d1 = -((0xfff - iq_corr_a1_d1) + 1); if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) || (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) { ath_dbg(common, CALIBRATE, "Divide by 0:\n" "a0_d0=%d\n" "a0_d1=%d\n" "a2_d0=%d\n" "a1_d1=%d\n", i2_p_q2_a0_d0, i2_p_q2_a0_d1, i2_p_q2_a1_d0, i2_p_q2_a1_d1); return false; } if ((i2_p_q2_a0_d0 < 1024) || (i2_p_q2_a0_d0 > 2047) || (i2_p_q2_a1_d0 < 0) || (i2_p_q2_a1_d1 < 0) || (i2_p_q2_a0_d0 <= i2_m_q2_a0_d0) || (i2_p_q2_a0_d0 <= iq_corr_a0_d0) || (i2_p_q2_a0_d1 <= i2_m_q2_a0_d1) || (i2_p_q2_a0_d1 <= 
iq_corr_a0_d1) || (i2_p_q2_a1_d0 <= i2_m_q2_a1_d0) || (i2_p_q2_a1_d0 <= iq_corr_a1_d0) || (i2_p_q2_a1_d1 <= i2_m_q2_a1_d1) || (i2_p_q2_a1_d1 <= iq_corr_a1_d1)) { return false; } mag_a0_d0 = (i2_m_q2_a0_d0 * res_scale) / i2_p_q2_a0_d0; phs_a0_d0 = (iq_corr_a0_d0 * res_scale) / i2_p_q2_a0_d0; mag_a0_d1 = (i2_m_q2_a0_d1 * res_scale) / i2_p_q2_a0_d1; phs_a0_d1 = (iq_corr_a0_d1 * res_scale) / i2_p_q2_a0_d1; mag_a1_d0 = (i2_m_q2_a1_d0 * res_scale) / i2_p_q2_a1_d0; phs_a1_d0 = (iq_corr_a1_d0 * res_scale) / i2_p_q2_a1_d0; mag_a1_d1 = (i2_m_q2_a1_d1 * res_scale) / i2_p_q2_a1_d1; phs_a1_d1 = (iq_corr_a1_d1 * res_scale) / i2_p_q2_a1_d1; /* w/o analog phase shift */ sin_2phi_1 = (((mag_a0_d0 - mag_a0_d1) * delpt_shift) / DELPT); /* w/o analog phase shift */ cos_2phi_1 = (((phs_a0_d1 - phs_a0_d0) * delpt_shift) / DELPT); /* w/ analog phase shift */ sin_2phi_2 = (((mag_a1_d0 - mag_a1_d1) * delpt_shift) / DELPT); /* w/ analog phase shift */ cos_2phi_2 = (((phs_a1_d1 - phs_a1_d0) * delpt_shift) / DELPT); /* * force sin^2 + cos^2 = 1; * find magnitude by approximation */ mag1 = ar9003_hw_find_mag_approx(ah, cos_2phi_1, sin_2phi_1); mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2); if ((mag1 == 0) || (mag2 == 0)) { ath_dbg(common, CALIBRATE, "Divide by 0: mag1=%d, mag2=%d\n", mag1, mag2); return false; } /* normalization sin and cos by mag */ sin_2phi_1 = (sin_2phi_1 * res_scale / mag1); cos_2phi_1 = (cos_2phi_1 * res_scale / mag1); sin_2phi_2 = (sin_2phi_2 * res_scale / mag2); cos_2phi_2 = (cos_2phi_2 * res_scale / mag2); /* calculate IQ mismatch */ if (!ar9003_hw_solve_iq_cal(ah, sin_2phi_1, cos_2phi_1, sin_2phi_2, cos_2phi_2, mag_a0_d0, phs_a0_d0, mag_a1_d0, phs_a1_d0, solved_eq)) { ath_dbg(common, CALIBRATE, "Call to ar9003_hw_solve_iq_cal() failed\n"); return false; } mag_tx = solved_eq[0]; phs_tx = solved_eq[1]; mag_rx = solved_eq[2]; phs_rx = solved_eq[3]; ath_dbg(common, CALIBRATE, "chain %d: mag mismatch=%d phase mismatch=%d\n", chain_idx, mag_tx/res_scale, 
phs_tx/res_scale); if (res_scale == mag_tx) { ath_dbg(common, CALIBRATE, "Divide by 0: mag_tx=%d, res_scale=%d\n", mag_tx, res_scale); return false; } /* calculate and quantize Tx IQ correction factor */ mag_corr_tx = (mag_tx * res_scale) / (res_scale - mag_tx); phs_corr_tx = -phs_tx; q_q_coff = (mag_corr_tx * 128 / res_scale); q_i_coff = (phs_corr_tx * 256 / res_scale); ath_dbg(common, CALIBRATE, "tx chain %d: mag corr=%d phase corr=%d\n", chain_idx, q_q_coff, q_i_coff); if (q_i_coff < -63) q_i_coff = -63; if (q_i_coff > 63) q_i_coff = 63; if (q_q_coff < -63) q_q_coff = -63; if (q_q_coff > 63) q_q_coff = 63; iqc_coeff[0] = (q_q_coff * 128) + (0x7f & q_i_coff); ath_dbg(common, CALIBRATE, "tx chain %d: iq corr coeff=%x\n", chain_idx, iqc_coeff[0]); if (-mag_rx == res_scale) { ath_dbg(common, CALIBRATE, "Divide by 0: mag_rx=%d, res_scale=%d\n", mag_rx, res_scale); return false; } /* calculate and quantize Rx IQ correction factors */ mag_corr_rx = (-mag_rx * res_scale) / (res_scale + mag_rx); phs_corr_rx = -phs_rx; q_q_coff = (mag_corr_rx * 128 / res_scale); q_i_coff = (phs_corr_rx * 256 / res_scale); ath_dbg(common, CALIBRATE, "rx chain %d: mag corr=%d phase corr=%d\n", chain_idx, q_q_coff, q_i_coff); if (q_i_coff < -63) q_i_coff = -63; if (q_i_coff > 63) q_i_coff = 63; if (q_q_coff < -63) q_q_coff = -63; if (q_q_coff > 63) q_q_coff = 63; iqc_coeff[1] = (q_q_coff * 128) + (0x7f & q_i_coff); ath_dbg(common, CALIBRATE, "rx chain %d: iq corr coeff=%x\n", chain_idx, iqc_coeff[1]); return true; } static void ar9003_hw_detect_outlier(int mp_coeff[][MAXIQCAL], int nmeasurement, int max_delta) { int mp_max = -64, max_idx = 0; int mp_min = 63, min_idx = 0; int mp_avg = 0, i, outlier_idx = 0, mp_count = 0; /* find min/max mismatch across all calibrated gains */ for (i = 0; i < nmeasurement; i++) { if (mp_coeff[i][0] > mp_max) { mp_max = mp_coeff[i][0]; max_idx = i; } else if (mp_coeff[i][0] < mp_min) { mp_min = mp_coeff[i][0]; min_idx = i; } } /* find average (exclude max abs 
value) */ for (i = 0; i < nmeasurement; i++) { if ((abs(mp_coeff[i][0]) < abs(mp_max)) || (abs(mp_coeff[i][0]) < abs(mp_min))) { mp_avg += mp_coeff[i][0]; mp_count++; } } /* * finding mean magnitude/phase if possible, otherwise * just use the last value as the mean */ if (mp_count) mp_avg /= mp_count; else mp_avg = mp_coeff[nmeasurement - 1][0]; /* detect outlier */ if (abs(mp_max - mp_min) > max_delta) { if (abs(mp_max - mp_avg) > abs(mp_min - mp_avg)) outlier_idx = max_idx; else outlier_idx = min_idx; mp_coeff[outlier_idx][0] = mp_avg; } } static void ar9003_hw_tx_iq_cal_outlier_detection(struct ath_hw *ah, struct coeff *coeff, bool is_reusable) { int i, im, nmeasurement; int magnitude, phase; u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS]; struct ath9k_hw_cal_data *caldata = ah->caldata; memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff)); for (i = 0; i < MAX_MEASUREMENT / 2; i++) { tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] = AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i); if (!AR_SREV_9485(ah)) { tx_corr_coeff[i * 2][1] = tx_corr_coeff[(i * 2) + 1][1] = AR_PHY_TX_IQCAL_CORR_COEFF_B1(i); tx_corr_coeff[i * 2][2] = tx_corr_coeff[(i * 2) + 1][2] = AR_PHY_TX_IQCAL_CORR_COEFF_B2(i); } } /* Load the average of 2 passes */ for (i = 0; i < AR9300_MAX_CHAINS; i++) { if (!(ah->txchainmask & (1 << i))) continue; nmeasurement = REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_STATUS_B0(ah), AR_PHY_CALIBRATED_GAINS_0); if (nmeasurement > MAX_MEASUREMENT) nmeasurement = MAX_MEASUREMENT; /* * Skip normal outlier detection for AR9550. 
*/
		if (!AR_SREV_9550(ah)) {
			/* detect outlier only if nmeasurement > 1 */
			if (nmeasurement > 1) {
				/* Detect magnitude outlier */
				ar9003_hw_detect_outlier(coeff->mag_coeff[i],
						nmeasurement, MAX_MAG_DELTA);

				/* Detect phase outlier */
				ar9003_hw_detect_outlier(coeff->phs_coeff[i],
						nmeasurement, MAX_PHS_DELTA);
			}
		}

		for (im = 0; im < nmeasurement; im++) {
			magnitude = coeff->mag_coeff[i][im][0];
			phase = coeff->phs_coeff[i][im][0];

			/* phase in bits 6:0, magnitude in bits 13:7 */
			coeff->iqc_coeff[0] =
				(phase & 0x7f) | ((magnitude & 0x7f) << 7);

			/* even measurement -> COEFF_00 field, odd -> COEFF_01 */
			if ((im % 2) == 0)
				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
					AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
					coeff->iqc_coeff[0]);
			else
				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
					AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
					coeff->iqc_coeff[0]);

			if (caldata)
				caldata->tx_corr_coeff[im][i] =
					coeff->iqc_coeff[0];
		}
		if (caldata)
			caldata->num_measures[i] = nmeasurement;
	}

	/* enable IQ correction on the Tx and the Rx loopback paths */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
		      AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);

	if (caldata) {
		if (is_reusable)
			set_bit(TXIQCAL_DONE, &caldata->cal_flags);
		else
			clear_bit(TXIQCAL_DONE, &caldata->cal_flags);
	}

	return;
}

/*
 * Trigger a Tx IQ calibration in hardware and wait for completion.
 * Any forced Tx gain is released first.  Returns false on timeout.
 */
static bool ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
{
	struct ath_common *common = ath9k_hw_common(ah);
	u8 tx_gain_forced;

	tx_gain_forced = REG_READ_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
					AR_PHY_TXGAIN_FORCE);
	if (tx_gain_forced)
		REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
			      AR_PHY_TXGAIN_FORCE, 0);

	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START(ah),
		      AR_PHY_TX_IQCAL_START_DO_CAL, 1);

	if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START(ah),
			   AR_PHY_TX_IQCAL_START_DO_CAL, 0,
			   AH_WAIT_TIMEOUT)) {
		ath_dbg(common, CALIBRATE, "Tx IQ Cal is not completed\n");
		return false;
	}
	return true;
}

/*
 * Sort the MAXIQCAL samples of every measurement for chain i (simple
 * exchange sort) so that the median can be picked from the middle slot.
 */
static void __ar955x_tx_iq_cal_sort(struct ath_hw *ah,
				    struct coeff *coeff,
				    int i, int nmeasurement)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int im, ix, iy;

	for (im = 0; im < nmeasurement; im++) {
		for (ix = 0; ix < MAXIQCAL - 1; ix++) {
			for (iy = ix + 
1; iy <= MAXIQCAL - 1; iy++) {
				if (coeff->mag_coeff[i][im][iy] <
				    coeff->mag_coeff[i][im][ix]) {
					swap(coeff->mag_coeff[i][im][ix],
					     coeff->mag_coeff[i][im][iy]);
				}
				if (coeff->phs_coeff[i][im][iy] <
				    coeff->phs_coeff[i][im][ix]) {
					swap(coeff->phs_coeff[i][im][ix],
					     coeff->phs_coeff[i][im][iy]);
				}
			}
		}
		/* sorted: the median now lives at index MAXIQCAL / 2 */
		coeff->mag_coeff[i][im][0] =
			coeff->mag_coeff[i][im][MAXIQCAL / 2];
		coeff->phs_coeff[i][im][0] =
			coeff->phs_coeff[i][im][MAXIQCAL / 2];

		ath_dbg(common, CALIBRATE,
			"IQCAL: Median [ch%d][gain%d]: mag = %d phase = %d\n",
			i, im,
			coeff->mag_coeff[i][im][0],
			coeff->phs_coeff[i][im][0]);
	}
}

/*
 * On the final calibration pass (iqcal_idx == MAXIQCAL - 1), reduce the
 * per-pass samples of every chain to their medians.  Returns true only
 * on that final pass.
 */
static bool ar955x_tx_iq_cal_median(struct ath_hw *ah,
				    struct coeff *coeff,
				    int iqcal_idx,
				    int nmeasurement)
{
	int i;

	if ((iqcal_idx + 1) != MAXIQCAL)
		return false;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		__ar955x_tx_iq_cal_sort(ah, coeff, i, nmeasurement);
	}

	return true;
}

/*
 * Read back the raw Tx IQ calibration results for every active chain,
 * convert them to correction coefficients and, unless AR955x is still
 * collecting samples for its median, run outlier detection and program
 * the hardware.
 */
static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah,
					  int iqcal_idx,
					  bool is_reusable)
{
	struct ath_common *common = ath9k_hw_common(ah);
	const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
		AR_PHY_TX_IQCAL_STATUS_B0(ah),
		AR_PHY_TX_IQCAL_STATUS_B1,
		AR_PHY_TX_IQCAL_STATUS_B2,
	};
	const u_int32_t chan_info_tab[] = {
		AR_PHY_CHAN_INFO_TAB_0,
		AR_PHY_CHAN_INFO_TAB_1,
		AR_PHY_CHAN_INFO_TAB_2,
	};
	/* static: accumulates results across the repeated AR955x passes */
	static struct coeff coeff;
	s32 iq_res[6];
	int i, im, j;
	int nmeasurement = 0;
	bool outlier_detect = true;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (!(ah->txchainmask & (1 << i)))
			continue;

		nmeasurement = REG_READ_FIELD(ah,
				AR_PHY_TX_IQCAL_STATUS_B0(ah),
				AR_PHY_CALIBRATED_GAINS_0);
		if (nmeasurement > MAX_MEASUREMENT)
			nmeasurement = MAX_MEASUREMENT;

		for (im = 0; im < nmeasurement; im++) {
			ath_dbg(common, CALIBRATE,
				"Doing Tx IQ Cal for chain %d\n", i);

			if (REG_READ(ah, txiqcal_status[i]) &
			    AR_PHY_TX_IQCAL_STATUS_FAILED) {
				ath_dbg(common, CALIBRATE,
					"Tx IQ Cal failed for chain %d\n", i);
				goto tx_iqcal_fail;
			}

			/* three register pairs of raw results per gain */
			for (j = 0; j < 3; j++) {
				u32 idx = 2 * j, offset = 4 * (3 * im + j);

				REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY(ah), 
AR_PHY_CHAN_INFO_TAB_S2_READ, 0);

				/* 32 bits */
				iq_res[idx] = REG_READ(ah,
						chan_info_tab[i] + offset);

				REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY(ah),
					      AR_PHY_CHAN_INFO_TAB_S2_READ, 1);

				/* 16 bits */
				iq_res[idx + 1] = 0xffff &
					REG_READ(ah, chan_info_tab[i] + offset);

				ath_dbg(common, CALIBRATE,
					"IQ_RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
					idx, iq_res[idx], idx + 1,
					iq_res[idx + 1]);
			}

			if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
						    coeff.iqc_coeff)) {
				ath_dbg(common, CALIBRATE,
					"Failed in calculation of IQ correction\n");
				goto tx_iqcal_fail;
			}

			/* unpack and sign-extend the packed 7-bit fields */
			coeff.phs_coeff[i][im][iqcal_idx] =
				coeff.iqc_coeff[0] & 0x7f;
			coeff.mag_coeff[i][im][iqcal_idx] =
				(coeff.iqc_coeff[0] >> 7) & 0x7f;

			if (coeff.mag_coeff[i][im][iqcal_idx] > 63)
				coeff.mag_coeff[i][im][iqcal_idx] -= 128;
			if (coeff.phs_coeff[i][im][iqcal_idx] > 63)
				coeff.phs_coeff[i][im][iqcal_idx] -= 128;
		}
	}

	if (AR_SREV_9550(ah))
		outlier_detect = ar955x_tx_iq_cal_median(ah, &coeff,
							 iqcal_idx,
							 nmeasurement);
	if (outlier_detect)
		ar9003_hw_tx_iq_cal_outlier_detection(ah, &coeff, is_reusable);

	return;

tx_iqcal_fail:
	ath_dbg(common, CALIBRATE, "Tx IQ Cal failed\n");
	return;
}

/*
 * Reprogram the Tx IQ correction coefficients cached in caldata from a
 * previous calibration into the per-chain correction registers.
 */
static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
{
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
	int i, im;

	memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
	/* same register layout as in the outlier-detection path */
	for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
		tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
					AR_PHY_TX_IQCAL_CORR_COEFF_B0(ah, i);
		if (!AR_SREV_9485(ah)) {
			tx_corr_coeff[i * 2][1] =
			tx_corr_coeff[(i * 2) + 1][1] =
					AR_PHY_TX_IQCAL_CORR_COEFF_B1(i);
			tx_corr_coeff[i * 2][2] =
			tx_corr_coeff[(i * 2) + 1][2] =
					AR_PHY_TX_IQCAL_CORR_COEFF_B2(i);
		}
	}

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (!(ah->txchainmask & (1 << i)))
			continue;

		for (im = 0; im < caldata->num_measures[i]; im++) {
			if ((im % 2) == 0)
				REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
					AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
					caldata->tx_corr_coeff[im][i]);
			else
				REG_RMW_FIELD(ah, 
tx_corr_coeff[im][i],
					AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
					caldata->tx_corr_coeff[im][i]);
		}
	}

	/* re-enable IQ correction on the Tx and Rx loopback paths */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
		      AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
}

/*
 * Manual peak-detect calibration for one Rx chain: successively
 * approximate the 6-bit CALDAC value from the AGC comparator output
 * and leave that value programmed.
 */
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{
	int offset[8] = {0}, total = 0, test;
	int agc_out, i, peak_detect_threshold = 0;

	/* chip-specific comparator threshold; 0 for other parts */
	if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
		peak_detect_threshold = 8;
	else if (AR_SREV_9561(ah))
		peak_detect_threshold = 11;

	/*
	 * Turn off LNA/SW.
	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
		      AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
		      AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);

	if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9330_11(ah)) {
		if (is_2g)
			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
				      AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
		else
			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
				      AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
	}

	/*
	 * Turn off RXON.
	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
		      AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
		      AR_PHY_65NM_RXTX2_RXON, 0x0);

	/*
	 * Turn on AGC for cal. 
*/
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_ON_OVR, 0x1);
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);

	if (AR_SREV_9330_11(ah))
		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);

	if (is_2g)
		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
			      AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
			      peak_detect_threshold);
	else
		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
			      AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
			      peak_detect_threshold);

	/*
	 * Successive approximation over the 6 CALDAC bits, MSB first:
	 * keep a trial bit when the AGC comparator output reads 0.
	 */
	for (i = 6; i > 0; i--) {
		offset[i] = BIT(i - 1);
		test = total + offset[i];

		if (is_2g)
			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
				      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR,
				      test);
		else
			REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
				      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR,
				      test);
		udelay(100);
		agc_out = REG_READ_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
					 AR_PHY_65NM_RXRF_AGC_AGC_OUT);
		offset[i] = (agc_out) ? 0 : 1;
		total += (offset[i] << (i - 1));
	}

	if (is_2g)
		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
			      AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, total);
	else
		REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
			      AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);

	/*
	 * Turn on LNA.
	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
		      AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
	/*
	 * Turn off RXON.
	 */
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
		      AR_PHY_65NM_RXTX2_RXON_OVR, 0);
	/*
	 * Turn off peak detect calibration. 
*/
	REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
		      AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
}

/*
 * Manual peak-detect calibration on every active Rx chain; on
 * RTT-capable parts, cache the resulting CALDAC values in caldata.
 */
static void ar9003_hw_do_pcoem_manual_peak_cal(struct ath_hw *ah,
					       struct ath9k_channel *chan,
					       bool run_rtt_cal)
{
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	int i;

	/* with RTT, only run when an RTT calibration pass was requested */
	if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && !run_rtt_cal)
		return;

	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
		if (!(ah->rxchainmask & (1 << i)))
			continue;
		ar9003_hw_manual_peak_cal(ah, i, IS_CHAN_2GHZ(chan));
	}

	if (caldata)
		set_bit(SW_PKDET_DONE, &caldata->cal_flags);

	if ((ah->caps.hw_caps & ATH9K_HW_CAP_RTT) && caldata) {
		if (IS_CHAN_2GHZ(chan)) {
			caldata->caldac[0] = REG_READ_FIELD(ah,
						AR_PHY_65NM_RXRF_AGC(0),
						AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
			caldata->caldac[1] = REG_READ_FIELD(ah,
						AR_PHY_65NM_RXRF_AGC(1),
						AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR);
		} else {
			caldata->caldac[0] = REG_READ_FIELD(ah,
						AR_PHY_65NM_RXRF_AGC(0),
						AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
			caldata->caldac[1] = REG_READ_FIELD(ah,
						AR_PHY_65NM_RXRF_AGC(1),
						AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR);
		}
	}
}

/*
 * Carrier-leak calibration post-processing: restore cached CL table
 * entries when available, or save the fresh hardware results into
 * caldata when this run's results are reusable.
 */
static void ar9003_hw_cl_cal_post_proc(struct ath_hw *ah, bool is_reusable)
{
	u32 cl_idx[AR9300_MAX_CHAINS] = { AR_PHY_CL_TAB_0,
					  AR_PHY_CL_TAB_1,
					  AR_PHY_CL_TAB_2 };
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	bool txclcal_done = false;
	int i, j;

	if (!caldata || !(ah->enabled_cals & TX_CL_CAL))
		return;

	txclcal_done = !!(REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) &
			  AR_PHY_AGC_CONTROL_CLC_SUCCESS);

	if (test_bit(TXCLCAL_DONE, &caldata->cal_flags)) {
		/* reload previously saved CL table entries */
		for (i = 0; i < AR9300_MAX_CHAINS; i++) {
			if (!(ah->txchainmask & (1 << i)))
				continue;
			for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
				REG_WRITE(ah, CL_TAB_ENTRY(cl_idx[i]),
					  caldata->tx_clcal[i][j]);
		}
	} else if (is_reusable && txclcal_done) {
		/* save the successful hardware results for later reuse */
		for (i = 0; i < AR9300_MAX_CHAINS; i++) {
			if (!(ah->txchainmask & (1 << i)))
				continue;
			for (j = 0; j < MAX_CL_TAB_ENTRY; j++)
				caldata->tx_clcal[i][j] =
					REG_READ(ah, CL_TAB_ENTRY(cl_idx[i]));
		}
		set_bit(TXCLCAL_DONE, &caldata->cal_flags);
	}
}

static void 
ar9003_hw_init_cal_common(struct ath_hw *ah)
{
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	/* Initialize list pointers */
	ah->cal_list = ah->cal_list_last = ah->cal_list_curr = NULL;

	INIT_CAL(&ah->iq_caldata);
	INSERT_CAL(ah, &ah->iq_caldata);

	/* Initialize current pointer to first element in list */
	ah->cal_list_curr = ah->cal_list;

	if (ah->cal_list_curr)
		ath9k_hw_reset_calibration(ah, ah->cal_list_curr);

	if (caldata)
		caldata->CalValid = 0;
}

/*
 * Init-time calibration sequence for PC-OEM chips: RTT restore, then
 * carrier-leak / Tx-IQ / AGC calibration and post-processing.  Returns
 * false if the AGC offset calibration times out.
 */
static bool ar9003_hw_init_cal_pcoem(struct ath_hw *ah,
				     struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath9k_hw_cal_data *caldata = ah->caldata;
	bool txiqcal_done = false;
	bool is_reusable = true, status = true;
	bool run_rtt_cal = false, run_agc_cal;
	bool rtt = !!(ah->caps.hw_caps & ATH9K_HW_CAP_RTT);
	u32 rx_delay = 0;
	u32 agc_ctrl = 0, agc_supp_cals = AR_PHY_AGC_CONTROL_OFFSET_CAL |
					  AR_PHY_AGC_CONTROL_FLTR_CAL |
					  AR_PHY_AGC_CONTROL_PKDET_CAL;

	/* Use chip chainmask only for calibration */
	ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask,
				  ah->caps.tx_chainmask);

	if (rtt) {
		/* a full RTT calibration is only needed if restore failed */
		if (!ar9003_hw_rtt_restore(ah, chan))
			run_rtt_cal = true;

		if (run_rtt_cal)
			ath_dbg(common, CALIBRATE,
				"RTT calibration to be done\n");
	}

	run_agc_cal = run_rtt_cal;

	if (run_rtt_cal) {
		ar9003_hw_rtt_enable(ah);
		ar9003_hw_rtt_set_mask(ah, 0x00);
		ar9003_hw_rtt_clear_hist(ah);
	}

	if (rtt) {
		if (!run_rtt_cal) {
			/* RTT restore covered these sub-cals; mask them out */
			agc_ctrl = REG_READ(ah, AR_PHY_AGC_CONTROL(ah));
			agc_supp_cals &= agc_ctrl;
			agc_ctrl &= ~(AR_PHY_AGC_CONTROL_OFFSET_CAL |
				      AR_PHY_AGC_CONTROL_FLTR_CAL |
				      AR_PHY_AGC_CONTROL_PKDET_CAL);
			REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl);
		} else {
			if (ah->ah_flags & AH_FASTCC)
				run_agc_cal = true;
		}
	}

	if (ah->enabled_cals & TX_CL_CAL) {
		if (caldata && test_bit(TXCLCAL_DONE, &caldata->cal_flags))
			REG_CLR_BIT(ah, AR_PHY_CL_CAL_CTL,
				    AR_PHY_CL_CAL_ENABLE);
		else {
			REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL,
				    AR_PHY_CL_CAL_ENABLE);
			run_agc_cal = true;
		}
	}

	if ((IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) ||
	    !(ah->enabled_cals & 
TX_IQ_CAL))
		goto skip_tx_iqcal;

	/* Do Tx IQ Calibration */
	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah),
		      AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
		      DELPT);

	/*
	 * For AR9485 or later chips, TxIQ cal runs as part of
	 * AGC calibration
	 */
	if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
		/* only enable it if no reusable result is cached yet */
		if (caldata && !test_bit(TXIQCAL_DONE, &caldata->cal_flags))
			REG_SET_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
				    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
		else
			REG_CLR_BIT(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
				    AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL);
		txiqcal_done = run_agc_cal = true;
	}

skip_tx_iqcal:
	if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
		ar9003_mci_init_cal_req(ah, &is_reusable);

	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
		rx_delay = REG_READ(ah, AR_PHY_RX_DELAY);
		/* Disable BB_active */
		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
		udelay(5);
		REG_WRITE(ah, AR_PHY_RX_DELAY, AR_PHY_RX_DELAY_DELAY);
		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
	}

	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
		/* Calibrate the AGC */
		REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah),
			  REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) |
			  AR_PHY_AGC_CONTROL_CAL);

		/* Poll for offset calibration complete */
		status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah),
				       AR_PHY_AGC_CONTROL_CAL,
				       0, AH_WAIT_TIMEOUT);

		ar9003_hw_do_pcoem_manual_peak_cal(ah, chan, run_rtt_cal);
	}

	if (REG_READ(ah, AR_PHY_CL_CAL_CTL) & AR_PHY_CL_CAL_ENABLE) {
		/* restore the Rx delay saved before the CL calibration */
		REG_WRITE(ah, AR_PHY_RX_DELAY, rx_delay);
		udelay(5);
	}

	if (ath9k_hw_mci_is_enabled(ah) && IS_CHAN_2GHZ(chan) && run_agc_cal)
		ar9003_mci_init_cal_done(ah);

	if (rtt && !run_rtt_cal) {
		/* restore the AGC sub-cal bits masked out earlier */
		agc_ctrl |= agc_supp_cals;
		REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), agc_ctrl);
	}

	if (!status) {
		if (run_rtt_cal)
			ar9003_hw_rtt_disable(ah);

		ath_dbg(common, CALIBRATE,
			"offset calibration failed to complete in %d ms; noisy environment?\n",
			AH_WAIT_TIMEOUT / 1000);
		return false;
	}

	if (txiqcal_done)
		ar9003_hw_tx_iq_cal_post_proc(ah, 0, is_reusable);
	else if (caldata && 
test_bit(TXIQCAL_DONE, &caldata->cal_flags)) ar9003_hw_tx_iq_cal_reload(ah); ar9003_hw_cl_cal_post_proc(ah, is_reusable); if (run_rtt_cal && caldata) { if (is_reusable) { if (!ath9k_hw_rfbus_req(ah)) { ath_err(ath9k_hw_common(ah), "Could not stop baseband\n"); } else { ar9003_hw_rtt_fill_hist(ah); if (test_bit(SW_PKDET_DONE, &caldata->cal_flags)) ar9003_hw_rtt_load_hist(ah); } ath9k_hw_rfbus_done(ah); } ar9003_hw_rtt_disable(ah); } /* Revert chainmask to runtime parameters */ ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask); ar9003_hw_init_cal_common(ah); return true; } static bool do_ar9003_agc_cal(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); bool status; REG_WRITE(ah, AR_PHY_AGC_CONTROL(ah), REG_READ(ah, AR_PHY_AGC_CONTROL(ah)) | AR_PHY_AGC_CONTROL_CAL); status = ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL(ah), AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT); if (!status) { ath_dbg(common, CALIBRATE, "offset calibration failed to complete in %d ms," "noisy environment?\n", AH_WAIT_TIMEOUT / 1000); return false; } return true; } static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, struct ath9k_channel *chan) { bool txiqcal_done = false; bool status = true; bool run_agc_cal = false, sep_iq_cal = false; int i = 0; /* Use chip chainmask only for calibration */ ar9003_hw_set_chain_masks(ah, ah->caps.rx_chainmask, ah->caps.tx_chainmask); if (ah->enabled_cals & TX_CL_CAL) { REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); run_agc_cal = true; } if (IS_CHAN_HALF_RATE(chan) || IS_CHAN_QUARTER_RATE(chan)) goto skip_tx_iqcal; /* Do Tx IQ Calibration */ REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1(ah), AR_PHY_TX_IQCAL_CONTROL_1_IQCORR_I_Q_COFF_DELPT, DELPT); /* * For AR9485 or later chips, TxIQ cal runs as part of * AGC calibration. Specifically, AR9550 in SoC chips. 
*/
	if (ah->enabled_cals & TX_IQ_ON_AGC_CAL) {
		/* record whether hardware left the Tx IQ cal enabled */
		if (REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_0(ah),
				   AR_PHY_TX_IQCAL_CONTROL_0_ENABLE_TXIQ_CAL)) {
			txiqcal_done = true;
		} else {
			txiqcal_done = false;
		}
		run_agc_cal = true;
	} else {
		/* older parts run Tx IQ cal as a separate step */
		sep_iq_cal = true;
		run_agc_cal = true;
	}

	/*
	 * In the SoC family, this will run for AR9300, AR9331 and AR9340.
	 */
	if (sep_iq_cal) {
		txiqcal_done = ar9003_hw_tx_iq_cal_run(ah);
		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
		udelay(5);
		REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
	}

	if (AR_SREV_9550(ah) && IS_CHAN_2GHZ(chan)) {
		if (!ar9003_hw_dynamic_osdac_selection(ah, txiqcal_done))
			return false;
	}

skip_tx_iqcal:
	if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
		/* manual peak-detect calibration on every active Rx chain */
		for (i = 0; i < AR9300_MAX_CHAINS; i++) {
			if (!(ah->rxchainmask & (1 << i)))
				continue;
			ar9003_hw_manual_peak_cal(ah, i,
						  IS_CHAN_2GHZ(chan));
		}

		/*
		 * For non-AR9550 chips, we just trigger AGC calibration
		 * in the HW, poll for completion and then process
		 * the results.
		 *
		 * For AR955x, we run it multiple times and use
		 * median IQ correction. 
*/
		if (!AR_SREV_9550(ah)) {
			status = do_ar9003_agc_cal(ah);
			if (!status)
				return false;

			if (txiqcal_done)
				ar9003_hw_tx_iq_cal_post_proc(ah, 0, false);
		} else {
			if (!txiqcal_done) {
				status = do_ar9003_agc_cal(ah);
				if (!status)
					return false;
			} else {
				/* AR955x: MAXIQCAL passes, median result */
				for (i = 0; i < MAXIQCAL; i++) {
					status = do_ar9003_agc_cal(ah);
					if (!status)
						return false;
					ar9003_hw_tx_iq_cal_post_proc(ah, i,
								      false);
				}
			}
		}
	}

	/* Revert chainmask to runtime parameters */
	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);

	ar9003_hw_init_cal_common(ah);

	return true;
}

/*
 * Install the AR9003-family calibration callbacks into the hw ops.
 * PC-OEM and SoC parts use different init_cal implementations.
 */
void ar9003_hw_attach_calib_ops(struct ath_hw *ah)
{
	struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
	struct ath_hw_ops *ops = ath9k_hw_ops(ah);

	if (AR_SREV_9003_PCOEM(ah))
		priv_ops->init_cal = ar9003_hw_init_cal_pcoem;
	else
		priv_ops->init_cal = ar9003_hw_init_cal_soc;

	priv_ops->init_cal_settings = ar9003_hw_init_cal_settings;
	priv_ops->setup_calibration = ar9003_hw_setup_calibration;

	ops->calibrate = ar9003_hw_calibrate;
}
// SPDX-License-Identifier: GPL-2.0
/*
 * Keystone 2 lamarr SoC clock nodes
 *
 * Copyright (C) 2013-2017 Texas Instruments Incorporated - https://www.ti.com/
 */

clocks {
	/* ARM core PLL */
	armpllclk: armpllclk@2620370 {
		#clock-cells = <0>;
		compatible = "ti,keystone,pll-clock";
		clocks = <&refclksys>;
		clock-output-names = "arm-pll-clk";
		reg = <0x02620370 4>;
		reg-names = "control";
	};

	/*
	 * NOTE(review): unit-address (2310110) does not match the first
	 * reg entry (0x02620350) -- confirm against the TRM / DT spec.
	 */
	mainpllclk: mainpllclk@2310110 {
		#clock-cells = <0>;
		compatible = "ti,keystone,main-pll-clock";
		clocks = <&refclksys>;
		reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
		reg-names = "control", "multiplier", "post-divider";
	};

	papllclk: papllclk@2620358 {
		#clock-cells = <0>;
		compatible = "ti,keystone,pll-clock";
		clocks = <&refclksys>;
		clock-output-names = "papllclk";
		reg = <0x02620358 4>;
		reg-names = "control";
	};

	ddr3apllclk: ddr3apllclk@2620360 {
		#clock-cells = <0>;
		compatible = "ti,keystone,pll-clock";
		clocks = <&refclksys>;
		clock-output-names = "ddr-3a-pll-clk";
		reg = <0x02620360 4>;
		reg-names = "control";
	};

	/* PSC-gated clocks below: reg pairs are "control", "domain" */
	clkdfeiqnsys: clkdfeiqnsys@2350004 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk12>;
		clock-output-names = "dfe";
		reg-names = "control", "domain";
		reg = <0x02350004 0xb00>, <0x02350000 0x400>;
		domain-id = <0>;
	};

	clkpcie1: clkpcie1@235002c {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk12>;
		clock-output-names = "pcie";
		reg = <0x0235002c 0xb00>, <0x02350000 0x400>;
		reg-names = "control", "domain";
		domain-id = <4>;
	};

	clkgem1: clkgem1@2350040 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk1>;
		clock-output-names = "gem1";
		reg = <0x02350040 0xb00>, <0x02350024 0x400>;
		reg-names = "control", "domain";
		domain-id = <9>;
	};

	clkgem2: clkgem2@2350044 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk1>;
		clock-output-names = "gem2";
		reg = <0x02350044 0xb00>, <0x02350028 0x400>;
		reg-names = "control", "domain";
		domain-id = <10>;
	};

	clkgem3: clkgem3@2350048 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk1>;
		clock-output-names = "gem3";
		reg = <0x02350048 0xb00>, <0x0235002c 0x400>;
		reg-names = "control", "domain";
		domain-id = <11>;
	};

	/* tac and rac share PSC power domain 17 */
	clktac: clktac@2350064 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "tac";
		reg = <0x02350064 0xb00>, <0x02350044 0x400>;
		reg-names = "control", "domain";
		domain-id = <17>;
	};

	clkrac: clkrac@2350068 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "rac";
		reg = <0x02350068 0xb00>, <0x02350044 0x400>;
		reg-names = "control", "domain";
		domain-id = <17>;
	};

	clkdfepd0: clkdfepd0@235006c {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "dfe-pd0";
		reg = <0x0235006c 0xb00>, <0x02350044 0x400>;
		reg-names = "control", "domain";
		domain-id = <18>;
	};

	clkfftc0: clkfftc0@2350070 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "fftc-0";
		reg = <0x02350070 0xb00>, <0x0235004c 0x400>;
		reg-names = "control", "domain";
		domain-id = <19>;
	};

	clkosr: clkosr@2350088 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "osr";
		reg = <0x02350088 0xb00>, <0x0235004c 0x400>;
		reg-names = "control", "domain";
		domain-id = <21>;
	};

	clktcp3d0: clktcp3d0@235008c {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "tcp3d-0";
		reg = <0x0235008c 0xb00>, <0x02350058 0x400>;
		reg-names = "control", "domain";
		domain-id = <22>;
	};

	clktcp3d1: clktcp3d1@2350094 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "tcp3d-1";
		reg = <0x02350094 0xb00>, <0x02350058 0x400>;
		reg-names = "control", "domain";
		domain-id = <23>;
	};

	/* the four VCP clocks all share PSC power domain 24 */
	clkvcp0: clkvcp0@235009c {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "vcp-0";
		reg = <0x0235009c 0xb00>, <0x02350060 0x400>;
		reg-names = "control", "domain";
		domain-id = <24>;
	};

	clkvcp1: clkvcp1@23500a0 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "vcp-1";
		reg = <0x023500a0 0xb00>, <0x02350060 0x400>;
		reg-names = "control", "domain";
		domain-id = <24>;
	};

	clkvcp2: clkvcp2@23500a4 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "vcp-2";
		reg = <0x023500a4 0xb00>, <0x02350060 0x400>;
		reg-names = "control", "domain";
		domain-id = <24>;
	};

	clkvcp3: clkvcp3@23500a8 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "vcp-3";
		reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
		reg-names = "control", "domain";
		domain-id = <24>;
	};

	clkbcp: clkbcp@23500bc {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "bcp";
		reg = <0x023500bc 0xb00>, <0x02350068 0x400>;
		reg-names = "control", "domain";
		domain-id = <26>;
	};

	clkdfepd1: clkdfepd1@23500c0 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "dfe-pd1";
		reg = <0x023500c0 0xb00>, <0x02350044 0x400>;
		reg-names = "control", "domain";
		domain-id = <27>;
	};

	/*
	 * NOTE(review): the domain reg 0x023504c0 breaks the 0x023500xx
	 * pattern used by every other node here -- possibly a typo for
	 * 0x0235004c; confirm against the TRM before relying on it.
	 */
	clkfftc1: clkfftc1@23500c4 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "fftc-1";
		reg = <0x023500c4 0xb00>, <0x023504c0 0x400>;
		reg-names = "control", "domain";
		domain-id = <28>;
	};

	clkiqnail: clkiqnail@23500c8 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&chipclk13>;
		clock-output-names = "iqn-ail";
		reg = <0x023500c8 0xb00>, <0x0235004c 0x400>;
		reg-names = "control", "domain";
		domain-id = <29>;
	};

	/*
	 * NOTE(review): clkuart2 and clkuart3 carry identical unit
	 * addresses and reg values -- verify the intended PSC offsets.
	 */
	clkuart2: clkuart2@2350000 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&clkmodrst0>;
		clock-output-names = "uart2";
		reg = <0x02350000 0xb00>, <0x02350000 0x400>;
		reg-names = "control", "domain";
		domain-id = <0>;
	};

	clkuart3: clkuart3@2350000 {
		#clock-cells = <0>;
		compatible = "ti,keystone,psc-clock";
		clocks = <&clkmodrst0>;
		clock-output-names = "uart3";
		reg = <0x02350000 0xb00>, <0x02350000 0x400>;
		reg-names = "control", "domain";
		domain-id = <0>;
	};
};
// SPDX-License-Identifier: GPL-2.0+
/*
 * Dummy inodes to buffer blocks for garbage collection
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Seiji Kihara, Amagai Yoshiji, and Ryusuke Konishi.
 * Revised by Ryusuke Konishi.
 *
 */
/*
 * This file adds the cache of on-disk blocks to be moved in garbage
 * collection.  The disk blocks are held with dummy inodes (called
 * gcinodes), and this file provides lookup function of the dummy
 * inodes and their buffer read function.
 *
 * Buffers and pages held by the dummy inodes will be released each
 * time after they are copied to a new log.  Dirty blocks made on the
 * current generation and the blocks to be moved by GC never overlap
 * because the dirty blocks make a new generation; they rather must be
 * written individually.
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include "nilfs.h"
#include "btree.h"
#include "btnode.h"
#include "page.h"
#include "mdt.h"
#include "dat.h"
#include "ifile.h"

/**
 * nilfs_gccache_submit_read_data() - add data buffer and submit read request
 * @inode: gc inode
 * @blkoff: dummy offset treated as the key for the page cache
 * @pbn: physical block number of the block
 * @vbn: virtual block number of the block, 0 for non-virtual block
 * @out_bh: indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_data() registers the data buffer
 * specified by @pbn to the GC pagecache with the key @blkoff.
 * This function sets @vbn (@pbn if @vbn is zero) in b_blocknr of the buffer.
 *
 * Return Value: On success, 0 is returned. On Error, one of the following
 * negative error code is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - The block specified with @pbn does not exist.
 */
int nilfs_gccache_submit_read_data(struct inode *inode, sector_t blkoff,
				   sector_t pbn, __u64 vbn,
				   struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	int err;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		return -ENOMEM;

	if (buffer_uptodate(bh))
		goto out;

	if (pbn == 0) {
		struct the_nilfs *nilfs = inode->i_sb->s_fs_info;

		/* translate the virtual block number via the DAT file */
		err = nilfs_dat_translate(nilfs->ns_dat, vbn, &pbn);
		if (unlikely(err)) /* -EIO, -ENOMEM, -ENOENT */
			goto failed;
	}

	lock_buffer(bh);
	/* re-check: the buffer may have become up to date meanwhile */
	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	if (!buffer_mapped(bh))
		set_buffer_mapped(bh);
	bh->b_blocknr = pbn;
	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(REQ_OP_READ, bh);
	/*
	 * NOTE(review): b_blocknr is rewritten to @vbn right after the
	 * read is submitted, matching the documented contract above --
	 * confirm readers of b_blocknr expect the virtual number.
	 */
	if (vbn)
		bh->b_blocknr = vbn;
 out:
	err = 0;
	*out_bh = bh;

 failed:
	folio_unlock(bh->b_folio);
	folio_put(bh->b_folio);
	/* on failure drop the reference taken by nilfs_grab_buffer() */
	if (unlikely(err))
		brelse(bh);
	return err;
}

/**
 * nilfs_gccache_submit_read_node() - add node buffer and submit read request
 * @inode: gc inode
 * @pbn: physical block number for the block
 * @vbn: virtual block number for the block
 * @out_bh: indirect pointer to a buffer_head struct to receive the results
 *
 * Description: nilfs_gccache_submit_read_node() registers the node buffer
 * specified by @vbn to the GC pagecache. @pbn can be supplied by the
 * caller to avoid translation of the disk block address.
 *
 * Return Value: On success, 0 is returned. On Error, one of the following
 * negative error code is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_gccache_submit_read_node(struct inode *inode, sector_t pbn,
				   __u64 vbn, struct buffer_head **out_bh)
{
	struct inode *btnc_inode = NILFS_I(inode)->i_assoc_inode;
	int ret;

	/* key by vbn when available, otherwise by pbn */
	ret = nilfs_btnode_submit_block(btnc_inode->i_mapping,
					vbn ? : pbn, pbn, REQ_OP_READ,
					out_bh, &pbn);
	if (ret == -EEXIST) /* internal code (cache hit) */
		ret = 0;
	return ret;
}

/*
 * Wait for a previously submitted GC read and mark the buffer dirty.
 * Returns -EIO on read error or broken btree node, -EEXIST if the
 * buffer was already dirty, 0 otherwise.
 */
int nilfs_gccache_wait_and_mark_dirty(struct buffer_head *bh)
{
	wait_on_buffer(bh);
	if (!buffer_uptodate(bh)) {
		struct inode *inode = bh->b_folio->mapping->host;

		nilfs_err(inode->i_sb,
			  "I/O error reading %s block for GC (ino=%lu, vblocknr=%llu)",
			  buffer_nilfs_node(bh) ? "node" : "data",
			  inode->i_ino, (unsigned long long)bh->b_blocknr);
		return -EIO;
	}
	if (buffer_dirty(bh))
		return -EEXIST;

	if (buffer_nilfs_node(bh) && nilfs_btree_broken_node_block(bh)) {
		clear_buffer_uptodate(bh);
		return -EIO;
	}
	mark_buffer_dirty(bh);
	return 0;
}

/*
 * Initialize a dummy inode for GC use: regular-file mode, NOFS
 * allocations, no address-space operations, GC bmap and a btree node
 * cache attached.
 */
int nilfs_init_gcinode(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
	inode->i_mapping->a_ops = &empty_aops;

	ii->i_flags = 0;
	nilfs_bmap_init_gc(ii->i_bmap);

	return nilfs_attach_btree_node_cache(inode);
}

/**
 * nilfs_remove_all_gcinodes() - remove all unprocessed gc inodes
 * @nilfs: NILFS filesystem instance
 */
void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
{
	struct list_head *head = &nilfs->ns_gc_inodes;
	struct nilfs_inode_info *ii;

	while (!list_empty(head)) {
		ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
		list_del_init(&ii->i_dirty);
		truncate_inode_pages(&ii->vfs_inode.i_data, 0);
		nilfs_btnode_cache_clear(ii->i_assoc_inode->i_mapping);
		iput(&ii->vfs_inode);
	}
}
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  arch/arm/include/asm/page-nommu.h
 *
 *  Copyright (C) 2004 Hyok S. Choi
 */
#ifndef _ASMARM_PAGE_NOMMU_H
#define _ASMARM_PAGE_NOMMU_H

/* Whole-page helpers: plain memset/memcpy suffice without an MMU */
#define clear_page(page)	memset((page), 0, PAGE_SIZE)
#define copy_page(to,from)	memcpy((to), (from), PAGE_SIZE)

/* User-page variants collapse to the plain helpers on !MMU */
#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

/*
 * These are used to make use of C type-checking..
 */
typedef unsigned long pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];		/* two entries per pgd slot */
typedef unsigned long pgprot_t;

#define pte_val(x)	(x)
#define pmd_val(x)	(x)
#define pgd_val(x)	((x)[0])	/* only the first pgd entry is exposed */
#define pgprot_val(x)	(x)

#define __pte(x)	(x)
#define __pmd(x)	(x)
#define __pgprot(x)	(x)

#endif
/* * UVD_6_0 Register documentation * * Copyright (C) 2014 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #ifndef UVD_6_0_ENUM_H #define UVD_6_0_ENUM_H typedef enum UVDFirmwareCommand { UVDFC_FENCE = 0x0, UVDFC_TRAP = 0x1, UVDFC_DECODED_ADDR = 0x2, UVDFC_MBLOCK_ADDR = 0x3, UVDFC_ITBUF_ADDR = 0x4, UVDFC_DISPLAY_ADDR = 0x5, UVDFC_EOD = 0x6, UVDFC_DISPLAY_PITCH = 0x7, UVDFC_DISPLAY_TILING = 0x8, UVDFC_BITSTREAM_ADDR = 0x9, UVDFC_BITSTREAM_SIZE = 0xa, } UVDFirmwareCommand; typedef enum DebugBlockId { DBG_BLOCK_ID_RESERVED = 0x0, DBG_BLOCK_ID_DBG = 0x1, DBG_BLOCK_ID_VMC = 0x2, DBG_BLOCK_ID_PDMA = 0x3, DBG_BLOCK_ID_CG = 0x4, DBG_BLOCK_ID_SRBM = 0x5, DBG_BLOCK_ID_GRBM = 0x6, DBG_BLOCK_ID_RLC = 0x7, DBG_BLOCK_ID_CSC = 0x8, DBG_BLOCK_ID_SEM = 0x9, DBG_BLOCK_ID_IH = 0xa, DBG_BLOCK_ID_SC = 0xb, DBG_BLOCK_ID_SQ = 0xc, DBG_BLOCK_ID_UVDU = 0xd, DBG_BLOCK_ID_SQA = 0xe, DBG_BLOCK_ID_SDMA0 = 0xf, DBG_BLOCK_ID_SDMA1 = 0x10, DBG_BLOCK_ID_SPIM = 0x11, DBG_BLOCK_ID_GDS = 0x12, DBG_BLOCK_ID_VC0 = 0x13, DBG_BLOCK_ID_VC1 = 0x14, DBG_BLOCK_ID_PA0 = 0x15, DBG_BLOCK_ID_PA1 = 0x16, DBG_BLOCK_ID_CP0 = 0x17, DBG_BLOCK_ID_CP1 = 0x18, DBG_BLOCK_ID_CP2 = 0x19, DBG_BLOCK_ID_XBR = 0x1a, DBG_BLOCK_ID_UVDM = 0x1b, DBG_BLOCK_ID_VGT0 = 0x1c, DBG_BLOCK_ID_VGT1 = 0x1d, DBG_BLOCK_ID_IA = 0x1e, DBG_BLOCK_ID_SXM0 = 0x1f, DBG_BLOCK_ID_SXM1 = 0x20, DBG_BLOCK_ID_SCT0 = 0x21, DBG_BLOCK_ID_SCT1 = 0x22, DBG_BLOCK_ID_SPM0 = 0x23, DBG_BLOCK_ID_SPM1 = 0x24, DBG_BLOCK_ID_UNUSED0 = 0x25, DBG_BLOCK_ID_UNUSED1 = 0x26, DBG_BLOCK_ID_TCAA = 0x27, DBG_BLOCK_ID_TCAB = 0x28, DBG_BLOCK_ID_TCCA = 0x29, DBG_BLOCK_ID_TCCB = 0x2a, DBG_BLOCK_ID_MCC0 = 0x2b, DBG_BLOCK_ID_MCC1 = 0x2c, DBG_BLOCK_ID_MCC2 = 0x2d, DBG_BLOCK_ID_MCC3 = 0x2e, DBG_BLOCK_ID_SXS0 = 0x2f, DBG_BLOCK_ID_SXS1 = 0x30, DBG_BLOCK_ID_SXS2 = 0x31, DBG_BLOCK_ID_SXS3 = 0x32, DBG_BLOCK_ID_SXS4 = 0x33, DBG_BLOCK_ID_SXS5 = 0x34, DBG_BLOCK_ID_SXS6 = 0x35, DBG_BLOCK_ID_SXS7 = 0x36, DBG_BLOCK_ID_SXS8 = 0x37, DBG_BLOCK_ID_SXS9 = 0x38, DBG_BLOCK_ID_BCI0 = 0x39, DBG_BLOCK_ID_BCI1 = 0x3a, DBG_BLOCK_ID_BCI2 = 0x3b, DBG_BLOCK_ID_BCI3 = 0x3c, DBG_BLOCK_ID_MCB = 0x3d, 
DBG_BLOCK_ID_UNUSED6 = 0x3e, DBG_BLOCK_ID_SQA00 = 0x3f, DBG_BLOCK_ID_SQA01 = 0x40, DBG_BLOCK_ID_SQA02 = 0x41, DBG_BLOCK_ID_SQA10 = 0x42, DBG_BLOCK_ID_SQA11 = 0x43, DBG_BLOCK_ID_SQA12 = 0x44, DBG_BLOCK_ID_UNUSED7 = 0x45, DBG_BLOCK_ID_UNUSED8 = 0x46, DBG_BLOCK_ID_SQB00 = 0x47, DBG_BLOCK_ID_SQB01 = 0x48, DBG_BLOCK_ID_SQB10 = 0x49, DBG_BLOCK_ID_SQB11 = 0x4a, DBG_BLOCK_ID_SQ00 = 0x4b, DBG_BLOCK_ID_SQ01 = 0x4c, DBG_BLOCK_ID_SQ10 = 0x4d, DBG_BLOCK_ID_SQ11 = 0x4e, DBG_BLOCK_ID_CB00 = 0x4f, DBG_BLOCK_ID_CB01 = 0x50, DBG_BLOCK_ID_CB02 = 0x51, DBG_BLOCK_ID_CB03 = 0x52, DBG_BLOCK_ID_CB04 = 0x53, DBG_BLOCK_ID_UNUSED9 = 0x54, DBG_BLOCK_ID_UNUSED10 = 0x55, DBG_BLOCK_ID_UNUSED11 = 0x56, DBG_BLOCK_ID_CB10 = 0x57, DBG_BLOCK_ID_CB11 = 0x58, DBG_BLOCK_ID_CB12 = 0x59, DBG_BLOCK_ID_CB13 = 0x5a, DBG_BLOCK_ID_CB14 = 0x5b, DBG_BLOCK_ID_UNUSED12 = 0x5c, DBG_BLOCK_ID_UNUSED13 = 0x5d, DBG_BLOCK_ID_UNUSED14 = 0x5e, DBG_BLOCK_ID_TCP0 = 0x5f, DBG_BLOCK_ID_TCP1 = 0x60, DBG_BLOCK_ID_TCP2 = 0x61, DBG_BLOCK_ID_TCP3 = 0x62, DBG_BLOCK_ID_TCP4 = 0x63, DBG_BLOCK_ID_TCP5 = 0x64, DBG_BLOCK_ID_TCP6 = 0x65, DBG_BLOCK_ID_TCP7 = 0x66, DBG_BLOCK_ID_TCP8 = 0x67, DBG_BLOCK_ID_TCP9 = 0x68, DBG_BLOCK_ID_TCP10 = 0x69, DBG_BLOCK_ID_TCP11 = 0x6a, DBG_BLOCK_ID_TCP12 = 0x6b, DBG_BLOCK_ID_TCP13 = 0x6c, DBG_BLOCK_ID_TCP14 = 0x6d, DBG_BLOCK_ID_TCP15 = 0x6e, DBG_BLOCK_ID_TCP16 = 0x6f, DBG_BLOCK_ID_TCP17 = 0x70, DBG_BLOCK_ID_TCP18 = 0x71, DBG_BLOCK_ID_TCP19 = 0x72, DBG_BLOCK_ID_TCP20 = 0x73, DBG_BLOCK_ID_TCP21 = 0x74, DBG_BLOCK_ID_TCP22 = 0x75, DBG_BLOCK_ID_TCP23 = 0x76, DBG_BLOCK_ID_TCP_RESERVED0 = 0x77, DBG_BLOCK_ID_TCP_RESERVED1 = 0x78, DBG_BLOCK_ID_TCP_RESERVED2 = 0x79, DBG_BLOCK_ID_TCP_RESERVED3 = 0x7a, DBG_BLOCK_ID_TCP_RESERVED4 = 0x7b, DBG_BLOCK_ID_TCP_RESERVED5 = 0x7c, DBG_BLOCK_ID_TCP_RESERVED6 = 0x7d, DBG_BLOCK_ID_TCP_RESERVED7 = 0x7e, DBG_BLOCK_ID_DB00 = 0x7f, DBG_BLOCK_ID_DB01 = 0x80, DBG_BLOCK_ID_DB02 = 0x81, DBG_BLOCK_ID_DB03 = 0x82, DBG_BLOCK_ID_DB04 = 0x83, DBG_BLOCK_ID_UNUSED15 = 0x84, DBG_BLOCK_ID_UNUSED16 
= 0x85, DBG_BLOCK_ID_UNUSED17 = 0x86, DBG_BLOCK_ID_DB10 = 0x87, DBG_BLOCK_ID_DB11 = 0x88, DBG_BLOCK_ID_DB12 = 0x89, DBG_BLOCK_ID_DB13 = 0x8a, DBG_BLOCK_ID_DB14 = 0x8b, DBG_BLOCK_ID_UNUSED18 = 0x8c, DBG_BLOCK_ID_UNUSED19 = 0x8d, DBG_BLOCK_ID_UNUSED20 = 0x8e, DBG_BLOCK_ID_TCC0 = 0x8f, DBG_BLOCK_ID_TCC1 = 0x90, DBG_BLOCK_ID_TCC2 = 0x91, DBG_BLOCK_ID_TCC3 = 0x92, DBG_BLOCK_ID_TCC4 = 0x93, DBG_BLOCK_ID_TCC5 = 0x94, DBG_BLOCK_ID_TCC6 = 0x95, DBG_BLOCK_ID_TCC7 = 0x96, DBG_BLOCK_ID_SPS00 = 0x97, DBG_BLOCK_ID_SPS01 = 0x98, DBG_BLOCK_ID_SPS02 = 0x99, DBG_BLOCK_ID_SPS10 = 0x9a, DBG_BLOCK_ID_SPS11 = 0x9b, DBG_BLOCK_ID_SPS12 = 0x9c, DBG_BLOCK_ID_UNUSED21 = 0x9d, DBG_BLOCK_ID_UNUSED22 = 0x9e, DBG_BLOCK_ID_TA00 = 0x9f, DBG_BLOCK_ID_TA01 = 0xa0, DBG_BLOCK_ID_TA02 = 0xa1, DBG_BLOCK_ID_TA03 = 0xa2, DBG_BLOCK_ID_TA04 = 0xa3, DBG_BLOCK_ID_TA05 = 0xa4, DBG_BLOCK_ID_TA06 = 0xa5, DBG_BLOCK_ID_TA07 = 0xa6, DBG_BLOCK_ID_TA08 = 0xa7, DBG_BLOCK_ID_TA09 = 0xa8, DBG_BLOCK_ID_TA0A = 0xa9, DBG_BLOCK_ID_TA0B = 0xaa, DBG_BLOCK_ID_UNUSED23 = 0xab, DBG_BLOCK_ID_UNUSED24 = 0xac, DBG_BLOCK_ID_UNUSED25 = 0xad, DBG_BLOCK_ID_UNUSED26 = 0xae, DBG_BLOCK_ID_TA10 = 0xaf, DBG_BLOCK_ID_TA11 = 0xb0, DBG_BLOCK_ID_TA12 = 0xb1, DBG_BLOCK_ID_TA13 = 0xb2, DBG_BLOCK_ID_TA14 = 0xb3, DBG_BLOCK_ID_TA15 = 0xb4, DBG_BLOCK_ID_TA16 = 0xb5, DBG_BLOCK_ID_TA17 = 0xb6, DBG_BLOCK_ID_TA18 = 0xb7, DBG_BLOCK_ID_TA19 = 0xb8, DBG_BLOCK_ID_TA1A = 0xb9, DBG_BLOCK_ID_TA1B = 0xba, DBG_BLOCK_ID_UNUSED27 = 0xbb, DBG_BLOCK_ID_UNUSED28 = 0xbc, DBG_BLOCK_ID_UNUSED29 = 0xbd, DBG_BLOCK_ID_UNUSED30 = 0xbe, DBG_BLOCK_ID_TD00 = 0xbf, DBG_BLOCK_ID_TD01 = 0xc0, DBG_BLOCK_ID_TD02 = 0xc1, DBG_BLOCK_ID_TD03 = 0xc2, DBG_BLOCK_ID_TD04 = 0xc3, DBG_BLOCK_ID_TD05 = 0xc4, DBG_BLOCK_ID_TD06 = 0xc5, DBG_BLOCK_ID_TD07 = 0xc6, DBG_BLOCK_ID_TD08 = 0xc7, DBG_BLOCK_ID_TD09 = 0xc8, DBG_BLOCK_ID_TD0A = 0xc9, DBG_BLOCK_ID_TD0B = 0xca, DBG_BLOCK_ID_UNUSED31 = 0xcb, DBG_BLOCK_ID_UNUSED32 = 0xcc, DBG_BLOCK_ID_UNUSED33 = 0xcd, DBG_BLOCK_ID_UNUSED34 = 0xce, 
DBG_BLOCK_ID_TD10 = 0xcf, DBG_BLOCK_ID_TD11 = 0xd0, DBG_BLOCK_ID_TD12 = 0xd1, DBG_BLOCK_ID_TD13 = 0xd2, DBG_BLOCK_ID_TD14 = 0xd3, DBG_BLOCK_ID_TD15 = 0xd4, DBG_BLOCK_ID_TD16 = 0xd5, DBG_BLOCK_ID_TD17 = 0xd6, DBG_BLOCK_ID_TD18 = 0xd7, DBG_BLOCK_ID_TD19 = 0xd8, DBG_BLOCK_ID_TD1A = 0xd9, DBG_BLOCK_ID_TD1B = 0xda, DBG_BLOCK_ID_UNUSED35 = 0xdb, DBG_BLOCK_ID_UNUSED36 = 0xdc, DBG_BLOCK_ID_UNUSED37 = 0xdd, DBG_BLOCK_ID_UNUSED38 = 0xde, DBG_BLOCK_ID_LDS00 = 0xdf, DBG_BLOCK_ID_LDS01 = 0xe0, DBG_BLOCK_ID_LDS02 = 0xe1, DBG_BLOCK_ID_LDS03 = 0xe2, DBG_BLOCK_ID_LDS04 = 0xe3, DBG_BLOCK_ID_LDS05 = 0xe4, DBG_BLOCK_ID_LDS06 = 0xe5, DBG_BLOCK_ID_LDS07 = 0xe6, DBG_BLOCK_ID_LDS08 = 0xe7, DBG_BLOCK_ID_LDS09 = 0xe8, DBG_BLOCK_ID_LDS0A = 0xe9, DBG_BLOCK_ID_LDS0B = 0xea, DBG_BLOCK_ID_UNUSED39 = 0xeb, DBG_BLOCK_ID_UNUSED40 = 0xec, DBG_BLOCK_ID_UNUSED41 = 0xed, DBG_BLOCK_ID_UNUSED42 = 0xee, DBG_BLOCK_ID_LDS10 = 0xef, DBG_BLOCK_ID_LDS11 = 0xf0, DBG_BLOCK_ID_LDS12 = 0xf1, DBG_BLOCK_ID_LDS13 = 0xf2, DBG_BLOCK_ID_LDS14 = 0xf3, DBG_BLOCK_ID_LDS15 = 0xf4, DBG_BLOCK_ID_LDS16 = 0xf5, DBG_BLOCK_ID_LDS17 = 0xf6, DBG_BLOCK_ID_LDS18 = 0xf7, DBG_BLOCK_ID_LDS19 = 0xf8, DBG_BLOCK_ID_LDS1A = 0xf9, DBG_BLOCK_ID_LDS1B = 0xfa, DBG_BLOCK_ID_UNUSED43 = 0xfb, DBG_BLOCK_ID_UNUSED44 = 0xfc, DBG_BLOCK_ID_UNUSED45 = 0xfd, DBG_BLOCK_ID_UNUSED46 = 0xfe, } DebugBlockId; typedef enum DebugBlockId_BY2 { DBG_BLOCK_ID_RESERVED_BY2 = 0x0, DBG_BLOCK_ID_VMC_BY2 = 0x1, DBG_BLOCK_ID_UNUSED0_BY2 = 0x2, DBG_BLOCK_ID_GRBM_BY2 = 0x3, DBG_BLOCK_ID_CSC_BY2 = 0x4, DBG_BLOCK_ID_IH_BY2 = 0x5, DBG_BLOCK_ID_SQ_BY2 = 0x6, DBG_BLOCK_ID_UVD_BY2 = 0x7, DBG_BLOCK_ID_SDMA0_BY2 = 0x8, DBG_BLOCK_ID_SPIM_BY2 = 0x9, DBG_BLOCK_ID_VC0_BY2 = 0xa, DBG_BLOCK_ID_PA_BY2 = 0xb, DBG_BLOCK_ID_CP0_BY2 = 0xc, DBG_BLOCK_ID_CP2_BY2 = 0xd, DBG_BLOCK_ID_PC0_BY2 = 0xe, DBG_BLOCK_ID_BCI0_BY2 = 0xf, DBG_BLOCK_ID_SXM0_BY2 = 0x10, DBG_BLOCK_ID_SCT0_BY2 = 0x11, DBG_BLOCK_ID_SPM0_BY2 = 0x12, DBG_BLOCK_ID_BCI2_BY2 = 0x13, DBG_BLOCK_ID_TCA_BY2 = 0x14, DBG_BLOCK_ID_TCCA_BY2 = 
0x15, DBG_BLOCK_ID_MCC_BY2 = 0x16, DBG_BLOCK_ID_MCC2_BY2 = 0x17, DBG_BLOCK_ID_MCD_BY2 = 0x18, DBG_BLOCK_ID_MCD2_BY2 = 0x19, DBG_BLOCK_ID_MCD4_BY2 = 0x1a, DBG_BLOCK_ID_MCB_BY2 = 0x1b, DBG_BLOCK_ID_SQA_BY2 = 0x1c, DBG_BLOCK_ID_SQA02_BY2 = 0x1d, DBG_BLOCK_ID_SQA11_BY2 = 0x1e, DBG_BLOCK_ID_UNUSED8_BY2 = 0x1f, DBG_BLOCK_ID_SQB_BY2 = 0x20, DBG_BLOCK_ID_SQB10_BY2 = 0x21, DBG_BLOCK_ID_UNUSED10_BY2 = 0x22, DBG_BLOCK_ID_UNUSED12_BY2 = 0x23, DBG_BLOCK_ID_CB_BY2 = 0x24, DBG_BLOCK_ID_CB02_BY2 = 0x25, DBG_BLOCK_ID_CB10_BY2 = 0x26, DBG_BLOCK_ID_CB12_BY2 = 0x27, DBG_BLOCK_ID_SXS_BY2 = 0x28, DBG_BLOCK_ID_SXS2_BY2 = 0x29, DBG_BLOCK_ID_SXS4_BY2 = 0x2a, DBG_BLOCK_ID_SXS6_BY2 = 0x2b, DBG_BLOCK_ID_DB_BY2 = 0x2c, DBG_BLOCK_ID_DB02_BY2 = 0x2d, DBG_BLOCK_ID_DB10_BY2 = 0x2e, DBG_BLOCK_ID_DB12_BY2 = 0x2f, DBG_BLOCK_ID_TCP_BY2 = 0x30, DBG_BLOCK_ID_TCP2_BY2 = 0x31, DBG_BLOCK_ID_TCP4_BY2 = 0x32, DBG_BLOCK_ID_TCP6_BY2 = 0x33, DBG_BLOCK_ID_TCP8_BY2 = 0x34, DBG_BLOCK_ID_TCP10_BY2 = 0x35, DBG_BLOCK_ID_TCP12_BY2 = 0x36, DBG_BLOCK_ID_TCP14_BY2 = 0x37, DBG_BLOCK_ID_TCP16_BY2 = 0x38, DBG_BLOCK_ID_TCP18_BY2 = 0x39, DBG_BLOCK_ID_TCP20_BY2 = 0x3a, DBG_BLOCK_ID_TCP22_BY2 = 0x3b, DBG_BLOCK_ID_TCP_RESERVED0_BY2 = 0x3c, DBG_BLOCK_ID_TCP_RESERVED2_BY2 = 0x3d, DBG_BLOCK_ID_TCP_RESERVED4_BY2 = 0x3e, DBG_BLOCK_ID_TCP_RESERVED6_BY2 = 0x3f, DBG_BLOCK_ID_TCC_BY2 = 0x40, DBG_BLOCK_ID_TCC2_BY2 = 0x41, DBG_BLOCK_ID_TCC4_BY2 = 0x42, DBG_BLOCK_ID_TCC6_BY2 = 0x43, DBG_BLOCK_ID_SPS_BY2 = 0x44, DBG_BLOCK_ID_SPS02_BY2 = 0x45, DBG_BLOCK_ID_SPS11_BY2 = 0x46, DBG_BLOCK_ID_UNUSED14_BY2 = 0x47, DBG_BLOCK_ID_TA_BY2 = 0x48, DBG_BLOCK_ID_TA02_BY2 = 0x49, DBG_BLOCK_ID_TA04_BY2 = 0x4a, DBG_BLOCK_ID_TA06_BY2 = 0x4b, DBG_BLOCK_ID_TA08_BY2 = 0x4c, DBG_BLOCK_ID_TA0A_BY2 = 0x4d, DBG_BLOCK_ID_UNUSED20_BY2 = 0x4e, DBG_BLOCK_ID_UNUSED22_BY2 = 0x4f, DBG_BLOCK_ID_TA10_BY2 = 0x50, DBG_BLOCK_ID_TA12_BY2 = 0x51, DBG_BLOCK_ID_TA14_BY2 = 0x52, DBG_BLOCK_ID_TA16_BY2 = 0x53, DBG_BLOCK_ID_TA18_BY2 = 0x54, DBG_BLOCK_ID_TA1A_BY2 = 0x55, 
DBG_BLOCK_ID_UNUSED24_BY2 = 0x56, DBG_BLOCK_ID_UNUSED26_BY2 = 0x57, DBG_BLOCK_ID_TD_BY2 = 0x58, DBG_BLOCK_ID_TD02_BY2 = 0x59, DBG_BLOCK_ID_TD04_BY2 = 0x5a, DBG_BLOCK_ID_TD06_BY2 = 0x5b, DBG_BLOCK_ID_TD08_BY2 = 0x5c, DBG_BLOCK_ID_TD0A_BY2 = 0x5d, DBG_BLOCK_ID_UNUSED28_BY2 = 0x5e, DBG_BLOCK_ID_UNUSED30_BY2 = 0x5f, DBG_BLOCK_ID_TD10_BY2 = 0x60, DBG_BLOCK_ID_TD12_BY2 = 0x61, DBG_BLOCK_ID_TD14_BY2 = 0x62, DBG_BLOCK_ID_TD16_BY2 = 0x63, DBG_BLOCK_ID_TD18_BY2 = 0x64, DBG_BLOCK_ID_TD1A_BY2 = 0x65, DBG_BLOCK_ID_UNUSED32_BY2 = 0x66, DBG_BLOCK_ID_UNUSED34_BY2 = 0x67, DBG_BLOCK_ID_LDS_BY2 = 0x68, DBG_BLOCK_ID_LDS02_BY2 = 0x69, DBG_BLOCK_ID_LDS04_BY2 = 0x6a, DBG_BLOCK_ID_LDS06_BY2 = 0x6b, DBG_BLOCK_ID_LDS08_BY2 = 0x6c, DBG_BLOCK_ID_LDS0A_BY2 = 0x6d, DBG_BLOCK_ID_UNUSED36_BY2 = 0x6e, DBG_BLOCK_ID_UNUSED38_BY2 = 0x6f, DBG_BLOCK_ID_LDS10_BY2 = 0x70, DBG_BLOCK_ID_LDS12_BY2 = 0x71, DBG_BLOCK_ID_LDS14_BY2 = 0x72, DBG_BLOCK_ID_LDS16_BY2 = 0x73, DBG_BLOCK_ID_LDS18_BY2 = 0x74, DBG_BLOCK_ID_LDS1A_BY2 = 0x75, DBG_BLOCK_ID_UNUSED40_BY2 = 0x76, DBG_BLOCK_ID_UNUSED42_BY2 = 0x77, } DebugBlockId_BY2; typedef enum DebugBlockId_BY4 { DBG_BLOCK_ID_RESERVED_BY4 = 0x0, DBG_BLOCK_ID_UNUSED0_BY4 = 0x1, DBG_BLOCK_ID_CSC_BY4 = 0x2, DBG_BLOCK_ID_SQ_BY4 = 0x3, DBG_BLOCK_ID_SDMA0_BY4 = 0x4, DBG_BLOCK_ID_VC0_BY4 = 0x5, DBG_BLOCK_ID_CP0_BY4 = 0x6, DBG_BLOCK_ID_UNUSED1_BY4 = 0x7, DBG_BLOCK_ID_SXM0_BY4 = 0x8, DBG_BLOCK_ID_SPM0_BY4 = 0x9, DBG_BLOCK_ID_TCAA_BY4 = 0xa, DBG_BLOCK_ID_MCC_BY4 = 0xb, DBG_BLOCK_ID_MCD_BY4 = 0xc, DBG_BLOCK_ID_MCD4_BY4 = 0xd, DBG_BLOCK_ID_SQA_BY4 = 0xe, DBG_BLOCK_ID_SQA11_BY4 = 0xf, DBG_BLOCK_ID_SQB_BY4 = 0x10, DBG_BLOCK_ID_UNUSED10_BY4 = 0x11, DBG_BLOCK_ID_CB_BY4 = 0x12, DBG_BLOCK_ID_CB10_BY4 = 0x13, DBG_BLOCK_ID_SXS_BY4 = 0x14, DBG_BLOCK_ID_SXS4_BY4 = 0x15, DBG_BLOCK_ID_DB_BY4 = 0x16, DBG_BLOCK_ID_DB10_BY4 = 0x17, DBG_BLOCK_ID_TCP_BY4 = 0x18, DBG_BLOCK_ID_TCP4_BY4 = 0x19, DBG_BLOCK_ID_TCP8_BY4 = 0x1a, DBG_BLOCK_ID_TCP12_BY4 = 0x1b, DBG_BLOCK_ID_TCP16_BY4 = 0x1c, DBG_BLOCK_ID_TCP20_BY4 
= 0x1d, DBG_BLOCK_ID_TCP_RESERVED0_BY4 = 0x1e, DBG_BLOCK_ID_TCP_RESERVED4_BY4 = 0x1f, DBG_BLOCK_ID_TCC_BY4 = 0x20, DBG_BLOCK_ID_TCC4_BY4 = 0x21, DBG_BLOCK_ID_SPS_BY4 = 0x22, DBG_BLOCK_ID_SPS11_BY4 = 0x23, DBG_BLOCK_ID_TA_BY4 = 0x24, DBG_BLOCK_ID_TA04_BY4 = 0x25, DBG_BLOCK_ID_TA08_BY4 = 0x26, DBG_BLOCK_ID_UNUSED20_BY4 = 0x27, DBG_BLOCK_ID_TA10_BY4 = 0x28, DBG_BLOCK_ID_TA14_BY4 = 0x29, DBG_BLOCK_ID_TA18_BY4 = 0x2a, DBG_BLOCK_ID_UNUSED24_BY4 = 0x2b, DBG_BLOCK_ID_TD_BY4 = 0x2c, DBG_BLOCK_ID_TD04_BY4 = 0x2d, DBG_BLOCK_ID_TD08_BY4 = 0x2e, DBG_BLOCK_ID_UNUSED28_BY4 = 0x2f, DBG_BLOCK_ID_TD10_BY4 = 0x30, DBG_BLOCK_ID_TD14_BY4 = 0x31, DBG_BLOCK_ID_TD18_BY4 = 0x32, DBG_BLOCK_ID_UNUSED32_BY4 = 0x33, DBG_BLOCK_ID_LDS_BY4 = 0x34, DBG_BLOCK_ID_LDS04_BY4 = 0x35, DBG_BLOCK_ID_LDS08_BY4 = 0x36, DBG_BLOCK_ID_UNUSED36_BY4 = 0x37, DBG_BLOCK_ID_LDS10_BY4 = 0x38, DBG_BLOCK_ID_LDS14_BY4 = 0x39, DBG_BLOCK_ID_LDS18_BY4 = 0x3a, DBG_BLOCK_ID_UNUSED40_BY4 = 0x3b, } DebugBlockId_BY4; typedef enum DebugBlockId_BY8 { DBG_BLOCK_ID_RESERVED_BY8 = 0x0, DBG_BLOCK_ID_CSC_BY8 = 0x1, DBG_BLOCK_ID_SDMA0_BY8 = 0x2, DBG_BLOCK_ID_CP0_BY8 = 0x3, DBG_BLOCK_ID_SXM0_BY8 = 0x4, DBG_BLOCK_ID_TCA_BY8 = 0x5, DBG_BLOCK_ID_MCD_BY8 = 0x6, DBG_BLOCK_ID_SQA_BY8 = 0x7, DBG_BLOCK_ID_SQB_BY8 = 0x8, DBG_BLOCK_ID_CB_BY8 = 0x9, DBG_BLOCK_ID_SXS_BY8 = 0xa, DBG_BLOCK_ID_DB_BY8 = 0xb, DBG_BLOCK_ID_TCP_BY8 = 0xc, DBG_BLOCK_ID_TCP8_BY8 = 0xd, DBG_BLOCK_ID_TCP16_BY8 = 0xe, DBG_BLOCK_ID_TCP_RESERVED0_BY8 = 0xf, DBG_BLOCK_ID_TCC_BY8 = 0x10, DBG_BLOCK_ID_SPS_BY8 = 0x11, DBG_BLOCK_ID_TA_BY8 = 0x12, DBG_BLOCK_ID_TA08_BY8 = 0x13, DBG_BLOCK_ID_TA10_BY8 = 0x14, DBG_BLOCK_ID_TA18_BY8 = 0x15, DBG_BLOCK_ID_TD_BY8 = 0x16, DBG_BLOCK_ID_TD08_BY8 = 0x17, DBG_BLOCK_ID_TD10_BY8 = 0x18, DBG_BLOCK_ID_TD18_BY8 = 0x19, DBG_BLOCK_ID_LDS_BY8 = 0x1a, DBG_BLOCK_ID_LDS08_BY8 = 0x1b, DBG_BLOCK_ID_LDS10_BY8 = 0x1c, DBG_BLOCK_ID_LDS18_BY8 = 0x1d, } DebugBlockId_BY8; typedef enum DebugBlockId_BY16 { DBG_BLOCK_ID_RESERVED_BY16 = 0x0, DBG_BLOCK_ID_SDMA0_BY16 = 
0x1, DBG_BLOCK_ID_SXM_BY16 = 0x2, DBG_BLOCK_ID_MCD_BY16 = 0x3, DBG_BLOCK_ID_SQB_BY16 = 0x4, DBG_BLOCK_ID_SXS_BY16 = 0x5, DBG_BLOCK_ID_TCP_BY16 = 0x6, DBG_BLOCK_ID_TCP16_BY16 = 0x7, DBG_BLOCK_ID_TCC_BY16 = 0x8, DBG_BLOCK_ID_TA_BY16 = 0x9, DBG_BLOCK_ID_TA10_BY16 = 0xa, DBG_BLOCK_ID_TD_BY16 = 0xb, DBG_BLOCK_ID_TD10_BY16 = 0xc, DBG_BLOCK_ID_LDS_BY16 = 0xd, DBG_BLOCK_ID_LDS10_BY16 = 0xe, } DebugBlockId_BY16; typedef enum SurfaceEndian { ENDIAN_NONE = 0x0, ENDIAN_8IN16 = 0x1, ENDIAN_8IN32 = 0x2, ENDIAN_8IN64 = 0x3, } SurfaceEndian; typedef enum ArrayMode { ARRAY_LINEAR_GENERAL = 0x0, ARRAY_LINEAR_ALIGNED = 0x1, ARRAY_1D_TILED_THIN1 = 0x2, ARRAY_1D_TILED_THICK = 0x3, ARRAY_2D_TILED_THIN1 = 0x4, ARRAY_PRT_TILED_THIN1 = 0x5, ARRAY_PRT_2D_TILED_THIN1 = 0x6, ARRAY_2D_TILED_THICK = 0x7, ARRAY_2D_TILED_XTHICK = 0x8, ARRAY_PRT_TILED_THICK = 0x9, ARRAY_PRT_2D_TILED_THICK = 0xa, ARRAY_PRT_3D_TILED_THIN1 = 0xb, ARRAY_3D_TILED_THIN1 = 0xc, ARRAY_3D_TILED_THICK = 0xd, ARRAY_3D_TILED_XTHICK = 0xe, ARRAY_PRT_3D_TILED_THICK = 0xf, } ArrayMode; typedef enum PipeTiling { CONFIG_1_PIPE = 0x0, CONFIG_2_PIPE = 0x1, CONFIG_4_PIPE = 0x2, CONFIG_8_PIPE = 0x3, } PipeTiling; typedef enum BankTiling { CONFIG_4_BANK = 0x0, CONFIG_8_BANK = 0x1, } BankTiling; typedef enum GroupInterleave { CONFIG_256B_GROUP = 0x0, CONFIG_512B_GROUP = 0x1, } GroupInterleave; typedef enum RowTiling { CONFIG_1KB_ROW = 0x0, CONFIG_2KB_ROW = 0x1, CONFIG_4KB_ROW = 0x2, CONFIG_8KB_ROW = 0x3, CONFIG_1KB_ROW_OPT = 0x4, CONFIG_2KB_ROW_OPT = 0x5, CONFIG_4KB_ROW_OPT = 0x6, CONFIG_8KB_ROW_OPT = 0x7, } RowTiling; typedef enum BankSwapBytes { CONFIG_128B_SWAPS = 0x0, CONFIG_256B_SWAPS = 0x1, CONFIG_512B_SWAPS = 0x2, CONFIG_1KB_SWAPS = 0x3, } BankSwapBytes; typedef enum SampleSplitBytes { CONFIG_1KB_SPLIT = 0x0, CONFIG_2KB_SPLIT = 0x1, CONFIG_4KB_SPLIT = 0x2, CONFIG_8KB_SPLIT = 0x3, } SampleSplitBytes; typedef enum NumPipes { ADDR_CONFIG_1_PIPE = 0x0, ADDR_CONFIG_2_PIPE = 0x1, ADDR_CONFIG_4_PIPE = 0x2, ADDR_CONFIG_8_PIPE = 0x3, } 
NumPipes; typedef enum PipeInterleaveSize { ADDR_CONFIG_PIPE_INTERLEAVE_256B = 0x0, ADDR_CONFIG_PIPE_INTERLEAVE_512B = 0x1, } PipeInterleaveSize; typedef enum BankInterleaveSize { ADDR_CONFIG_BANK_INTERLEAVE_1 = 0x0, ADDR_CONFIG_BANK_INTERLEAVE_2 = 0x1, ADDR_CONFIG_BANK_INTERLEAVE_4 = 0x2, ADDR_CONFIG_BANK_INTERLEAVE_8 = 0x3, } BankInterleaveSize; typedef enum NumShaderEngines { ADDR_CONFIG_1_SHADER_ENGINE = 0x0, ADDR_CONFIG_2_SHADER_ENGINE = 0x1, } NumShaderEngines; typedef enum ShaderEngineTileSize { ADDR_CONFIG_SE_TILE_16 = 0x0, ADDR_CONFIG_SE_TILE_32 = 0x1, } ShaderEngineTileSize; typedef enum NumGPUs { ADDR_CONFIG_1_GPU = 0x0, ADDR_CONFIG_2_GPU = 0x1, ADDR_CONFIG_4_GPU = 0x2, } NumGPUs; typedef enum MultiGPUTileSize { ADDR_CONFIG_GPU_TILE_16 = 0x0, ADDR_CONFIG_GPU_TILE_32 = 0x1, ADDR_CONFIG_GPU_TILE_64 = 0x2, ADDR_CONFIG_GPU_TILE_128 = 0x3, } MultiGPUTileSize; typedef enum RowSize { ADDR_CONFIG_1KB_ROW = 0x0, ADDR_CONFIG_2KB_ROW = 0x1, ADDR_CONFIG_4KB_ROW = 0x2, } RowSize; typedef enum NumLowerPipes { ADDR_CONFIG_1_LOWER_PIPES = 0x0, ADDR_CONFIG_2_LOWER_PIPES = 0x1, } NumLowerPipes; typedef enum ColorTransform { DCC_CT_AUTO = 0x0, DCC_CT_NONE = 0x1, ABGR_TO_A_BG_G_RB = 0x2, BGRA_TO_BG_G_RB_A = 0x3, } ColorTransform; typedef enum CompareRef { REF_NEVER = 0x0, REF_LESS = 0x1, REF_EQUAL = 0x2, REF_LEQUAL = 0x3, REF_GREATER = 0x4, REF_NOTEQUAL = 0x5, REF_GEQUAL = 0x6, REF_ALWAYS = 0x7, } CompareRef; typedef enum ReadSize { READ_256_BITS = 0x0, READ_512_BITS = 0x1, } ReadSize; typedef enum DepthFormat { DEPTH_INVALID = 0x0, DEPTH_16 = 0x1, DEPTH_X8_24 = 0x2, DEPTH_8_24 = 0x3, DEPTH_X8_24_FLOAT = 0x4, DEPTH_8_24_FLOAT = 0x5, DEPTH_32_FLOAT = 0x6, DEPTH_X24_8_32_FLOAT = 0x7, } DepthFormat; typedef enum ZFormat { Z_INVALID = 0x0, Z_16 = 0x1, Z_24 = 0x2, Z_32_FLOAT = 0x3, } ZFormat; typedef enum StencilFormat { STENCIL_INVALID = 0x0, STENCIL_8 = 0x1, } StencilFormat; typedef enum CmaskMode { CMASK_CLEAR_NONE = 0x0, CMASK_CLEAR_ONE = 0x1, CMASK_CLEAR_ALL = 0x2, 
CMASK_ANY_EXPANDED = 0x3, CMASK_ALPHA0_FRAG1 = 0x4, CMASK_ALPHA0_FRAG2 = 0x5, CMASK_ALPHA0_FRAG4 = 0x6, CMASK_ALPHA0_FRAGS = 0x7, CMASK_ALPHA1_FRAG1 = 0x8, CMASK_ALPHA1_FRAG2 = 0x9, CMASK_ALPHA1_FRAG4 = 0xa, CMASK_ALPHA1_FRAGS = 0xb, CMASK_ALPHAX_FRAG1 = 0xc, CMASK_ALPHAX_FRAG2 = 0xd, CMASK_ALPHAX_FRAG4 = 0xe, CMASK_ALPHAX_FRAGS = 0xf, } CmaskMode; typedef enum QuadExportFormat { EXPORT_UNUSED = 0x0, EXPORT_32_R = 0x1, EXPORT_32_GR = 0x2, EXPORT_32_AR = 0x3, EXPORT_FP16_ABGR = 0x4, EXPORT_UNSIGNED16_ABGR = 0x5, EXPORT_SIGNED16_ABGR = 0x6, EXPORT_32_ABGR = 0x7, } QuadExportFormat; typedef enum QuadExportFormatOld { EXPORT_4P_32BPC_ABGR = 0x0, EXPORT_4P_16BPC_ABGR = 0x1, EXPORT_4P_32BPC_GR = 0x2, EXPORT_4P_32BPC_AR = 0x3, EXPORT_2P_32BPC_ABGR = 0x4, EXPORT_8P_32BPC_R = 0x5, } QuadExportFormatOld; typedef enum ColorFormat { COLOR_INVALID = 0x0, COLOR_8 = 0x1, COLOR_16 = 0x2, COLOR_8_8 = 0x3, COLOR_32 = 0x4, COLOR_16_16 = 0x5, COLOR_10_11_11 = 0x6, COLOR_11_11_10 = 0x7, COLOR_10_10_10_2 = 0x8, COLOR_2_10_10_10 = 0x9, COLOR_8_8_8_8 = 0xa, COLOR_32_32 = 0xb, COLOR_16_16_16_16 = 0xc, COLOR_RESERVED_13 = 0xd, COLOR_32_32_32_32 = 0xe, COLOR_RESERVED_15 = 0xf, COLOR_5_6_5 = 0x10, COLOR_1_5_5_5 = 0x11, COLOR_5_5_5_1 = 0x12, COLOR_4_4_4_4 = 0x13, COLOR_8_24 = 0x14, COLOR_24_8 = 0x15, COLOR_X24_8_32_FLOAT = 0x16, COLOR_RESERVED_23 = 0x17, } ColorFormat; typedef enum SurfaceFormat { FMT_INVALID = 0x0, FMT_8 = 0x1, FMT_16 = 0x2, FMT_8_8 = 0x3, FMT_32 = 0x4, FMT_16_16 = 0x5, FMT_10_11_11 = 0x6, FMT_11_11_10 = 0x7, FMT_10_10_10_2 = 0x8, FMT_2_10_10_10 = 0x9, FMT_8_8_8_8 = 0xa, FMT_32_32 = 0xb, FMT_16_16_16_16 = 0xc, FMT_32_32_32 = 0xd, FMT_32_32_32_32 = 0xe, FMT_RESERVED_4 = 0xf, FMT_5_6_5 = 0x10, FMT_1_5_5_5 = 0x11, FMT_5_5_5_1 = 0x12, FMT_4_4_4_4 = 0x13, FMT_8_24 = 0x14, FMT_24_8 = 0x15, FMT_X24_8_32_FLOAT = 0x16, FMT_RESERVED_33 = 0x17, FMT_11_11_10_FLOAT = 0x18, FMT_16_FLOAT = 0x19, FMT_32_FLOAT = 0x1a, FMT_16_16_FLOAT = 0x1b, FMT_8_24_FLOAT = 0x1c, FMT_24_8_FLOAT = 0x1d, 
FMT_32_32_FLOAT = 0x1e, FMT_10_11_11_FLOAT = 0x1f, FMT_16_16_16_16_FLOAT = 0x20, FMT_3_3_2 = 0x21, FMT_6_5_5 = 0x22, FMT_32_32_32_32_FLOAT = 0x23, FMT_RESERVED_36 = 0x24, FMT_1 = 0x25, FMT_1_REVERSED = 0x26, FMT_GB_GR = 0x27, FMT_BG_RG = 0x28, FMT_32_AS_8 = 0x29, FMT_32_AS_8_8 = 0x2a, FMT_5_9_9_9_SHAREDEXP = 0x2b, FMT_8_8_8 = 0x2c, FMT_16_16_16 = 0x2d, FMT_16_16_16_FLOAT = 0x2e, FMT_4_4 = 0x2f, FMT_32_32_32_FLOAT = 0x30, FMT_BC1 = 0x31, FMT_BC2 = 0x32, FMT_BC3 = 0x33, FMT_BC4 = 0x34, FMT_BC5 = 0x35, FMT_BC6 = 0x36, FMT_BC7 = 0x37, FMT_32_AS_32_32_32_32 = 0x38, FMT_APC3 = 0x39, FMT_APC4 = 0x3a, FMT_APC5 = 0x3b, FMT_APC6 = 0x3c, FMT_APC7 = 0x3d, FMT_CTX1 = 0x3e, FMT_RESERVED_63 = 0x3f, } SurfaceFormat; typedef enum BUF_DATA_FORMAT { BUF_DATA_FORMAT_INVALID = 0x0, BUF_DATA_FORMAT_8 = 0x1, BUF_DATA_FORMAT_16 = 0x2, BUF_DATA_FORMAT_8_8 = 0x3, BUF_DATA_FORMAT_32 = 0x4, BUF_DATA_FORMAT_16_16 = 0x5, BUF_DATA_FORMAT_10_11_11 = 0x6, BUF_DATA_FORMAT_11_11_10 = 0x7, BUF_DATA_FORMAT_10_10_10_2 = 0x8, BUF_DATA_FORMAT_2_10_10_10 = 0x9, BUF_DATA_FORMAT_8_8_8_8 = 0xa, BUF_DATA_FORMAT_32_32 = 0xb, BUF_DATA_FORMAT_16_16_16_16 = 0xc, BUF_DATA_FORMAT_32_32_32 = 0xd, BUF_DATA_FORMAT_32_32_32_32 = 0xe, BUF_DATA_FORMAT_RESERVED_15 = 0xf, } BUF_DATA_FORMAT; typedef enum IMG_DATA_FORMAT { IMG_DATA_FORMAT_INVALID = 0x0, IMG_DATA_FORMAT_8 = 0x1, IMG_DATA_FORMAT_16 = 0x2, IMG_DATA_FORMAT_8_8 = 0x3, IMG_DATA_FORMAT_32 = 0x4, IMG_DATA_FORMAT_16_16 = 0x5, IMG_DATA_FORMAT_10_11_11 = 0x6, IMG_DATA_FORMAT_11_11_10 = 0x7, IMG_DATA_FORMAT_10_10_10_2 = 0x8, IMG_DATA_FORMAT_2_10_10_10 = 0x9, IMG_DATA_FORMAT_8_8_8_8 = 0xa, IMG_DATA_FORMAT_32_32 = 0xb, IMG_DATA_FORMAT_16_16_16_16 = 0xc, IMG_DATA_FORMAT_32_32_32 = 0xd, IMG_DATA_FORMAT_32_32_32_32 = 0xe, IMG_DATA_FORMAT_RESERVED_15 = 0xf, IMG_DATA_FORMAT_5_6_5 = 0x10, IMG_DATA_FORMAT_1_5_5_5 = 0x11, IMG_DATA_FORMAT_5_5_5_1 = 0x12, IMG_DATA_FORMAT_4_4_4_4 = 0x13, IMG_DATA_FORMAT_8_24 = 0x14, IMG_DATA_FORMAT_24_8 = 0x15, IMG_DATA_FORMAT_X24_8_32 = 0x16, 
IMG_DATA_FORMAT_RESERVED_23 = 0x17, IMG_DATA_FORMAT_RESERVED_24 = 0x18, IMG_DATA_FORMAT_RESERVED_25 = 0x19, IMG_DATA_FORMAT_RESERVED_26 = 0x1a, IMG_DATA_FORMAT_RESERVED_27 = 0x1b, IMG_DATA_FORMAT_RESERVED_28 = 0x1c, IMG_DATA_FORMAT_RESERVED_29 = 0x1d, IMG_DATA_FORMAT_RESERVED_30 = 0x1e, IMG_DATA_FORMAT_RESERVED_31 = 0x1f, IMG_DATA_FORMAT_GB_GR = 0x20, IMG_DATA_FORMAT_BG_RG = 0x21, IMG_DATA_FORMAT_5_9_9_9 = 0x22, IMG_DATA_FORMAT_BC1 = 0x23, IMG_DATA_FORMAT_BC2 = 0x24, IMG_DATA_FORMAT_BC3 = 0x25, IMG_DATA_FORMAT_BC4 = 0x26, IMG_DATA_FORMAT_BC5 = 0x27, IMG_DATA_FORMAT_BC6 = 0x28, IMG_DATA_FORMAT_BC7 = 0x29, IMG_DATA_FORMAT_RESERVED_42 = 0x2a, IMG_DATA_FORMAT_RESERVED_43 = 0x2b, IMG_DATA_FORMAT_FMASK8_S2_F1 = 0x2c, IMG_DATA_FORMAT_FMASK8_S4_F1 = 0x2d, IMG_DATA_FORMAT_FMASK8_S8_F1 = 0x2e, IMG_DATA_FORMAT_FMASK8_S2_F2 = 0x2f, IMG_DATA_FORMAT_FMASK8_S4_F2 = 0x30, IMG_DATA_FORMAT_FMASK8_S4_F4 = 0x31, IMG_DATA_FORMAT_FMASK16_S16_F1 = 0x32, IMG_DATA_FORMAT_FMASK16_S8_F2 = 0x33, IMG_DATA_FORMAT_FMASK32_S16_F2 = 0x34, IMG_DATA_FORMAT_FMASK32_S8_F4 = 0x35, IMG_DATA_FORMAT_FMASK32_S8_F8 = 0x36, IMG_DATA_FORMAT_FMASK64_S16_F4 = 0x37, IMG_DATA_FORMAT_FMASK64_S16_F8 = 0x38, IMG_DATA_FORMAT_4_4 = 0x39, IMG_DATA_FORMAT_6_5_5 = 0x3a, IMG_DATA_FORMAT_1 = 0x3b, IMG_DATA_FORMAT_1_REVERSED = 0x3c, IMG_DATA_FORMAT_32_AS_8 = 0x3d, IMG_DATA_FORMAT_32_AS_8_8 = 0x3e, IMG_DATA_FORMAT_32_AS_32_32_32_32 = 0x3f, } IMG_DATA_FORMAT; typedef enum BUF_NUM_FORMAT { BUF_NUM_FORMAT_UNORM = 0x0, BUF_NUM_FORMAT_SNORM = 0x1, BUF_NUM_FORMAT_USCALED = 0x2, BUF_NUM_FORMAT_SSCALED = 0x3, BUF_NUM_FORMAT_UINT = 0x4, BUF_NUM_FORMAT_SINT = 0x5, BUF_NUM_FORMAT_RESERVED_6 = 0x6, BUF_NUM_FORMAT_FLOAT = 0x7, } BUF_NUM_FORMAT; typedef enum IMG_NUM_FORMAT { IMG_NUM_FORMAT_UNORM = 0x0, IMG_NUM_FORMAT_SNORM = 0x1, IMG_NUM_FORMAT_USCALED = 0x2, IMG_NUM_FORMAT_SSCALED = 0x3, IMG_NUM_FORMAT_UINT = 0x4, IMG_NUM_FORMAT_SINT = 0x5, IMG_NUM_FORMAT_RESERVED_6 = 0x6, IMG_NUM_FORMAT_FLOAT = 0x7, IMG_NUM_FORMAT_RESERVED_8 = 0x8, 
IMG_NUM_FORMAT_SRGB = 0x9, IMG_NUM_FORMAT_RESERVED_10 = 0xa, IMG_NUM_FORMAT_RESERVED_11 = 0xb, IMG_NUM_FORMAT_RESERVED_12 = 0xc, IMG_NUM_FORMAT_RESERVED_13 = 0xd, IMG_NUM_FORMAT_RESERVED_14 = 0xe, IMG_NUM_FORMAT_RESERVED_15 = 0xf, } IMG_NUM_FORMAT; typedef enum TileType { ARRAY_COLOR_TILE = 0x0, ARRAY_DEPTH_TILE = 0x1, } TileType; typedef enum NonDispTilingOrder { ADDR_SURF_MICRO_TILING_DISPLAY = 0x0, ADDR_SURF_MICRO_TILING_NON_DISPLAY = 0x1, } NonDispTilingOrder; typedef enum MicroTileMode { ADDR_SURF_DISPLAY_MICRO_TILING = 0x0, ADDR_SURF_THIN_MICRO_TILING = 0x1, ADDR_SURF_DEPTH_MICRO_TILING = 0x2, ADDR_SURF_ROTATED_MICRO_TILING = 0x3, ADDR_SURF_THICK_MICRO_TILING = 0x4, } MicroTileMode; typedef enum TileSplit { ADDR_SURF_TILE_SPLIT_64B = 0x0, ADDR_SURF_TILE_SPLIT_128B = 0x1, ADDR_SURF_TILE_SPLIT_256B = 0x2, ADDR_SURF_TILE_SPLIT_512B = 0x3, ADDR_SURF_TILE_SPLIT_1KB = 0x4, ADDR_SURF_TILE_SPLIT_2KB = 0x5, ADDR_SURF_TILE_SPLIT_4KB = 0x6, } TileSplit; typedef enum SampleSplit { ADDR_SURF_SAMPLE_SPLIT_1 = 0x0, ADDR_SURF_SAMPLE_SPLIT_2 = 0x1, ADDR_SURF_SAMPLE_SPLIT_4 = 0x2, ADDR_SURF_SAMPLE_SPLIT_8 = 0x3, } SampleSplit; typedef enum PipeConfig { ADDR_SURF_P2 = 0x0, ADDR_SURF_P2_RESERVED0 = 0x1, ADDR_SURF_P2_RESERVED1 = 0x2, ADDR_SURF_P2_RESERVED2 = 0x3, ADDR_SURF_P4_8x16 = 0x4, ADDR_SURF_P4_16x16 = 0x5, ADDR_SURF_P4_16x32 = 0x6, ADDR_SURF_P4_32x32 = 0x7, ADDR_SURF_P8_16x16_8x16 = 0x8, ADDR_SURF_P8_16x32_8x16 = 0x9, ADDR_SURF_P8_32x32_8x16 = 0xa, ADDR_SURF_P8_16x32_16x16 = 0xb, ADDR_SURF_P8_32x32_16x16 = 0xc, ADDR_SURF_P8_32x32_16x32 = 0xd, ADDR_SURF_P8_32x64_32x32 = 0xe, ADDR_SURF_P8_RESERVED0 = 0xf, ADDR_SURF_P16_32x32_8x16 = 0x10, ADDR_SURF_P16_32x32_16x16 = 0x11, } PipeConfig; typedef enum NumBanks { ADDR_SURF_2_BANK = 0x0, ADDR_SURF_4_BANK = 0x1, ADDR_SURF_8_BANK = 0x2, ADDR_SURF_16_BANK = 0x3, } NumBanks; typedef enum BankWidth { ADDR_SURF_BANK_WIDTH_1 = 0x0, ADDR_SURF_BANK_WIDTH_2 = 0x1, ADDR_SURF_BANK_WIDTH_4 = 0x2, ADDR_SURF_BANK_WIDTH_8 = 0x3, } BankWidth; 
/* Macro-tile bank height in tiles; value is log2 of the height (1/2/4/8). */
typedef enum BankHeight {
	ADDR_SURF_BANK_HEIGHT_1 = 0x0,
	ADDR_SURF_BANK_HEIGHT_2 = 0x1,
	ADDR_SURF_BANK_HEIGHT_4 = 0x2,
	ADDR_SURF_BANK_HEIGHT_8 = 0x3,
} BankHeight;

/* Combined bank width/height selector; value is log2 encoded (1/2/4/8). */
typedef enum BankWidthHeight {
	ADDR_SURF_BANK_WH_1 = 0x0,
	ADDR_SURF_BANK_WH_2 = 0x1,
	ADDR_SURF_BANK_WH_4 = 0x2,
	ADDR_SURF_BANK_WH_8 = 0x3,
} BankWidthHeight;

/* Macro-tile aspect ratio; value is log2 encoded (1/2/4/8). */
typedef enum MacroTileAspect {
	ADDR_SURF_MACRO_ASPECT_1 = 0x0,
	ADDR_SURF_MACRO_ASPECT_2 = 0x1,
	ADDR_SURF_MACRO_ASPECT_4 = 0x2,
	ADDR_SURF_MACRO_ASPECT_8 = 0x3,
} MacroTileAspect;

/* GATC L1 request classification (normal lookup, shootdown, or bypass). */
typedef enum GATCL1RequestType {
	GATCL1_TYPE_NORMAL = 0x0,
	GATCL1_TYPE_SHOOTDOWN = 0x1,
	GATCL1_TYPE_BYPASS = 0x2,
} GATCL1RequestType;

/* TCC (texture channel cache) replacement policy. */
typedef enum TCC_CACHE_POLICIES {
	TCC_CACHE_POLICY_LRU = 0x0,
	TCC_CACHE_POLICY_STREAM = 0x1,
} TCC_CACHE_POLICIES;

/*
 * Memory type attribute.
 * NOTE(review): encodings appear to be non-coherent/non-volatile (NC_NV),
 * non-coherent (NC), cache-coherent (CC) and uncached (UC) — confirm
 * against the GMC programming guide before relying on the expansion.
 */
typedef enum MTYPE {
	MTYPE_NC_NV = 0x0,
	MTYPE_NC = 0x1,
	MTYPE_CC = 0x2,
	MTYPE_UC = 0x3,
} MTYPE;

/* Perf-counter accumulation mode; 0xa-0xe are unassigned, 0xf reserved. */
typedef enum PERFMON_COUNTER_MODE {
	PERFMON_COUNTER_MODE_ACCUM = 0x0,
	PERFMON_COUNTER_MODE_ACTIVE_CYCLES = 0x1,
	PERFMON_COUNTER_MODE_MAX = 0x2,
	PERFMON_COUNTER_MODE_DIRTY = 0x3,
	PERFMON_COUNTER_MODE_SAMPLE = 0x4,
	PERFMON_COUNTER_MODE_CYCLES_SINCE_FIRST_EVENT = 0x5,
	PERFMON_COUNTER_MODE_CYCLES_SINCE_LAST_EVENT = 0x6,
	PERFMON_COUNTER_MODE_CYCLES_GE_HI = 0x7,
	PERFMON_COUNTER_MODE_CYCLES_EQ_HI = 0x8,
	PERFMON_COUNTER_MODE_INACTIVE_CYCLES = 0x9,
	PERFMON_COUNTER_MODE_RESERVED = 0xf,
} PERFMON_COUNTER_MODE;

/* Streaming perf-monitor (SPM) sample width/clamping mode. */
typedef enum PERFMON_SPM_MODE {
	PERFMON_SPM_MODE_OFF = 0x0,
	PERFMON_SPM_MODE_16BIT_CLAMP = 0x1,
	PERFMON_SPM_MODE_16BIT_NO_CLAMP = 0x2,
	PERFMON_SPM_MODE_32BIT_CLAMP = 0x3,
	PERFMON_SPM_MODE_32BIT_NO_CLAMP = 0x4,
	PERFMON_SPM_MODE_RESERVED_5 = 0x5,
	PERFMON_SPM_MODE_RESERVED_6 = 0x6,
	PERFMON_SPM_MODE_RESERVED_7 = 0x7,
	PERFMON_SPM_MODE_TEST_MODE_0 = 0x8,
	PERFMON_SPM_MODE_TEST_MODE_1 = 0x9,
	PERFMON_SPM_MODE_TEST_MODE_2 = 0xa,
} PERFMON_SPM_MODE;

/* Coarse surface layout: linear vs. tiled. */
typedef enum SurfaceTiling {
	ARRAY_LINEAR = 0x0,
	ARRAY_TILED = 0x1,
} SurfaceTiling;

/* Surface dimensionality (3D_SLICE = per-slice addressing of a 3D surface). */
typedef enum SurfaceArray {
	ARRAY_1D = 0x0,
	ARRAY_2D = 0x1,
	ARRAY_3D = 0x2,
	ARRAY_3D_SLICE = 0x3,
} SurfaceArray;
/* Color-surface array mode; note 0x2 is intentionally not defined. */
typedef enum ColorArray {
	ARRAY_2D_ALT_COLOR = 0x0,
	ARRAY_2D_COLOR = 0x1,
	ARRAY_3D_SLICE_COLOR = 0x3,
} ColorArray;

/* Depth-surface array mode. */
typedef enum DepthArray {
	ARRAY_2D_ALT_DEPTH = 0x0,
	ARRAY_2D_DEPTH = 0x1,
} DepthArray;

/* Hardware constant: SIMDs per compute unit on this generation. */
typedef enum ENUM_NUM_SIMD_PER_CU {
	NUM_SIMD_PER_CU = 0x4,
} ENUM_NUM_SIMD_PER_CU;

/* Forced memory power state request (deepest saving = shut down). */
typedef enum MEM_PWR_FORCE_CTRL {
	NO_FORCE_REQUEST = 0x0,
	FORCE_LIGHT_SLEEP_REQUEST = 0x1,
	FORCE_DEEP_SLEEP_REQUEST = 0x2,
	FORCE_SHUT_DOWN_REQUEST = 0x3,
} MEM_PWR_FORCE_CTRL;

/* Reduced force-control variant: light sleep only. */
typedef enum MEM_PWR_FORCE_CTRL2 {
	NO_FORCE_REQ = 0x0,
	FORCE_LIGHT_SLEEP_REQ = 0x1,
} MEM_PWR_FORCE_CTRL2;

/* Master enable/disable for memory power control. */
typedef enum MEM_PWR_DIS_CTRL {
	ENABLE_MEM_PWR_CTRL = 0x0,
	DISABLE_MEM_PWR_CTRL = 0x1,
} MEM_PWR_DIS_CTRL;

/* Dynamic memory power-saving mode selection. */
typedef enum MEM_PWR_SEL_CTRL {
	DYNAMIC_SHUT_DOWN_ENABLE = 0x0,
	DYNAMIC_DEEP_SLEEP_ENABLE = 0x1,
	DYNAMIC_LIGHT_SLEEP_ENABLE = 0x2,
} MEM_PWR_SEL_CTRL;

/* Reduced dynamic-selection variant: deep or light sleep. */
typedef enum MEM_PWR_SEL_CTRL2 {
	DYNAMIC_DEEP_SLEEP_EN = 0x0,
	DYNAMIC_LIGHT_SLEEP_EN = 0x1,
} MEM_PWR_SEL_CTRL2;
#endif /* UVD_6_0_ENUM_H */
// SPDX-License-Identifier: GPL-2.0+
/*
 * originally written by: Kirk Reiser <[email protected]>
 * this version considerably modified by David Borowski, [email protected]
 *
 * Copyright (C) 1998-99 Kirk Reiser.
 * Copyright (C) 2003 David Borowski.
 *
 * specifically written as a driver for the speakup screen review
 * package; it is not a general device driver.
 */
#include <linux/unistd.h>
#include <linux/proc_fs.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/kthread.h>

#include "speakup.h"
#include "spk_priv.h"

#define DRV_VERSION "2.20"
#define SYNTH_CLEAR 0x03	/* control byte telling the synth to flush */
#define PROCSPEECH 0x0b		/* control byte forcing buffered text out */

/* nonzero while the synth has flow-controlled us with XOFF (0x13) */
static int xoff;

/* "Full" here means the synth asked us to stop sending (XOFF received). */
static inline int synth_full(void)
{
        return xoff;
}

static void do_catch_up(struct spk_synth *synth);
static void synth_flush(struct spk_synth *synth);
static void read_buff_add(u_char c);
static unsigned char get_index(struct spk_synth *synth);

static int in_escape;		/* nonzero while inside a "[...]" command */
static int is_flushing;		/* set by synth_flush(), cleared on 0x01 ack */

static DEFINE_SPINLOCK(flush_lock);	/* protects is_flushing */
static DECLARE_WAIT_QUEUE_HEAD(flush);	/* woken when the flush ack arrives */

enum default_vars_id {
        CAPS_START_ID = 0, CAPS_STOP_ID,
        RATE_ID, PITCH_ID, INFLECTION_ID,
        VOL_ID, PUNCT_ID, VOICE_ID,
        DIRECT_ID, V_LAST_VAR_ID,
        NB_ID,
};

/*
 * Per-variable definitions: DECtalk command template plus default/min/max
 * values (for numeric vars) or the literal command string (for string vars).
 */
static struct var_t vars[NB_ID] = {
        [CAPS_START_ID] = { CAPS_START, .u.s = {"[:dv ap 160] " } },
        [CAPS_STOP_ID] = { CAPS_STOP, .u.s = {"[:dv ap 100 ] " } },
        [RATE_ID] = { RATE, .u.n = {"[:ra %d] ", 180, 75, 650, 0, 0, NULL } },
        [PITCH_ID] = { PITCH, .u.n = {"[:dv ap %d] ", 122, 50, 350, 0, 0, NULL } },
        [INFLECTION_ID] = { INFLECTION, .u.n = {"[:dv pr %d] ", 100, 0, 10000, 0, 0, NULL } },
        [VOL_ID] = { VOL, .u.n = {"[:dv g5 %d] ", 86, 60, 86, 0, 0, NULL } },
        [PUNCT_ID] = { PUNCT, .u.n = {"[:pu %c] ", 0, 0, 2, 0, 0, "nsa" } },
        [VOICE_ID] = { VOICE, .u.n = {"[:n%c] ", 0, 0, 9, 0, 0, "phfdburwkv" } },
        [DIRECT_ID] = { DIRECT, .u.n = {NULL, 0, 0, 1, 0, 0, NULL } },
        V_LAST_VAR
};

/*
 * These attributes will appear in /sys/accessibility/speakup/dectlk.
 */
static struct kobj_attribute caps_start_attribute =
        __ATTR(caps_start, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute caps_stop_attribute =
        __ATTR(caps_stop, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute pitch_attribute =
        __ATTR(pitch, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute inflection_attribute =
        __ATTR(inflection, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute punct_attribute =
        __ATTR(punct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute rate_attribute =
        __ATTR(rate, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute voice_attribute =
        __ATTR(voice, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute vol_attribute =
        __ATTR(vol, 0644, spk_var_show, spk_var_store);

static struct kobj_attribute delay_time_attribute =
        __ATTR(delay_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute direct_attribute =
        __ATTR(direct, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute full_time_attribute =
        __ATTR(full_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute flush_time_attribute =
        __ATTR(flush_time, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute jiffy_delta_attribute =
        __ATTR(jiffy_delta, 0644, spk_var_show, spk_var_store);
static struct kobj_attribute trigger_time_attribute =
        __ATTR(trigger_time, 0644, spk_var_show, spk_var_store);

/*
 * Create a group of attributes so that we can create and destroy them all
 * at once.
 */
static struct attribute *synth_attrs[] = {
        &caps_start_attribute.attr,
        &caps_stop_attribute.attr,
        &pitch_attribute.attr,
        &inflection_attribute.attr,
        &punct_attribute.attr,
        &rate_attribute.attr,
        &voice_attribute.attr,
        &vol_attribute.attr,
        &delay_time_attribute.attr,
        &direct_attribute.attr,
        &full_time_attribute.attr,
        &flush_time_attribute.attr,
        &jiffy_delta_attribute.attr,
        &trigger_time_attribute.attr,
        NULL,	/* need to NULL terminate the list of attributes */
};

/* Per-voice default pitch ("ap") and volume ("g5") values, indexed by voice. */
static int ap_defaults[] = {122, 89, 155, 110, 208, 240, 200, 106, 306};
static int g5_defaults[] = {86, 81, 86, 84, 81, 80, 83, 83, 73};

static struct spk_synth synth_dectlk = {
        .name = "dectlk",
        .version = DRV_VERSION,
        .long_name = "Dectalk Express",
        .init = "[:error sp :name paul :rate 180 :tsr off] ",
        .procspeech = PROCSPEECH,
        .clear = SYNTH_CLEAR,
        .delay = 500,
        .trigger = 50,
        .jiffies = 50,
        .full = 40000,
        .flush_time = 4000,
        .dev_name = SYNTH_DEFAULT_DEV,
        .startup = SYNTH_START,
        .checkval = SYNTH_CHECK,
        .vars = vars,
        .default_pitch = ap_defaults,
        .default_vol = g5_defaults,
        .io_ops = &spk_ttyio_ops,
        .probe = spk_ttyio_synth_probe,
        .release = spk_ttyio_release,
        .synth_immediate = spk_ttyio_synth_immediate,
        .catch_up = do_catch_up,
        .flush = synth_flush,
        .is_alive = spk_synth_is_alive_restart,
        .synth_adjust = NULL,
        .read_buff_add = read_buff_add,
        .get_index = get_index,
        .indexing = {
                .command = "[:in re %d ] ",
                .lowindex = 1,
                .highindex = 8,
                .currindex = 1,
        },
        .attributes = {
                .attrs = synth_attrs,
                .name = "dectlk",
        },
};

/* If *ch is an ASCII digit, convert it to its numeric value in place. */
static int is_indnum(u_char *ch)
{
        if ((*ch >= '0') && (*ch <= '9')) {
                *ch = *ch - '0';
                return 1;
        }
        return 0;
}

/* Last complete index number parsed out of the synth's reply stream. */
static u_char lastind;

/* Return (and consume) the most recent index reported by the synth. */
static unsigned char get_index(struct spk_synth *synth)
{
        u_char rv;

        rv = lastind;
        lastind = 0;
        return rv;
}

/*
 * Consume one byte coming back from the synthesizer:
 * 0x01 acknowledges a flush, 0x13/0x11 are XOFF/XON flow control,
 * digit runs accumulate into an index number, and any other printable
 * byte terminates (commits) the pending index.
 */
static void read_buff_add(u_char c)
{
        static int ind = -1;	/* index being accumulated; -1 = none */

        if (c == 0x01) {
                unsigned long flags;

                spin_lock_irqsave(&flush_lock, flags);
                is_flushing = 0;
                wake_up_interruptible(&flush);
                spin_unlock_irqrestore(&flush_lock, flags);
        } else if (c == 0x13) {		/* XOFF: synth buffer is full */
                xoff = 1;
        } else if (c == 0x11) {		/* XON: synth can accept data again */
                xoff = 0;
        } else if (is_indnum(&c)) {
                if (ind == -1)
                        ind = c;
                else
                        ind = ind * 10 + c;
        } else if ((c > 31) && (c < 127)) {
                if (ind != -1)
                        lastind = (u_char)ind;
                ind = -1;
        }
}

/*
 * Kthread body: drain the speakup buffer to the synthesizer, honoring
 * flush requests, XOFF flow control, and the jiffy/delay tuning variables.
 */
static void do_catch_up(struct spk_synth *synth)
{
        int synth_full_val = 0;
        static u_char ch;
        static u_char last = '\0';
        unsigned long flags;
        unsigned long jiff_max;
        unsigned long timeout;
        DEFINE_WAIT(wait);
        struct var_t *jiffy_delta;
        struct var_t *delay_time;
        struct var_t *flush_time;
        int jiffy_delta_val;
        int delay_time_val;
        int timeout_val;

        jiffy_delta = spk_get_var(JIFFY);
        delay_time = spk_get_var(DELAY);
        flush_time = spk_get_var(FLUSH);
        spin_lock_irqsave(&speakup_info.spinlock, flags);
        jiffy_delta_val = jiffy_delta->u.n.value;
        timeout_val = flush_time->u.n.value;
        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
        timeout = msecs_to_jiffies(timeout_val);
        jiff_max = jiffies + jiffy_delta_val;

        while (!kthread_should_stop()) {
                /* if no ctl-a (flush ack) arrives within flush_time, send data anyway */
                spin_lock_irqsave(&flush_lock, flags);
                while (is_flushing && timeout) {
                        prepare_to_wait(&flush, &wait, TASK_INTERRUPTIBLE);
                        spin_unlock_irqrestore(&flush_lock, flags);
                        timeout = schedule_timeout(timeout);
                        spin_lock_irqsave(&flush_lock, flags);
                }
                finish_wait(&flush, &wait);
                is_flushing = 0;
                spin_unlock_irqrestore(&flush_lock, flags);

                spin_lock_irqsave(&speakup_info.spinlock, flags);
                if (speakup_info.flushing) {
                        speakup_info.flushing = 0;
                        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                        synth->flush(synth);
                        continue;
                }
                synth_buffer_skip_nonlatin1();
                if (synth_buffer_empty()) {
                        spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                        break;
                }
                ch = synth_buffer_peek();
                set_current_state(TASK_INTERRUPTIBLE);
                delay_time_val = delay_time->u.n.value;
                synth_full_val = synth_full();
                spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                if (ch == '\n')
                        ch = 0x0D;
                /* back off if the synth is XOFF'd or the write failed */
                if (synth_full_val || !synth->io_ops->synth_out(synth, ch)) {
                        schedule_timeout(msecs_to_jiffies(delay_time_val));
                        continue;
                }
                set_current_state(TASK_RUNNING);
                spin_lock_irqsave(&speakup_info.spinlock, flags);
                synth_buffer_getc();
                spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                if (ch == '[') {
                        in_escape = 1;
                } else if (ch == ']') {
                        in_escape = 0;
                } else if (ch <= SPACE) {
                        /* force speech at clause boundaries outside commands */
                        if (!in_escape && strchr(",.!?;:", last))
                                synth->io_ops->synth_out(synth, PROCSPEECH);
                        if (time_after_eq(jiffies, jiff_max)) {
                                if (!in_escape)
                                        synth->io_ops->synth_out(synth, PROCSPEECH);
                                spin_lock_irqsave(&speakup_info.spinlock, flags);
                                jiffy_delta_val = jiffy_delta->u.n.value;
                                delay_time_val = delay_time->u.n.value;
                                spin_unlock_irqrestore(&speakup_info.spinlock, flags);
                                schedule_timeout(msecs_to_jiffies(delay_time_val));
                                jiff_max = jiffies + jiffy_delta_val;
                        }
                }
                last = ch;
        }
        if (!in_escape)
                synth->io_ops->synth_out(synth, PROCSPEECH);
}

/* Abort pending speech and tell the synth to discard its buffer. */
static void synth_flush(struct spk_synth *synth)
{
        if (in_escape)
                /* if in command output ']' so we don't get an error */
                synth->io_ops->synth_out(synth, ']');
        in_escape = 0;
        is_flushing = 1;	/* cleared by read_buff_add() on the 0x01 ack */
        synth->io_ops->flush_buffer(synth);
        synth->io_ops->synth_out(synth, SYNTH_CLEAR);
}

module_param_named(ser, synth_dectlk.ser, int, 0444);
module_param_named(dev, synth_dectlk.dev_name, charp, 0444);
module_param_named(start, synth_dectlk.startup, short, 0444);
module_param_named(rate, vars[RATE_ID].u.n.default_val, int, 0444);
module_param_named(pitch, vars[PITCH_ID].u.n.default_val, int, 0444);
module_param_named(inflection, vars[INFLECTION_ID].u.n.default_val, int, 0444);
module_param_named(vol, vars[VOL_ID].u.n.default_val, int, 0444);
module_param_named(punct, vars[PUNCT_ID].u.n.default_val, int, 0444);
module_param_named(voice, vars[VOICE_ID].u.n.default_val, int, 0444);
module_param_named(direct, vars[DIRECT_ID].u.n.default_val, int, 0444);

MODULE_PARM_DESC(ser, "Set the serial port for the synthesizer (0-based).");
MODULE_PARM_DESC(dev, "Set the device e.g. ttyUSB0, for the synthesizer.");
MODULE_PARM_DESC(start, "Start the synthesizer once it is loaded.");
MODULE_PARM_DESC(rate, "Set the rate variable on load.");
MODULE_PARM_DESC(pitch, "Set the pitch variable on load.");
MODULE_PARM_DESC(inflection, "Set the inflection variable on load.");
MODULE_PARM_DESC(vol, "Set the vol variable on load.");
MODULE_PARM_DESC(punct, "Set the punct variable on load.");
MODULE_PARM_DESC(voice, "Set the voice variable on load.");
MODULE_PARM_DESC(direct, "Set the direct variable on load.");

module_spk_synth(synth_dectlk);

MODULE_AUTHOR("Kirk Reiser <[email protected]>");
MODULE_AUTHOR("David Borowski");
MODULE_DESCRIPTION("Speakup support for DECtalk Express synthesizers");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
// SPDX-License-Identifier: GPL-2.0-or-later /* * HID driver for Nintendo Wii / Wii U peripherals * Copyright (c) 2011-2013 David Herrmann <[email protected]> */ /* */ #include <linux/completion.h> #include <linux/device.h> #include <linux/hid.h> #include <linux/input.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include "hid-ids.h" #include "hid-wiimote.h" /* output queue handling */ static int wiimote_hid_send(struct hid_device *hdev, __u8 *buffer, size_t count) { __u8 *buf; int ret; if (!hdev->ll_driver->output_report) return -ENODEV; buf = kmemdup(buffer, count, GFP_KERNEL); if (!buf) return -ENOMEM; ret = hid_hw_output_report(hdev, buf, count); kfree(buf); return ret; } static void wiimote_queue_worker(struct work_struct *work) { struct wiimote_queue *queue = container_of(work, struct wiimote_queue, worker); struct wiimote_data *wdata = container_of(queue, struct wiimote_data, queue); unsigned long flags; int ret; spin_lock_irqsave(&wdata->queue.lock, flags); while (wdata->queue.head != wdata->queue.tail) { spin_unlock_irqrestore(&wdata->queue.lock, flags); ret = wiimote_hid_send(wdata->hdev, wdata->queue.outq[wdata->queue.tail].data, wdata->queue.outq[wdata->queue.tail].size); if (ret < 0) { spin_lock_irqsave(&wdata->state.lock, flags); wiimote_cmd_abort(wdata); spin_unlock_irqrestore(&wdata->state.lock, flags); } spin_lock_irqsave(&wdata->queue.lock, flags); wdata->queue.tail = (wdata->queue.tail + 1) % WIIMOTE_BUFSIZE; } spin_unlock_irqrestore(&wdata->queue.lock, flags); } static void wiimote_queue(struct wiimote_data *wdata, const __u8 *buffer, size_t count) { unsigned long flags; __u8 newhead; if (count > HID_MAX_BUFFER_SIZE) { hid_warn(wdata->hdev, "Sending too large output report\n"); spin_lock_irqsave(&wdata->queue.lock, flags); goto out_error; } /* * Copy new request into our output queue and check whether the * queue is full. If it is full, discard this request. 
* If it is empty we need to start a new worker that will * send out the buffer to the hid device. * If the queue is not empty, then there must be a worker * that is currently sending out our buffer and this worker * will reschedule itself until the queue is empty. */ spin_lock_irqsave(&wdata->queue.lock, flags); memcpy(wdata->queue.outq[wdata->queue.head].data, buffer, count); wdata->queue.outq[wdata->queue.head].size = count; newhead = (wdata->queue.head + 1) % WIIMOTE_BUFSIZE; if (wdata->queue.head == wdata->queue.tail) { wdata->queue.head = newhead; schedule_work(&wdata->queue.worker); } else if (newhead != wdata->queue.tail) { wdata->queue.head = newhead; } else { hid_warn(wdata->hdev, "Output queue is full"); goto out_error; } goto out_unlock; out_error: wiimote_cmd_abort(wdata); out_unlock: spin_unlock_irqrestore(&wdata->queue.lock, flags); } /* * This sets the rumble bit on the given output report if rumble is * currently enabled. * \cmd1 must point to the second byte in the output report => &cmd[1] * This must be called on nearly every output report before passing it * into the output queue! 
*/ static inline void wiiproto_keep_rumble(struct wiimote_data *wdata, __u8 *cmd1) { if (wdata->state.flags & WIIPROTO_FLAG_RUMBLE) *cmd1 |= 0x01; } void wiiproto_req_rumble(struct wiimote_data *wdata, __u8 rumble) { __u8 cmd[2]; rumble = !!rumble; if (rumble == !!(wdata->state.flags & WIIPROTO_FLAG_RUMBLE)) return; if (rumble) wdata->state.flags |= WIIPROTO_FLAG_RUMBLE; else wdata->state.flags &= ~WIIPROTO_FLAG_RUMBLE; cmd[0] = WIIPROTO_REQ_RUMBLE; cmd[1] = 0; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } void wiiproto_req_leds(struct wiimote_data *wdata, int leds) { __u8 cmd[2]; leds &= WIIPROTO_FLAGS_LEDS; if ((wdata->state.flags & WIIPROTO_FLAGS_LEDS) == leds) return; wdata->state.flags = (wdata->state.flags & ~WIIPROTO_FLAGS_LEDS) | leds; cmd[0] = WIIPROTO_REQ_LED; cmd[1] = 0; if (leds & WIIPROTO_FLAG_LED1) cmd[1] |= 0x10; if (leds & WIIPROTO_FLAG_LED2) cmd[1] |= 0x20; if (leds & WIIPROTO_FLAG_LED3) cmd[1] |= 0x40; if (leds & WIIPROTO_FLAG_LED4) cmd[1] |= 0x80; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } /* * Check what peripherals of the wiimote are currently * active and select a proper DRM that supports all of * the requested data inputs. * * Not all combinations are actually supported. The following * combinations work only with limitations: * - IR cam in extended or full mode disables any data transmission * of extension controllers. There is no DRM mode that supports * extension bytes plus extended/full IR. * - IR cam with accelerometer and extension *_EXT8 is not supported. * However, all extensions that need *_EXT8 are devices that don't * support IR cameras. Hence, this shouldn't happen under normal * operation. * - *_EXT16 is only supported in combination with buttons and * accelerometer. No IR or similar can be active simultaneously. As * above, all modules that require it are mutually exclusive with * IR/etc. so this doesn't matter. 
*/ static __u8 select_drm(struct wiimote_data *wdata) { __u8 ir = wdata->state.flags & WIIPROTO_FLAGS_IR; bool ext; ext = (wdata->state.flags & WIIPROTO_FLAG_EXT_USED) || (wdata->state.flags & WIIPROTO_FLAG_MP_USED); /* some 3rd-party balance-boards are hard-coded to KEE, *sigh* */ if (wdata->state.devtype == WIIMOTE_DEV_BALANCE_BOARD) { if (ext) return WIIPROTO_REQ_DRM_KEE; else return WIIPROTO_REQ_DRM_K; } if (ir == WIIPROTO_FLAG_IR_BASIC) { if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) { /* GEN10 and ealier devices bind IR formats to DRMs. * Hence, we cannot use DRM_KAI here as it might be * bound to IR_EXT. Use DRM_KAIE unconditionally so we * work with all devices and our parsers can use the * fixed formats, too. */ return WIIPROTO_REQ_DRM_KAIE; } else { return WIIPROTO_REQ_DRM_KIE; } } else if (ir == WIIPROTO_FLAG_IR_EXT) { return WIIPROTO_REQ_DRM_KAI; } else if (ir == WIIPROTO_FLAG_IR_FULL) { return WIIPROTO_REQ_DRM_SKAI1; } else { if (wdata->state.flags & WIIPROTO_FLAG_ACCEL) { if (ext) return WIIPROTO_REQ_DRM_KAE; else return WIIPROTO_REQ_DRM_KA; } else { if (ext) return WIIPROTO_REQ_DRM_KEE; else return WIIPROTO_REQ_DRM_K; } } } void wiiproto_req_drm(struct wiimote_data *wdata, __u8 drm) { __u8 cmd[3]; if (wdata->state.flags & WIIPROTO_FLAG_DRM_LOCKED) drm = wdata->state.drm; else if (drm == WIIPROTO_REQ_NULL) drm = select_drm(wdata); cmd[0] = WIIPROTO_REQ_DRM; cmd[1] = 0; cmd[2] = drm; wdata->state.drm = drm; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } void wiiproto_req_status(struct wiimote_data *wdata) { __u8 cmd[2]; cmd[0] = WIIPROTO_REQ_SREQ; cmd[1] = 0; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } void wiiproto_req_accel(struct wiimote_data *wdata, __u8 accel) { accel = !!accel; if (accel == !!(wdata->state.flags & WIIPROTO_FLAG_ACCEL)) return; if (accel) wdata->state.flags |= WIIPROTO_FLAG_ACCEL; else wdata->state.flags &= ~WIIPROTO_FLAG_ACCEL; wiiproto_req_drm(wdata, 
WIIPROTO_REQ_NULL); } void wiiproto_req_ir1(struct wiimote_data *wdata, __u8 flags) { __u8 cmd[2]; cmd[0] = WIIPROTO_REQ_IR1; cmd[1] = flags; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } void wiiproto_req_ir2(struct wiimote_data *wdata, __u8 flags) { __u8 cmd[2]; cmd[0] = WIIPROTO_REQ_IR2; cmd[1] = flags; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } #define wiiproto_req_wreg(wdata, os, buf, sz) \ wiiproto_req_wmem((wdata), false, (os), (buf), (sz)) #define wiiproto_req_weeprom(wdata, os, buf, sz) \ wiiproto_req_wmem((wdata), true, (os), (buf), (sz)) static void wiiproto_req_wmem(struct wiimote_data *wdata, bool eeprom, __u32 offset, const __u8 *buf, __u8 size) { __u8 cmd[22]; if (size > 16 || size == 0) { hid_warn(wdata->hdev, "Invalid length %d wmem request\n", size); return; } memset(cmd, 0, sizeof(cmd)); cmd[0] = WIIPROTO_REQ_WMEM; cmd[2] = (offset >> 16) & 0xff; cmd[3] = (offset >> 8) & 0xff; cmd[4] = offset & 0xff; cmd[5] = size; memcpy(&cmd[6], buf, size); if (!eeprom) cmd[1] |= 0x04; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } void wiiproto_req_rmem(struct wiimote_data *wdata, bool eeprom, __u32 offset, __u16 size) { __u8 cmd[7]; if (size == 0) { hid_warn(wdata->hdev, "Invalid length %d rmem request\n", size); return; } cmd[0] = WIIPROTO_REQ_RMEM; cmd[1] = 0; cmd[2] = (offset >> 16) & 0xff; cmd[3] = (offset >> 8) & 0xff; cmd[4] = offset & 0xff; cmd[5] = (size >> 8) & 0xff; cmd[6] = size & 0xff; if (!eeprom) cmd[1] |= 0x04; wiiproto_keep_rumble(wdata, &cmd[1]); wiimote_queue(wdata, cmd, sizeof(cmd)); } /* requries the cmd-mutex to be held */ int wiimote_cmd_write(struct wiimote_data *wdata, __u32 offset, const __u8 *wmem, __u8 size) { unsigned long flags; int ret; spin_lock_irqsave(&wdata->state.lock, flags); wiimote_cmd_set(wdata, WIIPROTO_REQ_WMEM, 0); wiiproto_req_wreg(wdata, offset, wmem, size); spin_unlock_irqrestore(&wdata->state.lock, flags); ret = 
wiimote_cmd_wait(wdata); if (!ret && wdata->state.cmd_err) ret = -EIO; return ret; } /* requries the cmd-mutex to be held */ ssize_t wiimote_cmd_read(struct wiimote_data *wdata, __u32 offset, __u8 *rmem, __u8 size) { unsigned long flags; ssize_t ret; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.cmd_read_size = size; wdata->state.cmd_read_buf = rmem; wiimote_cmd_set(wdata, WIIPROTO_REQ_RMEM, offset & 0xffff); wiiproto_req_rreg(wdata, offset, size); spin_unlock_irqrestore(&wdata->state.lock, flags); ret = wiimote_cmd_wait(wdata); spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.cmd_read_buf = NULL; spin_unlock_irqrestore(&wdata->state.lock, flags); if (!ret) { if (wdata->state.cmd_read_size == 0) ret = -EIO; else ret = wdata->state.cmd_read_size; } return ret; } /* requires the cmd-mutex to be held */ static int wiimote_cmd_init_ext(struct wiimote_data *wdata) { __u8 wmem; int ret; /* initialize extension */ wmem = 0x55; ret = wiimote_cmd_write(wdata, 0xa400f0, &wmem, sizeof(wmem)); if (ret) return ret; /* disable default encryption */ wmem = 0x0; ret = wiimote_cmd_write(wdata, 0xa400fb, &wmem, sizeof(wmem)); if (ret) return ret; return 0; } /* requires the cmd-mutex to be held */ static __u8 wiimote_cmd_read_ext(struct wiimote_data *wdata, __u8 *rmem) { int ret; /* read extension ID */ ret = wiimote_cmd_read(wdata, 0xa400fa, rmem, 6); if (ret != 6) return WIIMOTE_EXT_NONE; hid_dbg(wdata->hdev, "extension ID: %6phC\n", rmem); if (rmem[0] == 0xff && rmem[1] == 0xff && rmem[2] == 0xff && rmem[3] == 0xff && rmem[4] == 0xff && rmem[5] == 0xff) return WIIMOTE_EXT_NONE; if (rmem[4] == 0x00 && rmem[5] == 0x00) return WIIMOTE_EXT_NUNCHUK; if (rmem[4] == 0x01 && rmem[5] == 0x01) return WIIMOTE_EXT_CLASSIC_CONTROLLER; if (rmem[4] == 0x04 && rmem[5] == 0x02) return WIIMOTE_EXT_BALANCE_BOARD; if (rmem[4] == 0x01 && rmem[5] == 0x20) return WIIMOTE_EXT_PRO_CONTROLLER; if (rmem[0] == 0x01 && rmem[1] == 0x00 && rmem[4] == 0x01 && rmem[5] == 0x03) return 
WIIMOTE_EXT_DRUMS; if (rmem[0] == 0x00 && rmem[1] == 0x00 && rmem[4] == 0x01 && rmem[5] == 0x03) return WIIMOTE_EXT_GUITAR; if (rmem[0] == 0x03 && rmem[1] == 0x00 && rmem[4] == 0x01 && rmem[5] == 0x03) return WIIMOTE_EXT_TURNTABLE; return WIIMOTE_EXT_UNKNOWN; } /* requires the cmd-mutex to be held */ static int wiimote_cmd_init_mp(struct wiimote_data *wdata) { __u8 wmem; int ret; /* initialize MP */ wmem = 0x55; ret = wiimote_cmd_write(wdata, 0xa600f0, &wmem, sizeof(wmem)); if (ret) return ret; /* disable default encryption */ wmem = 0x0; ret = wiimote_cmd_write(wdata, 0xa600fb, &wmem, sizeof(wmem)); if (ret) return ret; return 0; } /* requires the cmd-mutex to be held */ static bool wiimote_cmd_map_mp(struct wiimote_data *wdata, __u8 exttype) { __u8 wmem; /* map MP with correct pass-through mode */ switch (exttype) { case WIIMOTE_EXT_CLASSIC_CONTROLLER: case WIIMOTE_EXT_DRUMS: case WIIMOTE_EXT_GUITAR: wmem = 0x07; break; case WIIMOTE_EXT_TURNTABLE: case WIIMOTE_EXT_NUNCHUK: wmem = 0x05; break; default: wmem = 0x04; break; } return wiimote_cmd_write(wdata, 0xa600fe, &wmem, sizeof(wmem)); } /* requires the cmd-mutex to be held */ static bool wiimote_cmd_read_mp(struct wiimote_data *wdata, __u8 *rmem) { int ret; /* read motion plus ID */ ret = wiimote_cmd_read(wdata, 0xa600fa, rmem, 6); if (ret != 6) return false; hid_dbg(wdata->hdev, "motion plus ID: %6phC\n", rmem); if (rmem[5] == 0x05) return true; hid_info(wdata->hdev, "unknown motion plus ID: %6phC\n", rmem); return false; } /* requires the cmd-mutex to be held */ static __u8 wiimote_cmd_read_mp_mapped(struct wiimote_data *wdata) { int ret; __u8 rmem[6]; /* read motion plus ID */ ret = wiimote_cmd_read(wdata, 0xa400fa, rmem, 6); if (ret != 6) return WIIMOTE_MP_NONE; hid_dbg(wdata->hdev, "mapped motion plus ID: %6phC\n", rmem); if (rmem[0] == 0xff && rmem[1] == 0xff && rmem[2] == 0xff && rmem[3] == 0xff && rmem[4] == 0xff && rmem[5] == 0xff) return WIIMOTE_MP_NONE; if (rmem[4] == 0x04 && rmem[5] == 0x05) return 
WIIMOTE_MP_SINGLE; else if (rmem[4] == 0x05 && rmem[5] == 0x05) return WIIMOTE_MP_PASSTHROUGH_NUNCHUK; else if (rmem[4] == 0x07 && rmem[5] == 0x05) return WIIMOTE_MP_PASSTHROUGH_CLASSIC; return WIIMOTE_MP_UNKNOWN; } /* device module handling */ static const __u8 * const wiimote_devtype_mods[WIIMOTE_DEV_NUM] = { [WIIMOTE_DEV_PENDING] = (const __u8[]){ WIIMOD_NULL, }, [WIIMOTE_DEV_UNKNOWN] = (const __u8[]){ WIIMOD_NO_MP, WIIMOD_NULL, }, [WIIMOTE_DEV_GENERIC] = (const __u8[]){ WIIMOD_KEYS, WIIMOD_RUMBLE, WIIMOD_BATTERY, WIIMOD_LED1, WIIMOD_LED2, WIIMOD_LED3, WIIMOD_LED4, WIIMOD_ACCEL, WIIMOD_IR, WIIMOD_NULL, }, [WIIMOTE_DEV_GEN10] = (const __u8[]){ WIIMOD_KEYS, WIIMOD_RUMBLE, WIIMOD_BATTERY, WIIMOD_LED1, WIIMOD_LED2, WIIMOD_LED3, WIIMOD_LED4, WIIMOD_ACCEL, WIIMOD_IR, WIIMOD_NULL, }, [WIIMOTE_DEV_GEN20] = (const __u8[]){ WIIMOD_KEYS, WIIMOD_RUMBLE, WIIMOD_BATTERY, WIIMOD_LED1, WIIMOD_LED2, WIIMOD_LED3, WIIMOD_LED4, WIIMOD_ACCEL, WIIMOD_IR, WIIMOD_BUILTIN_MP, WIIMOD_NULL, }, [WIIMOTE_DEV_BALANCE_BOARD] = (const __u8[]) { WIIMOD_BATTERY, WIIMOD_LED1, WIIMOD_NO_MP, WIIMOD_NULL, }, [WIIMOTE_DEV_PRO_CONTROLLER] = (const __u8[]) { WIIMOD_BATTERY, WIIMOD_LED1, WIIMOD_LED2, WIIMOD_LED3, WIIMOD_LED4, WIIMOD_NO_MP, WIIMOD_NULL, }, }; static void wiimote_modules_load(struct wiimote_data *wdata, unsigned int devtype) { bool need_input = false; const __u8 *mods, *iter; const struct wiimod_ops *ops; int ret; mods = wiimote_devtype_mods[devtype]; for (iter = mods; *iter != WIIMOD_NULL; ++iter) { if (wiimod_table[*iter]->flags & WIIMOD_FLAG_INPUT) { need_input = true; break; } } if (need_input) { wdata->input = input_allocate_device(); if (!wdata->input) return; input_set_drvdata(wdata->input, wdata); wdata->input->dev.parent = &wdata->hdev->dev; wdata->input->id.bustype = wdata->hdev->bus; wdata->input->id.vendor = wdata->hdev->vendor; wdata->input->id.product = wdata->hdev->product; wdata->input->id.version = wdata->hdev->version; wdata->input->name = WIIMOTE_NAME; } for (iter = 
mods; *iter != WIIMOD_NULL; ++iter) { ops = wiimod_table[*iter]; if (!ops->probe) continue; ret = ops->probe(ops, wdata); if (ret) goto error; } if (wdata->input) { ret = input_register_device(wdata->input); if (ret) goto error; } spin_lock_irq(&wdata->state.lock); wdata->state.devtype = devtype; spin_unlock_irq(&wdata->state.lock); return; error: for ( ; iter-- != mods; ) { ops = wiimod_table[*iter]; if (ops->remove) ops->remove(ops, wdata); } if (wdata->input) { input_free_device(wdata->input); wdata->input = NULL; } } static void wiimote_modules_unload(struct wiimote_data *wdata) { const __u8 *mods, *iter; const struct wiimod_ops *ops; unsigned long flags; mods = wiimote_devtype_mods[wdata->state.devtype]; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.devtype = WIIMOTE_DEV_UNKNOWN; spin_unlock_irqrestore(&wdata->state.lock, flags); /* find end of list */ for (iter = mods; *iter != WIIMOD_NULL; ++iter) /* empty */ ; if (wdata->input) { input_get_device(wdata->input); input_unregister_device(wdata->input); } for ( ; iter-- != mods; ) { ops = wiimod_table[*iter]; if (ops->remove) ops->remove(ops, wdata); } if (wdata->input) { input_put_device(wdata->input); wdata->input = NULL; } } /* device extension handling */ static void wiimote_ext_load(struct wiimote_data *wdata, unsigned int ext) { unsigned long flags; const struct wiimod_ops *ops; int ret; ops = wiimod_ext_table[ext]; if (ops->probe) { ret = ops->probe(ops, wdata); if (ret) ext = WIIMOTE_EXT_UNKNOWN; } spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.exttype = ext; spin_unlock_irqrestore(&wdata->state.lock, flags); } static void wiimote_ext_unload(struct wiimote_data *wdata) { unsigned long flags; const struct wiimod_ops *ops; ops = wiimod_ext_table[wdata->state.exttype]; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.exttype = WIIMOTE_EXT_UNKNOWN; wdata->state.flags &= ~WIIPROTO_FLAG_EXT_USED; spin_unlock_irqrestore(&wdata->state.lock, flags); if (ops->remove) 
ops->remove(ops, wdata); } static void wiimote_mp_load(struct wiimote_data *wdata) { unsigned long flags; const struct wiimod_ops *ops; int ret; __u8 mode = 2; ops = &wiimod_mp; if (ops->probe) { ret = ops->probe(ops, wdata); if (ret) mode = 1; } spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.mp = mode; spin_unlock_irqrestore(&wdata->state.lock, flags); } static void wiimote_mp_unload(struct wiimote_data *wdata) { unsigned long flags; const struct wiimod_ops *ops; if (wdata->state.mp < 2) return; ops = &wiimod_mp; spin_lock_irqsave(&wdata->state.lock, flags); wdata->state.mp = 0; wdata->state.flags &= ~WIIPROTO_FLAG_MP_USED; spin_unlock_irqrestore(&wdata->state.lock, flags); if (ops->remove) ops->remove(ops, wdata); } /* device (re-)initialization and detection */ static const char *wiimote_devtype_names[WIIMOTE_DEV_NUM] = { [WIIMOTE_DEV_PENDING] = "Pending", [WIIMOTE_DEV_UNKNOWN] = "Unknown", [WIIMOTE_DEV_GENERIC] = "Generic", [WIIMOTE_DEV_GEN10] = "Nintendo Wii Remote (Gen 1)", [WIIMOTE_DEV_GEN20] = "Nintendo Wii Remote Plus (Gen 2)", [WIIMOTE_DEV_BALANCE_BOARD] = "Nintendo Wii Balance Board", [WIIMOTE_DEV_PRO_CONTROLLER] = "Nintendo Wii U Pro Controller", }; /* Try to guess the device type based on all collected information. We * first try to detect by static extension types, then VID/PID and the * device name. If we cannot detect the device, we use * WIIMOTE_DEV_GENERIC so all modules will get probed on the device. 
*/ static void wiimote_init_set_type(struct wiimote_data *wdata, __u8 exttype) { __u8 devtype = WIIMOTE_DEV_GENERIC; __u16 vendor, product; const char *name; vendor = wdata->hdev->vendor; product = wdata->hdev->product; name = wdata->hdev->name; if (exttype == WIIMOTE_EXT_BALANCE_BOARD) { devtype = WIIMOTE_DEV_BALANCE_BOARD; goto done; } else if (exttype == WIIMOTE_EXT_PRO_CONTROLLER) { devtype = WIIMOTE_DEV_PRO_CONTROLLER; goto done; } if (!strcmp(name, "Nintendo RVL-CNT-01")) { devtype = WIIMOTE_DEV_GEN10; goto done; } else if (!strcmp(name, "Nintendo RVL-CNT-01-TR")) { devtype = WIIMOTE_DEV_GEN20; goto done; } else if (!strcmp(name, "Nintendo RVL-WBC-01")) { devtype = WIIMOTE_DEV_BALANCE_BOARD; goto done; } else if (!strcmp(name, "Nintendo RVL-CNT-01-UC")) { devtype = WIIMOTE_DEV_PRO_CONTROLLER; goto done; } if (vendor == USB_VENDOR_ID_NINTENDO) { if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE) { devtype = WIIMOTE_DEV_GEN10; goto done; } else if (product == USB_DEVICE_ID_NINTENDO_WIIMOTE2) { devtype = WIIMOTE_DEV_GEN20; goto done; } } done: if (devtype == WIIMOTE_DEV_GENERIC) hid_info(wdata->hdev, "cannot detect device; NAME: %s VID: %04x PID: %04x EXT: %04x\n", name, vendor, product, exttype); else hid_info(wdata->hdev, "detected device: %s\n", wiimote_devtype_names[devtype]); wiimote_modules_load(wdata, devtype); } static void wiimote_init_detect(struct wiimote_data *wdata) { __u8 exttype = WIIMOTE_EXT_NONE, extdata[6]; bool ext; int ret; wiimote_cmd_acquire_noint(wdata); spin_lock_irq(&wdata->state.lock); wdata->state.devtype = WIIMOTE_DEV_UNKNOWN; wiimote_cmd_set(wdata, WIIPROTO_REQ_SREQ, 0); wiiproto_req_status(wdata); spin_unlock_irq(&wdata->state.lock); ret = wiimote_cmd_wait_noint(wdata); if (ret) goto out_release; spin_lock_irq(&wdata->state.lock); ext = wdata->state.flags & WIIPROTO_FLAG_EXT_PLUGGED; spin_unlock_irq(&wdata->state.lock); if (!ext) goto out_release; wiimote_cmd_init_ext(wdata); exttype = wiimote_cmd_read_ext(wdata, extdata); out_release: 
wiimote_cmd_release(wdata); wiimote_init_set_type(wdata, exttype); /* schedule MP timer */ spin_lock_irq(&wdata->state.lock); if (!(wdata->state.flags & WIIPROTO_FLAG_BUILTIN_MP) && !(wdata->state.flags & WIIPROTO_FLAG_NO_MP)) mod_timer(&wdata->timer, jiffies + HZ * 4); spin_unlock_irq(&wdata->state.lock); } /* * MP hotplug events are not generated by the wiimote. Therefore, we need * polling to detect it. We use a 4s interval for polling MP registers. This * seems reasonable considering applications can trigger it manually via * sysfs requests. */ static void wiimote_init_poll_mp(struct wiimote_data *wdata) { bool mp; __u8 mpdata[6]; wiimote_cmd_acquire_noint(wdata); wiimote_cmd_init_mp(wdata); mp = wiimote_cmd_read_mp(wdata, mpdata); wiimote_cmd_release(wdata); /* load/unload MP module if it changed */ if (mp) { if (!wdata->state.mp) { hid_info(wdata->hdev, "detected extension: Nintendo Wii Motion Plus\n"); wiimote_mp_load(wdata); } } else if (wdata->state.mp) { wiimote_mp_unload(wdata); } mod_timer(&wdata->timer, jiffies + HZ * 4); } /* * Check whether the wiimote is in the expected state. The extension registers * may change during hotplug and initialization so we might get hotplug events * that we caused by remapping some memory. * We use some heuristics here to check known states. If the wiimote is in the * expected state, we can ignore the hotplug event. * * Returns "true" if the device is in expected state, "false" if we should * redo hotplug handling and extension initialization. 
 */
static bool wiimote_init_check(struct wiimote_data *wdata)
{
	__u32 flags;
	__u8 type, data[6];
	bool ret, poll_mp;

	/* snapshot flags; the checks below re-read live flags under lock */
	spin_lock_irq(&wdata->state.lock);
	flags = wdata->state.flags;
	spin_unlock_irq(&wdata->state.lock);

	wiimote_cmd_acquire_noint(wdata);

	/* If MP is used and active, but the extension is not, we expect:
	 *   read_mp_mapped() == WIIMOTE_MP_SINGLE
	 *   state.flags == !EXT_ACTIVE && !MP_PLUGGED && MP_ACTIVE
	 * We do not check EXT_PLUGGED because it might change during
	 * initialization of MP without extensions.
	 *  - If MP is unplugged/replugged, read_mp_mapped() fails
	 *  - If EXT is plugged, MP_PLUGGED will get set */
	if (wdata->state.exttype == WIIMOTE_EXT_NONE &&
	    wdata->state.mp > 0 && (flags & WIIPROTO_FLAG_MP_USED)) {
		type = wiimote_cmd_read_mp_mapped(wdata);
		ret = type == WIIMOTE_MP_SINGLE;

		spin_lock_irq(&wdata->state.lock);
		ret = ret && !(wdata->state.flags & WIIPROTO_FLAG_EXT_ACTIVE);
		ret = ret && !(wdata->state.flags & WIIPROTO_FLAG_MP_PLUGGED);
		ret = ret && (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE);
		spin_unlock_irq(&wdata->state.lock);

		if (!ret)
			hid_dbg(wdata->hdev, "state left: !EXT && MP\n");

		/* while MP is mapped, we get EXT_PLUGGED events */
		poll_mp = false;

		goto out_release;
	}

	/* If MP is unused, but the extension port is used, we expect:
	 *   read_ext == state.exttype
	 *   state.flags == !MP_ACTIVE && EXT_ACTIVE
	 *  - If MP is plugged/unplugged, our timer detects it
	 *  - If EXT is unplugged/replugged, EXT_ACTIVE will become unset */
	if (!(flags & WIIPROTO_FLAG_MP_USED) &&
	    wdata->state.exttype != WIIMOTE_EXT_NONE) {
		type = wiimote_cmd_read_ext(wdata, data);
		ret = type == wdata->state.exttype;

		spin_lock_irq(&wdata->state.lock);
		ret = ret && !(wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE);
		ret = ret && (wdata->state.flags & WIIPROTO_FLAG_EXT_ACTIVE);
		spin_unlock_irq(&wdata->state.lock);

		if (!ret)
			hid_dbg(wdata->hdev, "state left: EXT && !MP\n");

		/* poll MP for hotplug events */
		poll_mp = true;

		goto out_release;
	}

	/* If neither MP nor an extension are used, we expect:
	 *   read_ext() == WIIMOTE_EXT_NONE
	 *   state.flags == !MP_ACTIVE && !EXT_ACTIVE && !EXT_PLUGGED
	 * No need to perform any action in this case as everything is
	 * disabled already.
	 *  - If MP is plugged/unplugged, our timer detects it
	 *  - If EXT is plugged, EXT_PLUGGED will be set */
	if (!(flags & WIIPROTO_FLAG_MP_USED) &&
	    wdata->state.exttype == WIIMOTE_EXT_NONE) {
		type = wiimote_cmd_read_ext(wdata, data);
		ret = type == wdata->state.exttype;

		spin_lock_irq(&wdata->state.lock);
		ret = ret && !(wdata->state.flags & WIIPROTO_FLAG_EXT_ACTIVE);
		ret = ret && !(wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE);
		ret = ret && !(wdata->state.flags & WIIPROTO_FLAG_EXT_PLUGGED);
		spin_unlock_irq(&wdata->state.lock);

		if (!ret)
			hid_dbg(wdata->hdev, "state left: !EXT && !MP\n");

		/* poll MP for hotplug events */
		poll_mp = true;

		goto out_release;
	}

	/* The trickiest part is if both EXT and MP are active. We cannot read
	 * the EXT ID, anymore, because MP is mapped over it. However, we use
	 * a handy trick here:
	 *   - EXT_ACTIVE is unset whenever !MP_PLUGGED is sent
	 * MP_PLUGGED might be re-sent again before we are scheduled, but
	 * EXT_ACTIVE will stay unset.
	 * So it is enough to check for mp_mapped() and MP_ACTIVE and
	 * EXT_ACTIVE. EXT_PLUGGED is a sanity check.
	 */
	if (wdata->state.exttype != WIIMOTE_EXT_NONE &&
	    wdata->state.mp > 0 && (flags & WIIPROTO_FLAG_MP_USED)) {
		type = wiimote_cmd_read_mp_mapped(wdata);
		ret = type != WIIMOTE_MP_NONE;
		ret = ret && type != WIIMOTE_MP_UNKNOWN;
		ret = ret && type != WIIMOTE_MP_SINGLE;

		spin_lock_irq(&wdata->state.lock);
		ret = ret && (wdata->state.flags & WIIPROTO_FLAG_EXT_PLUGGED);
		ret = ret && (wdata->state.flags & WIIPROTO_FLAG_EXT_ACTIVE);
		ret = ret && (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE);
		spin_unlock_irq(&wdata->state.lock);

		if (!ret)
			hid_dbg(wdata->hdev, "state left: EXT && MP\n");

		/* while MP is mapped, we get EXT_PLUGGED events */
		poll_mp = false;

		goto out_release;
	}

	/* unknown state */
	ret = false;

out_release:
	wiimote_cmd_release(wdata);

	/* only poll for MP if requested and if state didn't change */
	if (ret && poll_mp && !(flags & WIIPROTO_FLAG_BUILTIN_MP) &&
	    !(flags & WIIPROTO_FLAG_NO_MP))
		wiimote_init_poll_mp(wdata);

	return ret;
}

/* human-readable extension names, indexed by WIIMOTE_EXT_* */
static const char *wiimote_exttype_names[WIIMOTE_EXT_NUM] = {
	[WIIMOTE_EXT_NONE] = "None",
	[WIIMOTE_EXT_UNKNOWN] = "Unknown",
	[WIIMOTE_EXT_NUNCHUK] = "Nintendo Wii Nunchuk",
	[WIIMOTE_EXT_CLASSIC_CONTROLLER] = "Nintendo Wii Classic Controller",
	[WIIMOTE_EXT_BALANCE_BOARD] = "Nintendo Wii Balance Board",
	[WIIMOTE_EXT_PRO_CONTROLLER] = "Nintendo Wii U Pro Controller",
	[WIIMOTE_EXT_DRUMS] = "Nintendo Wii Drums",
	[WIIMOTE_EXT_GUITAR] = "Nintendo Wii Guitar",
	[WIIMOTE_EXT_TURNTABLE] = "Nintendo Wii Turntable"
};

/*
 * Handle hotplug events
 * If we receive an hotplug event and the device-check failed, we deinitialize
 * the extension ports, re-read all extension IDs and set the device into
 * the desired state. This involves mapping MP into the main extension
 * registers, setting up extension passthrough modes and initializing the
 * requested extensions.
 */
static void wiimote_init_hotplug(struct wiimote_data *wdata)
{
	__u8 exttype, extdata[6], mpdata[6];
	__u32 flags;
	bool mp;

	hid_dbg(wdata->hdev, "detect extensions..\n");

	wiimote_cmd_acquire_noint(wdata);

	spin_lock_irq(&wdata->state.lock);

	/* get state snapshot that we will then work on */
	flags = wdata->state.flags;

	/* disable event forwarding temporarily */
	wdata->state.flags &= ~WIIPROTO_FLAG_EXT_ACTIVE;
	wdata->state.flags &= ~WIIPROTO_FLAG_MP_ACTIVE;

	spin_unlock_irq(&wdata->state.lock);

	/* init extension and MP (deactivates current extension or MP) */
	wiimote_cmd_init_ext(wdata);
	if (flags & WIIPROTO_FLAG_NO_MP) {
		mp = false;
	} else {
		wiimote_cmd_init_mp(wdata);
		mp = wiimote_cmd_read_mp(wdata, mpdata);
	}
	exttype = wiimote_cmd_read_ext(wdata, extdata);
	wiimote_cmd_release(wdata);

	/* load/unload extension module if it changed */
	if (exttype != wdata->state.exttype) {
		/* unload previous extension */
		wiimote_ext_unload(wdata);

		if (exttype == WIIMOTE_EXT_UNKNOWN) {
			hid_info(wdata->hdev, "cannot detect extension; %6phC\n",
				 extdata);
		} else if (exttype == WIIMOTE_EXT_NONE) {
			spin_lock_irq(&wdata->state.lock);
			wdata->state.exttype = WIIMOTE_EXT_NONE;
			spin_unlock_irq(&wdata->state.lock);
		} else {
			hid_info(wdata->hdev, "detected extension: %s\n",
				 wiimote_exttype_names[exttype]);

			/* try loading new extension */
			wiimote_ext_load(wdata, exttype);
		}
	}

	/* load/unload MP module if it changed */
	if (mp) {
		if (!wdata->state.mp) {
			hid_info(wdata->hdev, "detected extension: Nintendo Wii Motion Plus\n");
			wiimote_mp_load(wdata);
		}
	} else if (wdata->state.mp) {
		wiimote_mp_unload(wdata);
	}

	/* if MP is not used, do not map or activate it */
	if (!(flags & WIIPROTO_FLAG_MP_USED))
		mp = false;

	/* map MP into main extension registers if used */
	if (mp) {
		wiimote_cmd_acquire_noint(wdata);
		wiimote_cmd_map_mp(wdata, exttype);
		wiimote_cmd_release(wdata);

		/* delete MP hotplug timer */
		del_timer_sync(&wdata->timer);
	} else {
		/* reschedule MP hotplug timer */
		if (!(flags & WIIPROTO_FLAG_BUILTIN_MP) &&
		    !(flags & WIIPROTO_FLAG_NO_MP))
			mod_timer(&wdata->timer, jiffies + HZ * 4);
	}

	spin_lock_irq(&wdata->state.lock);

	/* enable data forwarding again and set expected hotplug state */
	if (mp) {
		wdata->state.flags |= WIIPROTO_FLAG_MP_ACTIVE;
		if (wdata->state.exttype == WIIMOTE_EXT_NONE) {
			wdata->state.flags &= ~WIIPROTO_FLAG_EXT_PLUGGED;
			wdata->state.flags &= ~WIIPROTO_FLAG_MP_PLUGGED;
		} else {
			wdata->state.flags &= ~WIIPROTO_FLAG_EXT_PLUGGED;
			wdata->state.flags |= WIIPROTO_FLAG_MP_PLUGGED;
			wdata->state.flags |= WIIPROTO_FLAG_EXT_ACTIVE;
		}
	} else if (wdata->state.exttype != WIIMOTE_EXT_NONE) {
		wdata->state.flags |= WIIPROTO_FLAG_EXT_ACTIVE;
	}

	/* request status report for hotplug state updates */
	wiiproto_req_status(wdata);

	spin_unlock_irq(&wdata->state.lock);

	hid_dbg(wdata->hdev, "detected extensions: MP: %d EXT: %d\n",
		wdata->state.mp, wdata->state.exttype);
}

/*
 * Deferred init/hotplug work: run initial detection once (devtype still
 * PENDING), then re-run hotplug handling whenever the state check fails.
 */
static void wiimote_init_worker(struct work_struct *work)
{
	struct wiimote_data *wdata = container_of(work, struct wiimote_data,
						  init_worker);
	bool changed = false;

	if (wdata->state.devtype == WIIMOTE_DEV_PENDING) {
		wiimote_init_detect(wdata);
		changed = true;
	}

	if (changed || !wiimote_init_check(wdata))
		wiimote_init_hotplug(wdata);

	if (changed)
		kobject_uevent(&wdata->hdev->dev.kobj, KOBJ_CHANGE);
}

/* schedule the init worker; caller must hold state.lock */
void __wiimote_schedule(struct wiimote_data *wdata)
{
	if (!(wdata->state.flags & WIIPROTO_FLAG_EXITING))
		schedule_work(&wdata->init_worker);
}

/* lock-taking wrapper around __wiimote_schedule() */
static void wiimote_schedule(struct wiimote_data *wdata)
{
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	__wiimote_schedule(wdata);
	spin_unlock_irqrestore(&wdata->state.lock, flags);
}

/* MP polling timer callback: just reschedule the init worker */
static void wiimote_init_timeout(struct timer_list *t)
{
	struct wiimote_data *wdata = from_timer(wdata, t, timer);

	wiimote_schedule(wdata);
}

/* protocol handlers */

/*
 * Forward key data to the first module that implements in_keys: the active
 * extension gets priority, then the devtype module list.
 */
static void handler_keys(struct wiimote_data *wdata, const __u8 *payload)
{
	const __u8 *iter, *mods;
	const struct wiimod_ops *ops;

	ops = wiimod_ext_table[wdata->state.exttype];
	if (ops->in_keys) {
		ops->in_keys(wdata, payload);
		return;
	}

	mods = wiimote_devtype_mods[wdata->state.devtype];
	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
		ops = wiimod_table[*iter];
		if (ops->in_keys) {
			ops->in_keys(wdata, payload);
			break;
		}
	}
}

/* same dispatch scheme as handler_keys(), but for accelerometer data */
static void handler_accel(struct wiimote_data *wdata, const __u8 *payload)
{
	const __u8 *iter, *mods;
	const struct wiimod_ops *ops;

	ops = wiimod_ext_table[wdata->state.exttype];
	if (ops->in_accel) {
		ops->in_accel(wdata, payload);
		return;
	}

	mods = wiimote_devtype_mods[wdata->state.devtype];
	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
		ops = wiimod_table[*iter];
		if (ops->in_accel) {
			ops->in_accel(wdata, payload);
			break;
		}
	}
}

/*
 * An ext handler is usable only if it exists and the payload is long enough
 * for the module's declared minimum (8 or 16 bytes).
 */
static bool valid_ext_handler(const struct wiimod_ops *ops, size_t len)
{
	if (!ops->in_ext)
		return false;
	if ((ops->flags & WIIMOD_FLAG_EXT8) && len < 8)
		return false;
	if ((ops->flags & WIIMOD_FLAG_EXT16) && len < 16)
		return false;

	return true;
}

/*
 * Dispatch extension (or interleaved MP) payloads. Also tracks MP slot
 * hotplug bits while MP is mapped over the extension registers.
 */
static void handler_ext(struct wiimote_data *wdata, const __u8 *payload,
			size_t len)
{
	/* all-0xff payloads mark an unconnected/invalid extension read */
	static const __u8 invalid[21] = { 0xff, 0xff, 0xff, 0xff,
					  0xff, 0xff, 0xff, 0xff,
					  0xff, 0xff, 0xff, 0xff,
					  0xff, 0xff, 0xff, 0xff,
					  0xff, 0xff, 0xff, 0xff,
					  0xff };
	const __u8 *iter, *mods;
	const struct wiimod_ops *ops;
	bool is_mp;

	if (len > 21)
		len = 21;
	if (len < 6 || !memcmp(payload, invalid, len))
		return;

	/* if MP is active, track MP slot hotplugging */
	if (wdata->state.flags & WIIPROTO_FLAG_MP_ACTIVE) {
		/* this bit is set for invalid events (eg.
		   during hotplug) */
		if (payload[5] & 0x01)
			return;

		if (payload[4] & 0x01) {
			if (!(wdata->state.flags & WIIPROTO_FLAG_MP_PLUGGED)) {
				hid_dbg(wdata->hdev, "MP hotplug: 1\n");
				wdata->state.flags |= WIIPROTO_FLAG_MP_PLUGGED;
				__wiimote_schedule(wdata);
			}
		} else {
			if (wdata->state.flags & WIIPROTO_FLAG_MP_PLUGGED) {
				hid_dbg(wdata->hdev, "MP hotplug: 0\n");
				wdata->state.flags &= ~WIIPROTO_FLAG_MP_PLUGGED;
				wdata->state.flags &= ~WIIPROTO_FLAG_EXT_ACTIVE;
				__wiimote_schedule(wdata);
			}
		}

		/* detect MP data that is sent interleaved with EXT data */
		is_mp = payload[5] & 0x02;
	} else {
		is_mp = false;
	}

	/* ignore EXT events if no extension is active */
	if (!(wdata->state.flags & WIIPROTO_FLAG_EXT_ACTIVE) && !is_mp)
		return;

	/* try forwarding to extension handler, first */
	ops = wiimod_ext_table[wdata->state.exttype];
	if (is_mp && ops->in_mp) {
		ops->in_mp(wdata, payload);
		return;
	} else if (!is_mp && valid_ext_handler(ops, len)) {
		ops->in_ext(wdata, payload);
		return;
	}

	/* try forwarding to MP handler */
	ops = &wiimod_mp;
	if (is_mp && ops->in_mp) {
		ops->in_mp(wdata, payload);
		return;
	} else if (!is_mp && valid_ext_handler(ops, len)) {
		ops->in_ext(wdata, payload);
		return;
	}

	/* try forwarding to loaded modules */
	mods = wiimote_devtype_mods[wdata->state.devtype];
	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
		ops = wiimod_table[*iter];
		if (is_mp && ops->in_mp) {
			ops->in_mp(wdata, payload);
			return;
		} else if (!is_mp && valid_ext_handler(ops, len)) {
			ops->in_ext(wdata, payload);
			return;
		}
	}
}

/* convenience wrappers selecting the IR camera slot (0-3) */
#define ir_to_input0(wdata, ir, packed) handler_ir((wdata), (ir), (packed), 0)
#define ir_to_input1(wdata, ir, packed) handler_ir((wdata), (ir), (packed), 1)
#define ir_to_input2(wdata, ir, packed) handler_ir((wdata), (ir), (packed), 2)
#define ir_to_input3(wdata, ir, packed) handler_ir((wdata), (ir), (packed), 3)

/* same dispatch scheme as handler_keys(), but for one IR data slot */
static void handler_ir(struct wiimote_data *wdata, const __u8 *payload,
		       bool packed, unsigned int id)
{
	const __u8 *iter, *mods;
	const struct wiimod_ops *ops;

	ops = wiimod_ext_table[wdata->state.exttype];
	if (ops->in_ir) {
		ops->in_ir(wdata, payload, packed, id);
		return;
	}

	mods = wiimote_devtype_mods[wdata->state.devtype];
	for (iter = mods; *iter != WIIMOD_NULL; ++iter) {
		ops = wiimod_table[*iter];
		if (ops->in_ir) {
			ops->in_ir(wdata, payload, packed, id);
			break;
		}
	}
}

/* reduced status report with "BB BB" key data only */
static void handler_status_K(struct wiimote_data *wdata,
			     const __u8 *payload)
{
	handler_keys(wdata, payload);

	/* on status reports the drm is reset so we need to resend the drm */
	wiiproto_req_drm(wdata, WIIPROTO_REQ_NULL);
}

/* extended status report with "BB BB LF 00 00 VV" data */
static void handler_status(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_status_K(wdata, payload);

	/* update extension status */
	if (payload[2] & 0x02) {
		if (!(wdata->state.flags & WIIPROTO_FLAG_EXT_PLUGGED)) {
			hid_dbg(wdata->hdev, "EXT hotplug: 1\n");
			wdata->state.flags |= WIIPROTO_FLAG_EXT_PLUGGED;
			__wiimote_schedule(wdata);
		}
	} else {
		if (wdata->state.flags & WIIPROTO_FLAG_EXT_PLUGGED) {
			hid_dbg(wdata->hdev, "EXT hotplug: 0\n");
			wdata->state.flags &= ~WIIPROTO_FLAG_EXT_PLUGGED;
			wdata->state.flags &= ~WIIPROTO_FLAG_MP_PLUGGED;
			wdata->state.flags &= ~WIIPROTO_FLAG_EXT_ACTIVE;
			wdata->state.flags &= ~WIIPROTO_FLAG_MP_ACTIVE;
			__wiimote_schedule(wdata);
		}
	}

	/* payload[5] carries the battery level; complete any pending
	 * status request */
	wdata->state.cmd_battery = payload[5];
	if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_SREQ, 0))
		wiimote_cmd_complete(wdata);
}

/* reduced generic report with "BB BB" key data only */
static void handler_generic_K(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
}

/*
 * Memory-read answer: copy the returned bytes into the pending read buffer
 * and complete the command. An error nibble yields a zero-length result.
 */
static void handler_data(struct wiimote_data *wdata, const __u8 *payload)
{
	__u16 offset = payload[3] << 8 | payload[4];
	__u8 size = (payload[2] >> 4) + 1;
	__u8 err = payload[2] & 0x0f;

	handler_keys(wdata, payload);

	if (wiimote_cmd_pending(wdata, WIIPROTO_REQ_RMEM, offset)) {
		if (err)
			size = 0;
		else if (size > wdata->state.cmd_read_size)
			size = wdata->state.cmd_read_size;

		wdata->state.cmd_read_size = size;
		if (wdata->state.cmd_read_buf)
			memcpy(wdata->state.cmd_read_buf, &payload[5], size);
		wiimote_cmd_complete(wdata);
	}
}

/*
 * Acknowledge/error report: complete the matching pending command or, if
 * none matches, log the remote error.
 */
static void handler_return(struct wiimote_data *wdata, const __u8 *payload)
{
	__u8 err = payload[3];
	__u8 cmd = payload[2];

	handler_keys(wdata, payload);

	if (wiimote_cmd_pending(wdata, cmd, 0)) {
		wdata->state.cmd_err = err;
		wiimote_cmd_complete(wdata);
	} else if (err) {
		hid_warn(wdata->hdev, "Remote error %u on req %u\n", err,
			 cmd);
	}
}

/* DRM handlers: split a DRM payload into key/accel/IR/ext sub-payloads */

static void handler_drm_KA(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	handler_accel(wdata, payload);
}

static void handler_drm_KE(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	handler_ext(wdata, &payload[2], 8);
}

static void handler_drm_KAI(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	handler_accel(wdata, payload);
	ir_to_input0(wdata, &payload[5], false);
	ir_to_input1(wdata, &payload[8], false);
	ir_to_input2(wdata, &payload[11], false);
	ir_to_input3(wdata, &payload[14], false);
}

static void handler_drm_KEE(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	handler_ext(wdata, &payload[2], 19);
}

static void handler_drm_KIE(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	ir_to_input0(wdata, &payload[2], false);
	ir_to_input1(wdata, &payload[4], true);
	ir_to_input2(wdata, &payload[7], false);
	ir_to_input3(wdata, &payload[9], true);
	handler_ext(wdata, &payload[12], 9);
}

static void handler_drm_KAE(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	handler_accel(wdata, payload);
	handler_ext(wdata, &payload[5], 16);
}

static void handler_drm_KAIE(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);
	handler_accel(wdata, payload);
	ir_to_input0(wdata, &payload[5], false);
	ir_to_input1(wdata, &payload[7], true);
	ir_to_input2(wdata, &payload[10], false);
	ir_to_input3(wdata, &payload[12], true);
	handler_ext(wdata, &payload[15], 6);
}

static void handler_drm_E(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_ext(wdata, payload, 21);
}

/* interleaved DRM: accel data is split across two consecutive reports,
 * reassembled via state.accel_split[] */
static void handler_drm_SKAI1(struct wiimote_data *wdata, const __u8 *payload)
{
	handler_keys(wdata, payload);

	wdata->state.accel_split[0] = payload[2];
	wdata->state.accel_split[1] = (payload[0] >> 1) & (0x10 | 0x20);
	wdata->state.accel_split[1] |= (payload[1] << 1) & (0x40 | 0x80);

	ir_to_input0(wdata, &payload[3], false);
	ir_to_input1(wdata, &payload[12], false);
}

static void handler_drm_SKAI2(struct wiimote_data *wdata, const __u8 *payload)
{
	__u8 buf[5];

	handler_keys(wdata, payload);

	wdata->state.accel_split[1] |= (payload[0] >> 5) & (0x01 | 0x02);
	wdata->state.accel_split[1] |= (payload[1] >> 3) & (0x04 | 0x08);

	/* rebuild a full accel payload from the two halves */
	buf[0] = 0;
	buf[1] = 0;
	buf[2] = wdata->state.accel_split[0];
	buf[3] = payload[2];
	buf[4] = wdata->state.accel_split[1];
	handler_accel(wdata, buf);

	ir_to_input2(wdata, &payload[3], false);
	ir_to_input3(wdata, &payload[12], false);
}

/* report-ID -> handler mapping; size is the minimum payload length */
struct wiiproto_handler {
	__u8 id;
	size_t size;
	void (*func)(struct wiimote_data *wdata, const __u8 *payload);
};

static const struct wiiproto_handler handlers[] = {
	{ .id = WIIPROTO_REQ_STATUS, .size = 6, .func = handler_status },
	{ .id = WIIPROTO_REQ_STATUS, .size = 2, .func = handler_status_K },
	{ .id = WIIPROTO_REQ_DATA, .size = 21, .func = handler_data },
	{ .id = WIIPROTO_REQ_DATA, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_RETURN, .size = 4, .func = handler_return },
	{ .id = WIIPROTO_REQ_RETURN, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_K, .size = 2, .func = handler_keys },
	{ .id = WIIPROTO_REQ_DRM_KA, .size = 5, .func = handler_drm_KA },
	{ .id = WIIPROTO_REQ_DRM_KA, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_KE, .size = 10, .func = handler_drm_KE },
	{ .id = WIIPROTO_REQ_DRM_KE, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_KAI, .size = 17, .func =
	  handler_drm_KAI },
	{ .id = WIIPROTO_REQ_DRM_KAI, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_KEE, .size = 21, .func = handler_drm_KEE },
	{ .id = WIIPROTO_REQ_DRM_KEE, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_KAE, .size = 21, .func = handler_drm_KAE },
	{ .id = WIIPROTO_REQ_DRM_KAE, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_KIE, .size = 21, .func = handler_drm_KIE },
	{ .id = WIIPROTO_REQ_DRM_KIE, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_KAIE, .size = 21, .func = handler_drm_KAIE },
	{ .id = WIIPROTO_REQ_DRM_KAIE, .size = 2, .func = handler_generic_K },
	{ .id = WIIPROTO_REQ_DRM_E, .size = 21, .func = handler_drm_E },
	{ .id = WIIPROTO_REQ_DRM_SKAI1, .size = 21, .func = handler_drm_SKAI1 },
	{ .id = WIIPROTO_REQ_DRM_SKAI2, .size = 21, .func = handler_drm_SKAI2 },
	{ .id = 0 }
};

/*
 * Raw HID event entry point: dispatch raw_data[0] (report id) to the first
 * matching handler whose minimum size fits (h->size < size because the id
 * byte itself is excluded from the payload).
 */
static int wiimote_hid_event(struct hid_device *hdev, struct hid_report *report,
			     u8 *raw_data, int size)
{
	struct wiimote_data *wdata = hid_get_drvdata(hdev);
	const struct wiiproto_handler *h;
	int i;
	unsigned long flags;

	if (size < 1)
		return -EINVAL;

	for (i = 0; handlers[i].id; ++i) {
		h = &handlers[i];
		if (h->id == raw_data[0] && h->size < size) {
			spin_lock_irqsave(&wdata->state.lock, flags);
			h->func(wdata, &raw_data[1]);
			spin_unlock_irqrestore(&wdata->state.lock, flags);
			break;
		}
	}

	if (!handlers[i].id)
		hid_warn(hdev, "Unhandled report %hhu size %d\n", raw_data[0],
			 size);

	return 0;
}

/* sysfs "extension" attribute: report the active extension type */
static ssize_t wiimote_ext_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct wiimote_data *wdata = dev_to_wii(dev);
	__u8 type;
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	type = wdata->state.exttype;
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	switch (type) {
	case WIIMOTE_EXT_NONE:
		return sprintf(buf, "none\n");
	case WIIMOTE_EXT_NUNCHUK:
		return sprintf(buf, "nunchuk\n");
	case WIIMOTE_EXT_CLASSIC_CONTROLLER:
		return sprintf(buf, "classic\n");
	case WIIMOTE_EXT_BALANCE_BOARD:
		return sprintf(buf, "balanceboard\n");
	case WIIMOTE_EXT_PRO_CONTROLLER:
		return sprintf(buf, "procontroller\n");
	case WIIMOTE_EXT_DRUMS:
		return sprintf(buf, "drums\n");
	case WIIMOTE_EXT_GUITAR:
		return sprintf(buf, "guitar\n");
	case WIIMOTE_EXT_TURNTABLE:
		return sprintf(buf, "turntable\n");
	case WIIMOTE_EXT_UNKNOWN:
	default:
		return sprintf(buf, "unknown\n");
	}
}

/* sysfs "extension" store: only "scan" is accepted; triggers a re-detect */
static ssize_t wiimote_ext_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct wiimote_data *wdata = dev_to_wii(dev);

	if (!strcmp(buf, "scan")) {
		wiimote_schedule(wdata);
	} else {
		return -EINVAL;
	}

	return strnlen(buf, PAGE_SIZE);
}

static DEVICE_ATTR(extension, S_IRUGO | S_IWUSR | S_IWGRP, wiimote_ext_show,
		   wiimote_ext_store);

/* sysfs "devtype" attribute: report the detected device generation */
static ssize_t wiimote_dev_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct wiimote_data *wdata = dev_to_wii(dev);
	__u8 type;
	unsigned long flags;

	spin_lock_irqsave(&wdata->state.lock, flags);
	type = wdata->state.devtype;
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	switch (type) {
	case WIIMOTE_DEV_GENERIC:
		return sprintf(buf, "generic\n");
	case WIIMOTE_DEV_GEN10:
		return sprintf(buf, "gen10\n");
	case WIIMOTE_DEV_GEN20:
		return sprintf(buf, "gen20\n");
	case WIIMOTE_DEV_BALANCE_BOARD:
		return sprintf(buf, "balanceboard\n");
	case WIIMOTE_DEV_PRO_CONTROLLER:
		return sprintf(buf, "procontroller\n");
	case WIIMOTE_DEV_PENDING:
		return sprintf(buf, "pending\n");
	case WIIMOTE_DEV_UNKNOWN:
	default:
		return sprintf(buf, "unknown\n");
	}
}

static DEVICE_ATTR(devtype, S_IRUGO, wiimote_dev_show, NULL);

/*
 * Allocate and initialize the per-device state: locks, work items, the MP
 * polling timer and default protocol state (DRM_K, unknown battery).
 */
static struct wiimote_data *wiimote_create(struct hid_device *hdev)
{
	struct wiimote_data *wdata;

	wdata = kzalloc(sizeof(*wdata), GFP_KERNEL);
	if (!wdata)
		return NULL;

	wdata->hdev = hdev;
	hid_set_drvdata(hdev, wdata);

	spin_lock_init(&wdata->queue.lock);
	INIT_WORK(&wdata->queue.worker, wiimote_queue_worker);

	spin_lock_init(&wdata->state.lock);
	init_completion(&wdata->state.ready);
	mutex_init(&wdata->state.sync);
	wdata->state.drm = WIIPROTO_REQ_DRM_K;
	wdata->state.cmd_battery = 0xff;

	INIT_WORK(&wdata->init_worker, wiimote_init_worker);
	timer_setup(&wdata->timer, wiimote_init_timeout, 0);

	return wdata;
}

/*
 * Tear down the device: mark EXITING so no new work is scheduled, flush
 * workers and timer, remove sysfs files, unload modules and stop HID I/O.
 */
static void wiimote_destroy(struct wiimote_data *wdata)
{
	unsigned long flags;

	wiidebug_deinit(wdata);

	/* prevent init_worker from being scheduled again */
	spin_lock_irqsave(&wdata->state.lock, flags);
	wdata->state.flags |= WIIPROTO_FLAG_EXITING;
	spin_unlock_irqrestore(&wdata->state.lock, flags);

	cancel_work_sync(&wdata->init_worker);
	timer_shutdown_sync(&wdata->timer);

	device_remove_file(&wdata->hdev->dev, &dev_attr_devtype);
	device_remove_file(&wdata->hdev->dev, &dev_attr_extension);

	wiimote_mp_unload(wdata);
	wiimote_ext_unload(wdata);
	wiimote_modules_unload(wdata);
	cancel_work_sync(&wdata->queue.worker);
	hid_hw_close(wdata->hdev);
	hid_hw_stop(wdata->hdev);

	kfree(wdata);
}

/*
 * HID probe: allocate state, start HID I/O, create sysfs attributes and
 * kick off asynchronous device detection. Errors unwind in reverse order.
 */
static int wiimote_hid_probe(struct hid_device *hdev,
			     const struct hid_device_id *id)
{
	struct wiimote_data *wdata;
	int ret;

	hdev->quirks |= HID_QUIRK_NO_INIT_REPORTS;

	wdata = wiimote_create(hdev);
	if (!wdata) {
		hid_err(hdev, "Can't alloc device\n");
		return -ENOMEM;
	}

	ret = hid_parse(hdev);
	if (ret) {
		hid_err(hdev, "HID parse failed\n");
		goto err;
	}

	ret = hid_hw_start(hdev, HID_CONNECT_HIDRAW);
	if (ret) {
		hid_err(hdev, "HW start failed\n");
		goto err;
	}

	ret = hid_hw_open(hdev);
	if (ret) {
		hid_err(hdev, "cannot start hardware I/O\n");
		goto err_stop;
	}

	ret = device_create_file(&hdev->dev, &dev_attr_extension);
	if (ret) {
		hid_err(hdev, "cannot create sysfs attribute\n");
		goto err_close;
	}

	ret = device_create_file(&hdev->dev, &dev_attr_devtype);
	if (ret) {
		hid_err(hdev, "cannot create sysfs attribute\n");
		goto err_ext;
	}

	ret = wiidebug_init(wdata);
	if (ret)
		goto err_free;

	hid_info(hdev, "New device registered\n");

	/* schedule device detection */
	wiimote_schedule(wdata);

	return 0;

err_free:
	/* full teardown; wiimote_destroy() also closes/stops HID */
	wiimote_destroy(wdata);
	return ret;

err_ext:
	device_remove_file(&wdata->hdev->dev, &dev_attr_extension);
err_close:
	hid_hw_close(hdev);
err_stop:
	hid_hw_stop(hdev);
err:
	input_free_device(wdata->ir);
	input_free_device(wdata->accel);
	kfree(wdata);
	return ret;
}

static void wiimote_hid_remove(struct hid_device *hdev)
{
	struct wiimote_data *wdata = hid_get_drvdata(hdev);

	hid_info(hdev, "Device removed\n");
	wiimote_destroy(wdata);
}

static const struct hid_device_id wiimote_hid_devices[] = {
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
				USB_DEVICE_ID_NINTENDO_WIIMOTE) },
	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_NINTENDO,
				USB_DEVICE_ID_NINTENDO_WIIMOTE2) },
	{ }
};

/* module option: report the D-Pad through the analog axes instead of keys */
bool wiimote_dpad_as_analog = false;
module_param_named(dpad_as_analog, wiimote_dpad_as_analog, bool, 0644);
MODULE_PARM_DESC(dpad_as_analog, "Use D-Pad as main analog input");

MODULE_DEVICE_TABLE(hid, wiimote_hid_devices);

static struct hid_driver wiimote_hid_driver = {
	.name = "wiimote",
	.id_table = wiimote_hid_devices,
	.probe = wiimote_hid_probe,
	.remove = wiimote_hid_remove,
	.raw_event = wiimote_hid_event,
};
module_hid_driver(wiimote_hid_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Herrmann <[email protected]>");
MODULE_DESCRIPTION("Driver for Nintendo Wii / Wii U peripherals");
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 */

#include <linux/init.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <linux/usb/audio-v3.h>

#include <sound/core.h>
#include <sound/pcm.h>

#include "usbaudio.h"
#include "card.h"
#include "quirks.h"
#include "helper.h"
#include "clock.h"
#include "format.h"

/*
 * parse the audio format type I descriptor
 * and returns the corresponding pcm format
 *
 * @dev: usb device
 * @fp: audioformat record
 * @format: the format tag (wFormatTag)
 * @fmt: the format type descriptor (v1/v2) or AudioStreaming descriptor (v3)
 */
static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
				     struct audioformat *fp,
				     u64 format, void *_fmt)
{
	int sample_width, sample_bytes;
	u64 pcm_formats = 0;

	switch (fp->protocol) {
	case UAC_VERSION_1:
	default: {
		struct uac_format_type_i_discrete_descriptor *fmt = _fmt;

		/* UAC1 wFormatTag is an index; values >= 64 would overflow
		 * the 1ULL << format shift below, so clamp to PCM */
		if (format >= 64) {
			usb_audio_info(chip,
				       "%u:%d: invalid format type 0x%llx is detected, processed as PCM\n",
				       fp->iface, fp->altsetting, format);
			format = UAC_FORMAT_TYPE_I_PCM;
		}
		sample_width = fmt->bBitResolution;
		sample_bytes = fmt->bSubframeSize;
		format = 1ULL << format;
		break;
	}

	case UAC_VERSION_2: {
		struct uac_format_type_i_ext_descriptor *fmt = _fmt;

		sample_width = fmt->bBitResolution;
		sample_bytes = fmt->bSubslotSize;

		if (format & UAC2_FORMAT_TYPE_I_RAW_DATA) {
			pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;
			/* flag potentially raw DSD capable altsettings */
			fp->dsd_raw = true;
		}

		/* UAC2/3 bmFormats is already a bitmap; shift to align with
		 * the UAC1-style BIT(UAC_FORMAT_TYPE_I_*) positions */
		format <<= 1;
		break;
	}

	case UAC_VERSION_3: {
		struct uac3_as_header_descriptor *as = _fmt;

		sample_width = as->bBitResolution;
		sample_bytes = as->bSubslotSize;

		if (format & UAC3_FORMAT_TYPE_I_RAW_DATA)
			pcm_formats |= SNDRV_PCM_FMTBIT_SPECIAL;

		format <<= 1;
		break;
	}
	}

	fp->fmt_bits = sample_width;

	if ((pcm_formats == 0) &&
	    (format == 0 || format == BIT(UAC_FORMAT_TYPE_I_UNDEFINED))) {
		/* some devices don't define this correctly... */
		usb_audio_info(chip, "%u:%d : format type 0 is detected, processed as PCM\n",
			       fp->iface, fp->altsetting);
		format = BIT(UAC_FORMAT_TYPE_I_PCM);
	}
	if (format & BIT(UAC_FORMAT_TYPE_I_PCM)) {
		if (((chip->usb_id == USB_ID(0x0582, 0x0016)) || /* Edirol SD-90 */
		     (chip->usb_id == USB_ID(0x0582, 0x000c))) && /* Roland SC-D70 */
		    sample_width == 24 && sample_bytes == 2)
			sample_bytes = 3;
		else if (sample_width > sample_bytes * 8) {
			usb_audio_info(chip, "%u:%d : sample bitwidth %d in over sample bytes %d\n",
				       fp->iface, fp->altsetting,
				       sample_width, sample_bytes);
		}
		/* check the format byte size */
		switch (sample_bytes) {
		case 1:
			pcm_formats |= SNDRV_PCM_FMTBIT_S8;
			break;
		case 2:
			if (snd_usb_is_big_endian_format(chip, fp))
				pcm_formats |= SNDRV_PCM_FMTBIT_S16_BE; /* grrr, big endian!! */
			else
				pcm_formats |= SNDRV_PCM_FMTBIT_S16_LE;
			break;
		case 3:
			if (snd_usb_is_big_endian_format(chip, fp))
				pcm_formats |= SNDRV_PCM_FMTBIT_S24_3BE; /* grrr, big endian!! */
			else
				pcm_formats |= SNDRV_PCM_FMTBIT_S24_3LE;
			break;
		case 4:
			pcm_formats |= SNDRV_PCM_FMTBIT_S32_LE;
			break;
		default:
			usb_audio_info(chip,
				       "%u:%d : unsupported sample bitwidth %d in %d bytes\n",
				       fp->iface, fp->altsetting,
				       sample_width, sample_bytes);
			break;
		}
	}
	if (format & BIT(UAC_FORMAT_TYPE_I_PCM8)) {
		/* Dallas DS4201 workaround: it advertises U8 format, but really
		   supports S8. */
		if (chip->usb_id == USB_ID(0x04fa, 0x4201))
			pcm_formats |= SNDRV_PCM_FMTBIT_S8;
		else
			pcm_formats |= SNDRV_PCM_FMTBIT_U8;
	}
	if (format & BIT(UAC_FORMAT_TYPE_I_IEEE_FLOAT))
		pcm_formats |= SNDRV_PCM_FMTBIT_FLOAT_LE;
	if (format & BIT(UAC_FORMAT_TYPE_I_ALAW))
		pcm_formats |= SNDRV_PCM_FMTBIT_A_LAW;
	if (format & BIT(UAC_FORMAT_TYPE_I_MULAW))
		pcm_formats |= SNDRV_PCM_FMTBIT_MU_LAW;
	if (format & ~0x3f) {
		usb_audio_info(chip,
			       "%u:%d : unsupported format bits %#llx\n",
			       fp->iface, fp->altsetting, format);
	}

	pcm_formats |= snd_usb_interface_dsd_format_quirks(chip, fp, sample_bytes);

	return pcm_formats;
}

/* force a single fixed sample rate on the audioformat (quirk helper) */
static int set_fixed_rate(struct audioformat *fp, int rate, int rate_bits)
{
	kfree(fp->rate_table);
	fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
	if (!fp->rate_table)
		return -ENOMEM;
	fp->nr_rates = 1;
	fp->rate_min = rate;
	fp->rate_max = rate;
	fp->rates = rate_bits;
	fp->rate_table[0] = rate;
	return 0;
}

/* set up rate_min, rate_max and rates from the rate table */
static void set_rate_table_min_max(struct audioformat *fp)
{
	unsigned int rate;
	int i;

	fp->rate_min = INT_MAX;
	fp->rate_max = 0;
	fp->rates = 0;
	for (i = 0; i < fp->nr_rates; i++) {
		rate = fp->rate_table[i];
		fp->rate_min = min(fp->rate_min, rate);
		fp->rate_max = max(fp->rate_max, rate);
		fp->rates |= snd_pcm_rate_to_rate_bit(rate);
	}
}

/*
 * parse the format descriptor and stores the possible sample rates
 * on the audioformat table (audio class v1).
 *
 * @dev: usb device
 * @fp: audioformat record
 * @fmt: the format descriptor
 * @offset: the start offset of descriptor pointing the rate type
 *		(7 for type I and II, 8 for type II)
 */
static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audioformat *fp,
				       unsigned char *fmt, int offset)
{
	int nr_rates = fmt[offset];

	/* nr_rates == 0 means a continuous min/max pair (2 triplets) */
	if (fmt[0] < offset + 1 + 3 * (nr_rates ? nr_rates : 2)) {
		usb_audio_err(chip,
			      "%u:%d : invalid UAC_FORMAT_TYPE desc\n",
			      fp->iface, fp->altsetting);
		return -EINVAL;
	}

	if (nr_rates) {
		/*
		 * build the rate table and bitmap flags
		 */
		int r, idx;

		fp->rate_table = kmalloc_array(nr_rates, sizeof(int), GFP_KERNEL);
		if (fp->rate_table == NULL)
			return -ENOMEM;

		fp->nr_rates = 0;
		for (r = 0, idx = offset + 1; r < nr_rates; r++, idx += 3) {
			unsigned int rate = combine_triple(&fmt[idx]);
			if (!rate)
				continue;
			/* C-Media CM6501 mislabels its 96 kHz altsetting */
			/* Terratec Aureon 7.1 USB C-Media 6206, too */
			/* Ozone Z90 USB C-Media, too */
			if (rate == 48000 && nr_rates == 1 &&
			    (chip->usb_id == USB_ID(0x0d8c, 0x0201) ||
			     chip->usb_id == USB_ID(0x0d8c, 0x0102) ||
			     chip->usb_id == USB_ID(0x0d8c, 0x0078) ||
			     chip->usb_id == USB_ID(0x0ccd, 0x00b1)) &&
			    fp->altsetting == 5 && fp->maxpacksize == 392)
				rate = 96000;
			/* Creative VF0420/VF0470 Live Cams report 16 kHz instead of 8kHz */
			if (rate == 16000 &&
			    (chip->usb_id == USB_ID(0x041e, 0x4064) ||
			     chip->usb_id == USB_ID(0x041e, 0x4068)))
				rate = 8000;

			fp->rate_table[fp->nr_rates++] = rate;
		}
		if (!fp->nr_rates) {
			usb_audio_info(chip,
				       "%u:%d: All rates were zero\n",
				       fp->iface, fp->altsetting);
			return -EINVAL;
		}
		set_rate_table_min_max(fp);
	} else {
		/* continuous rates */
		fp->rates = SNDRV_PCM_RATE_CONTINUOUS;
		fp->rate_min = combine_triple(&fmt[offset + 1]);
		fp->rate_max = combine_triple(&fmt[offset + 4]);
	}

	/* Jabra Evolve 65 headset */
	if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) {
		/* only 48kHz for playback while keeping 16kHz for capture */
		if (fp->nr_rates != 1)
			return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
	}

	return 0;
}

/*
 * Presonus Studio 1810c supports a limited set of sampling
 * rates per altsetting but reports the full set each time.
 * If we don't filter out the unsupported rates and attempt
 * to configure the card, it will hang refusing to do any
 * further audio I/O until a hard reset is performed.
 *
 * The list of supported rates per altsetting (set of available
 * I/O channels) is described in the owner's manual, section 2.2.
 */
static bool s1810c_valid_sample_rate(struct audioformat *fp,
				     unsigned int rate)
{
	switch (fp->altsetting) {
	case 1:
		/* All ADAT ports available */
		return rate <= 48000;
	case 2:
		/* Half of ADAT ports available */
		return (rate == 88200 || rate == 96000);
	case 3:
		/* Analog I/O only (no S/PDIF nor ADAT) */
		return rate >= 176400;
	default:
		return false;
	}
	/* not reached: every switch arm returns above */
	return false;
}

/*
 * Many Focusrite devices supports a limited set of sampling rates per
 * altsetting. Maximum rate is exposed in the last 4 bytes of Format Type
 * descriptor which has a non-standard bLength = 10.
 *
 * Returns true (rate accepted) whenever the descriptor cannot be read or
 * looks implausible — i.e. the filter fails open rather than dropping rates.
 */
static bool focusrite_valid_sample_rate(struct snd_usb_audio *chip,
					struct audioformat *fp,
					unsigned int rate)
{
	struct usb_interface *iface;
	struct usb_host_interface *alts;
	unsigned char *fmt;
	unsigned int max_rate;

	iface = usb_ifnum_to_if(chip->dev, fp->iface);
	if (!iface)
		return true;

	alts = &iface->altsetting[fp->altset_idx];
	fmt = snd_usb_find_csint_desc(alts->extra, alts->extralen,
				      NULL, UAC_FORMAT_TYPE);
	if (!fmt)
		return true;

	if (fmt[0] == 10) { /* bLength */
		/* vendor extension: bytes 6..9 hold the max rate */
		max_rate = combine_quad(&fmt[6]);

		/* Validate max rate */
		if (max_rate != 48000 && max_rate != 96000 &&
		    max_rate != 192000 && max_rate != 384000) {
			usb_audio_info(chip,
				       "%u:%d : unexpected max rate: %u\n",
				       fp->iface, fp->altsetting, max_rate);
			return true;
		}

		return rate <= max_rate;
	}

	return true;
}

/*
 * Helper function to walk the array of sample rate triplets reported by
 * the device. The problem is that we need to parse whole array first to
 * get to know how many sample rates we have to expect.
 * Then fp->rate_table can be allocated and filled.
 */
static int parse_uac2_sample_rate_range(struct snd_usb_audio *chip,
					struct audioformat *fp, int nr_triplets,
					const unsigned char *data)
{
	int i, nr_rates = 0;

	/*
	 * Two-pass design: when fp->rate_table is NULL we only count
	 * the rates; on the second call (table allocated) we store them.
	 * Returns the number of discrete rates, or 0 after setting up a
	 * continuous range.
	 */
	for (i = 0; i < nr_triplets; i++) {
		/* each RANGE triplet is (MIN, MAX, RES), 4 bytes each */
		int min = combine_quad(&data[2 + 12 * i]);
		int max = combine_quad(&data[6 + 12 * i]);
		int res = combine_quad(&data[10 + 12 * i]);
		unsigned int rate;

		/* skip malformed triplets (negative or inverted range) */
		if ((max < 0) || (min < 0) || (res < 0) || (max < min))
			continue;

		/*
		 * for ranges with res == 1, we announce a continuous sample
		 * rate range, and this function should return 0 for no further
		 * parsing.
		 */
		if (res == 1) {
			fp->rate_min = min;
			fp->rate_max = max;
			fp->rates = SNDRV_PCM_RATE_CONTINUOUS;
			return 0;
		}

		for (rate = min; rate <= max; rate += res) {
			/* Filter out invalid rates on Presonus Studio 1810c */
			if (chip->usb_id == USB_ID(0x194f, 0x010c) &&
			    !s1810c_valid_sample_rate(fp, rate))
				goto skip_rate;

			/* Filter out invalid rates on Focusrite devices */
			if (USB_ID_VENDOR(chip->usb_id) == 0x1235 &&
			    !focusrite_valid_sample_rate(chip, fp, rate))
				goto skip_rate;

			if (fp->rate_table)
				fp->rate_table[nr_rates] = rate;
			nr_rates++;
			/* cap runaway descriptors at MAX_NR_RATES entries */
			if (nr_rates >= MAX_NR_RATES) {
				usb_audio_err(chip, "invalid uac2 rates\n");
				break;
			}

skip_rate:
			/* avoid endless loop */
			if (res == 0)
				break;
		}
	}

	return nr_rates;
}

/* Line6 Helix series and the Rode Rodecaster Pro don't support the
 * UAC2_CS_RANGE usb function call. Return a static table of known
 * clock rates.
 */
static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
						struct audioformat *fp)
{
	/* all listed devices run at a fixed 48 kHz */
	switch (chip->usb_id) {
	case USB_ID(0x0e41, 0x4241): /* Line6 Helix */
	case USB_ID(0x0e41, 0x4242): /* Line6 Helix Rack */
	case USB_ID(0x0e41, 0x4244): /* Line6 Helix LT */
	case USB_ID(0x0e41, 0x4246): /* Line6 HX-Stomp */
	case USB_ID(0x0e41, 0x4253): /* Line6 HX-Stomp XL */
	case USB_ID(0x0e41, 0x4247): /* Line6 Pod Go */
	case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
	case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
	case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
	case USB_ID(0x0e41, 0x424b): /* Line6 Pod Go */
	case USB_ID(0x19f7, 0x0011): /* Rode Rodecaster Pro */
		return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
	}

	/* not one of the quirky devices */
	return -ENODEV;
}

/* check whether the given altsetting is supported for the already set rate */
static bool check_valid_altsetting_v2v3(struct snd_usb_audio *chip, int iface,
					int altsetting)
{
	struct usb_device *dev = chip->dev;
	__le64 raw_data = 0;
	u64 data;
	int err;

	/* we assume 64bit is enough for any altsettings */
	if (snd_BUG_ON(altsetting >= 64 - 8))
		return false;

	/* query the AS_VAL_ALT_SETTINGS bitmap from the device */
	err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			      UAC2_AS_VAL_ALT_SETTINGS << 8,
			      iface, &raw_data, sizeof(raw_data));
	if (err < 0)
		return false;

	data = le64_to_cpu(raw_data);
	/* first byte contains the bitmap size */
	if ((data & 0xff) * 8 < altsetting)
		return false;
	/* altsetting bits start at bit 8, one bit per altsetting number */
	if (data & (1ULL << (altsetting + 8)))
		return true;

	return false;
}

/*
 * Validate each sample rate with the altsetting
 * Rebuild the rate table if only partial values are valid
 */
static int validate_sample_rate_table_v2v3(struct snd_usb_audio *chip,
					   struct audioformat *fp,
					   int clock)
{
	struct usb_device *dev = chip->dev;
	struct usb_host_interface *alts;
	unsigned int *table;
	unsigned int nr_rates;
	int i, err;
	u32 bmControls;

	/* performing the rate verification may lead to unexpected USB bus
	 * behavior afterwards by
some unknown reason. Do this only for the
	 * known devices.
	 */
	if (!(chip->quirk_flags & QUIRK_FLAG_VALIDATE_RATES))
		return 0; /* don't perform the validation as default */

	alts = snd_usb_get_host_interface(chip, fp->iface, fp->altsetting);
	if (!alts)
		return 0;

	if (fp->protocol == UAC_VERSION_3) {
		/*
		 * NOTE(review): snd_usb_find_csint_desc() can return NULL;
		 * 'as' is dereferenced unconditionally below — presumably an
		 * AS_GENERAL descriptor is guaranteed by earlier descriptor
		 * validation, but confirm.
		 */
		struct uac3_as_header_descriptor *as = snd_usb_find_csint_desc(
				alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);

		bmControls = le32_to_cpu(as->bmControls);
	} else {
		struct uac2_as_header_descriptor *as = snd_usb_find_csint_desc(
				alts->extra, alts->extralen, NULL, UAC_AS_GENERAL);

		bmControls = as->bmControls;
	}

	/* the VAL_ALT_SETTINGS control must be readable for this probing */
	if (!uac_v2v3_control_is_readable(bmControls,
					  UAC2_AS_VAL_ALT_SETTINGS))
		return 0;

	table = kcalloc(fp->nr_rates, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* clear the interface altsetting at first */
	usb_set_interface(dev, fp->iface, 0);

	/* try each rate and keep only those the device accepts */
	nr_rates = 0;
	for (i = 0; i < fp->nr_rates; i++) {
		err = snd_usb_set_sample_rate_v2v3(chip, fp, clock,
						   fp->rate_table[i]);
		if (err < 0)
			continue;

		if (check_valid_altsetting_v2v3(chip, fp->iface, fp->altsetting))
			table[nr_rates++] = fp->rate_table[i];
	}

	if (!nr_rates) {
		usb_audio_dbg(chip,
			      "No valid sample rate available for %d:%d, assuming a firmware bug\n",
			      fp->iface, fp->altsetting);
		nr_rates = fp->nr_rates; /* continue as is */
	}

	/* nothing was filtered out: keep the original table */
	if (fp->nr_rates == nr_rates) {
		kfree(table);
		return 0;
	}

	/* install the reduced table in place of the original */
	kfree(fp->rate_table);
	fp->rate_table = table;
	fp->nr_rates = nr_rates;
	return 0;
}

/*
 * parse the format descriptor and stores the possible sample rates
 * on the audioformat table (audio class v2 and v3).
 */
static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
					 struct audioformat *fp)
{
	struct usb_device *dev = chip->dev;
	unsigned char tmp[2], *data;
	int nr_triplets, data_size, ret = 0, ret_l6;
	int clock = snd_usb_clock_find_source(chip, fp, false);
	struct usb_host_interface *ctrl_intf;

	ctrl_intf = snd_usb_find_ctrl_interface(chip, fp->iface);
	if (clock < 0) {
		dev_err(&dev->dev,
			"%s(): unable to find clock source (clock %d)\n",
			__func__, clock);
		goto err;
	}

	/* get the number of sample rates first by only fetching 2 bytes */
	ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			      UAC2_CS_CONTROL_SAM_FREQ << 8,
			      snd_usb_ctrl_intf(ctrl_intf) | (clock << 8),
			      tmp, sizeof(tmp));

	if (ret < 0) {
		/* line6 helix devices don't support UAC2_CS_CONTROL_SAM_FREQ call */
		ret_l6 = line6_parse_audio_format_rates_quirk(chip, fp);
		if (ret_l6 == -ENODEV) {
			/* no line6 device found continue showing the error */
			dev_err(&dev->dev,
				"%s(): unable to retrieve number of sample rates (clock %d)\n",
				__func__, clock);
			goto err;
		}
		if (ret_l6 == 0) {
			/* quirk installed a fixed 48 kHz table; we are done */
			dev_info(&dev->dev,
				"%s(): unable to retrieve number of sample rates: set it to a predefined value (clock %d).\n",
				__func__, clock);
			return 0;
		}
		/* quirk itself failed (e.g. -ENOMEM); propagate */
		ret = ret_l6;
		goto err;
	}

	/* wNumSubRanges, little-endian */
	nr_triplets = (tmp[1] << 8) | tmp[0];
	data_size = 2 + 12 * nr_triplets;
	data = kzalloc(data_size, GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto err;
	}

	/* now get the full information */
	ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_RANGE,
			      USB_TYPE_CLASS | USB_RECIP_INTERFACE | USB_DIR_IN,
			      UAC2_CS_CONTROL_SAM_FREQ << 8,
			      snd_usb_ctrl_intf(ctrl_intf) | (clock << 8),
			      data, data_size);

	if (ret < 0) {
		dev_err(&dev->dev,
			"%s(): unable to retrieve sample rate range (clock %d)\n",
			__func__, clock);
		ret = -EINVAL;
		goto err_free;
	}

	/* Call the triplet parser, and make sure fp->rate_table is NULL.
	 * We just use the return value to know how many sample rates we
	 * will have to deal with.
 */
	kfree(fp->rate_table);
	fp->rate_table = NULL;
	/* first pass: count only (rate_table == NULL) */
	fp->nr_rates = parse_uac2_sample_rate_range(chip, fp, nr_triplets, data);

	if (fp->nr_rates == 0) {
		/* SNDRV_PCM_RATE_CONTINUOUS */
		ret = 0;
		goto err_free;
	}

	fp->rate_table = kmalloc_array(fp->nr_rates, sizeof(int), GFP_KERNEL);
	if (!fp->rate_table) {
		ret = -ENOMEM;
		goto err_free;
	}

	/* Call the triplet parser again, but this time, fp->rate_table is
	 * allocated, so the rates will be stored */
	parse_uac2_sample_rate_range(chip, fp, nr_triplets, data);

	/* optionally probe the device and drop rates it rejects */
	ret = validate_sample_rate_table_v2v3(chip, fp, clock);
	if (ret < 0)
		goto err_free;

	set_rate_table_min_max(fp);

err_free:
	kfree(data);
err:
	return ret;
}

/*
 * parse the format type I and III descriptors
 */
static int parse_audio_format_i(struct snd_usb_audio *chip,
				struct audioformat *fp, u64 format,
				void *_fmt)
{
	snd_pcm_format_t pcm_format;
	unsigned int fmt_type;
	int ret;

	/* determine the format type; UAC3 pre-sets it in fp */
	switch (fp->protocol) {
	default:
	case UAC_VERSION_1:
	case UAC_VERSION_2: {
		struct uac_format_type_i_continuous_descriptor *fmt = _fmt;

		fmt_type = fmt->bFormatType;
		break;
	}
	case UAC_VERSION_3: {
		/* fp->fmt_type is already set in this case */
		fmt_type = fp->fmt_type;
		break;
	}
	}

	if (fmt_type == UAC_FORMAT_TYPE_III) {
		/* FIXME: the format type is really IECxxx
		 * but we give normal PCM format to get the existing
		 * apps working...
		 */
		switch (chip->usb_id) {
		case USB_ID(0x0763, 0x2003): /* M-Audio Audiophile USB */
			if (chip->setup == 0x00 &&
			    fp->altsetting == 6)
				pcm_format = SNDRV_PCM_FORMAT_S16_BE;
			else
				pcm_format = SNDRV_PCM_FORMAT_S16_LE;
			break;
		default:
			pcm_format = SNDRV_PCM_FORMAT_S16_LE;
		}
		fp->formats = pcm_format_to_bits(pcm_format);
	} else {
		/* type I: translate the descriptor's format bits */
		fp->formats = parse_audio_format_i_type(chip, fp, format, _fmt);
		if (!fp->formats)
			return -EINVAL;
	}

	/* gather possible sample rates */
	/* audio class v1 reports possible sample rates as part of the
	 * proprietary class specific descriptor.
	 * audio class v2 uses class specific EP0 range requests for that.
 */
	switch (fp->protocol) {
	default:
	case UAC_VERSION_1: {
		struct uac_format_type_i_continuous_descriptor *fmt = _fmt;

		fp->channels = fmt->bNrChannels;
		/* rates follow at offset 7 in the type-I v1 descriptor */
		ret = parse_audio_format_rates_v1(chip, fp, (unsigned char *) fmt, 7);
		break;
	}
	case UAC_VERSION_2:
	case UAC_VERSION_3: {
		/* fp->channels is already set in this case */
		ret = parse_audio_format_rates_v2v3(chip, fp);
		break;
	}
	}

	if (fp->channels < 1) {
		usb_audio_err(chip,
			      "%u:%d : invalid channels %d\n",
			      fp->iface, fp->altsetting, fp->channels);
		return -EINVAL;
	}

	return ret;
}

/*
 * parse the format type II descriptor
 */
static int parse_audio_format_ii(struct snd_usb_audio *chip,
				 struct audioformat *fp,
				 u64 format, void *_fmt)
{
	int brate, framesize, ret;

	/* map the compressed-format tag to a PCM format bitmask */
	switch (format) {
	case UAC_FORMAT_TYPE_II_AC3:
		/* FIXME: there is no AC3 format defined yet */
		// fp->formats = SNDRV_PCM_FMTBIT_AC3;
		fp->formats = SNDRV_PCM_FMTBIT_U8; /* temporary hack to receive byte streams */
		break;
	case UAC_FORMAT_TYPE_II_MPEG:
		fp->formats = SNDRV_PCM_FMTBIT_MPEG;
		break;
	default:
		usb_audio_info(chip,
			       "%u:%d : unknown format tag %#llx is detected. processed as MPEG.\n",
			       fp->iface, fp->altsetting, format);
		fp->formats = SNDRV_PCM_FMTBIT_MPEG;
		break;
	}

	/* type II streams are treated as a single channel of data */
	fp->channels = 1;

	switch (fp->protocol) {
	default:
	case UAC_VERSION_1: {
		struct uac_format_type_ii_discrete_descriptor *fmt = _fmt;

		brate = le16_to_cpu(fmt->wMaxBitRate);
		framesize = le16_to_cpu(fmt->wSamplesPerFrame);
		usb_audio_info(chip, "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize);
		fp->frame_size = framesize;
		ret = parse_audio_format_rates_v1(chip, fp, _fmt, 8); /* fmt[8..]
sample rates */
		break;
	}
	case UAC_VERSION_2: {
		struct uac_format_type_ii_ext_descriptor *fmt = _fmt;

		brate = le16_to_cpu(fmt->wMaxBitRate);
		framesize = le16_to_cpu(fmt->wSamplesPerFrame);
		usb_audio_info(chip, "found format II with max.bitrate = %d, frame size=%d\n", brate, framesize);
		fp->frame_size = framesize;
		ret = parse_audio_format_rates_v2v3(chip, fp);
		break;
	}
	}

	return ret;
}

/*
 * snd_usb_parse_audio_format - fill the audioformat record from a UAC1/UAC2
 * format type descriptor (dispatches on bFormatType).
 * Returns 0 on success, a negative errno on failure, -ENOTSUPP for
 * format types we do not handle.
 */
int snd_usb_parse_audio_format(struct snd_usb_audio *chip,
			       struct audioformat *fp, u64 format,
			       struct uac_format_type_i_continuous_descriptor *fmt,
			       int stream)
{
	int err;

	switch (fmt->bFormatType) {
	case UAC_FORMAT_TYPE_I:
	case UAC_FORMAT_TYPE_III:
		err = parse_audio_format_i(chip, fp, format, fmt);
		break;
	case UAC_FORMAT_TYPE_II:
		err = parse_audio_format_ii(chip, fp, format, fmt);
		break;
	default:
		usb_audio_info(chip,
			       "%u:%d : format type %d is not supported yet\n",
			       fp->iface, fp->altsetting,
			       fmt->bFormatType);
		return -ENOTSUPP;
	}
	fp->fmt_type = fmt->bFormatType;
	if (err < 0)
		return err;
#if 1
	/* FIXME: temporary hack for extigy/audigy 2 nx/zs */
	/* extigy apparently supports sample rates other than 48k
	 * but not in ordinary way. so we enable only 48k atm.
	 */
	if (chip->usb_id == USB_ID(0x041e, 0x3000) ||
	    chip->usb_id == USB_ID(0x041e, 0x3020) ||
	    chip->usb_id == USB_ID(0x041e, 0x3061)) {
		if (fmt->bFormatType == UAC_FORMAT_TYPE_I &&
		    fp->rates != SNDRV_PCM_RATE_48000 &&
		    fp->rates != SNDRV_PCM_RATE_96000)
			return -ENOTSUPP;
	}
#endif
	return 0;
}

/*
 * snd_usb_parse_audio_format_v3 - UAC3 counterpart: derive the format type
 * from the AS header's bmFormats and delegate to parse_audio_format_i().
 */
int snd_usb_parse_audio_format_v3(struct snd_usb_audio *chip,
			       struct audioformat *fp,
			       struct uac3_as_header_descriptor *as,
			       int stream)
{
	u64 format = le64_to_cpu(as->bmFormats);
	int err;

	/*
	 * Type I format bits are D0..D6
	 * This test works because type IV is not supported
	 */
	if (format & 0x7f)
		fp->fmt_type = UAC_FORMAT_TYPE_I;
	else
		fp->fmt_type = UAC_FORMAT_TYPE_III;

	err = parse_audio_format_i(chip, fp, format, as);
	if (err < 0)
		return err;

	return 0;
}
/* * Copyright 2017 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: AMD * */ static const qp_table qp_table_422_10bpc_min = { { 6, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} }, { 6.5, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 8, 9, 9, 9, 12, 16} }, { 7, { 0, 4, 5, 6, 6, 6, 6, 7, 7, 7, 9, 9, 9, 11, 15} }, { 7.5, { 0, 2, 4, 6, 6, 6, 6, 7, 7, 7, 8, 9, 9, 11, 15} }, { 8, { 0, 2, 3, 5, 5, 6, 6, 7, 7, 7, 8, 8, 9, 11, 14} }, { 8.5, { 0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 14} }, { 9, { 0, 2, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 9, 11, 13} }, { 9.5, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 13} }, { 10, { 0, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, {10.5, { 0, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 11, 12} }, { 11, { 0, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11} }, {11.5, { 0, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 10, 11} }, { 12, { 0, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9, 10} }, {12.5, { 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, { 13, { 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 6, 6, 8, 8, 9} }, {13.5, { 0, 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 6, 7, 8, 9} }, { 14, { 0, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 6, 7, 7, 8} }, {14.5, { 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 6, 6, 7, 8} }, { 15, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 6, 8} }, {15.5, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, { 16, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 5, 7} }, {16.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6} }, { 17, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 6} }, {17.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, { 18, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 5} }, {18.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 5} }, { 19, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 4} }, {19.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 4} }, { 20, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 3} } }; static const qp_table qp_table_444_8bpc_max = { { 6, { 4, 6, 8, 8, 9, 9, 9, 10, 11, 12, 12, 12, 12, 13, 15} }, { 6.5, { 4, 6, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 12, 13, 15} }, { 7, { 4, 5, 7, 7, 8, 8, 8, 9, 10, 11, 11, 12, 12, 13, 14} }, { 7.5, { 4, 5, 6, 7, 7, 8, 
8, 9, 10, 10, 11, 11, 12, 13, 14} }, { 8, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, { 8.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, { 9, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 13} }, { 9.5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 13} }, { 10, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12} }, {10.5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 10, 11, 12} }, { 11, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 10, 10, 11} }, {11.5, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, { 12, { 2, 3, 4, 5, 6, 6, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, {12.5, { 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, { 13, { 1, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 8, 8, 9, 10} }, {13.5, { 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10} }, { 14, { 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, {14.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 9} }, { 15, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, {15.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, { 16, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8} }, {16.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8} }, { 17, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 8} }, {17.5, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 8} }, { 18, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7} }, {18.5, { 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 6, 7} }, { 19, { 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6} }, {19.5, { 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6} }, { 20, { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 6} }, {20.5, { 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 4, 6} }, { 21, { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, {21.5, { 0, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, { 22, { 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} }, {22.5, { 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }, { 23, { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, {23.5, { 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 4} }, { 24, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 2, 4} } }; static const qp_table qp_table_420_12bpc_max = { { 4, 
{11, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 21, 22} }, { 4.5, {10, 11, 12, 13, 14, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} }, { 5, { 9, 11, 12, 13, 14, 15, 15, 16, 17, 17, 18, 18, 19, 20, 21} }, { 5.5, { 8, 10, 11, 12, 13, 14, 15, 16, 16, 17, 17, 18, 18, 19, 20} }, { 6, { 6, 9, 11, 12, 13, 14, 15, 16, 16, 17, 17, 17, 17, 18, 19} }, { 6.5, { 6, 8, 10, 11, 11, 13, 14, 15, 15, 16, 16, 17, 17, 18, 19} }, { 7, { 5, 7, 9, 10, 10, 12, 13, 14, 14, 15, 16, 16, 17, 17, 18} }, { 7.5, { 5, 7, 8, 9, 9, 11, 12, 13, 14, 14, 15, 15, 16, 16, 17} }, { 8, { 4, 6, 7, 8, 8, 10, 11, 12, 13, 13, 14, 15, 15, 16, 17} }, { 8.5, { 3, 6, 6, 7, 7, 10, 11, 12, 13, 13, 14, 14, 15, 15, 16} }, { 9, { 3, 5, 6, 7, 7, 10, 11, 12, 12, 13, 13, 14, 14, 14, 15} }, { 9.5, { 2, 5, 6, 6, 7, 9, 10, 11, 12, 12, 13, 13, 13, 14, 15} }, { 10, { 2, 4, 5, 6, 7, 8, 9, 10, 11, 12, 12, 13, 13, 13, 15} }, {10.5, { 2, 3, 5, 5, 6, 7, 8, 9, 11, 11, 12, 12, 12, 12, 14} }, { 11, { 1, 3, 4, 5, 6, 6, 7, 9, 10, 11, 11, 11, 12, 12, 13} }, {11.5, { 1, 2, 3, 4, 5, 6, 6, 8, 9, 10, 10, 11, 11, 11, 13} }, { 12, { 1, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 10, 10, 10, 12} }, {12.5, { 1, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 11} }, { 13, { 1, 1, 1, 2, 4, 4, 6, 6, 7, 8, 8, 9, 9, 9, 11} }, {13.5, { 1, 1, 1, 2, 3, 4, 5, 5, 6, 7, 8, 8, 8, 9, 11} }, { 14, { 1, 1, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, {14.5, { 0, 1, 1, 1, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, { 15, { 0, 1, 1, 1, 1, 2, 3, 3, 5, 5, 5, 6, 6, 7, 9} }, {15.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8} }, { 16, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 7} }, {16.5, { 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 7} }, { 17, { 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 6} }, {17.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6} }, { 18, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 5} } }; static const qp_table qp_table_444_10bpc_min = { { 6, { 0, 4, 7, 7, 9, 9, 9, 9, 9, 10, 10, 10, 10, 12, 18} }, { 6.5, { 0, 4, 6, 7, 8, 8, 9, 9, 9, 9, 10, 10, 10, 12, 18} }, { 7, { 0, 4, 6, 6, 
8, 8, 8, 8, 8, 9, 9, 10, 10, 12, 17} }, { 7.5, { 0, 4, 6, 6, 7, 8, 8, 8, 8, 8, 9, 9, 10, 12, 17} }, { 8, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 8, 9, 9, 9, 12, 16} }, { 8.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 8, 9, 9, 9, 12, 16} }, { 9, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, { 9.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} }, { 10, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 15} }, {10.5, { 0, 4, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 15} }, { 11, { 0, 3, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, {11.5, { 0, 3, 5, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, { 12, { 0, 2, 4, 4, 6, 6, 7, 7, 7, 7, 9, 9, 9, 11, 14} }, {12.5, { 0, 2, 4, 4, 6, 6, 7, 7, 7, 7, 8, 9, 9, 11, 14} }, { 13, { 0, 2, 4, 4, 5, 6, 7, 7, 7, 7, 8, 9, 9, 11, 13} }, {13.5, { 0, 2, 3, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 11, 13} }, { 14, { 0, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 9, 9, 11, 13} }, {14.5, { 0, 2, 3, 4, 5, 5, 6, 6, 6, 7, 7, 8, 9, 11, 12} }, { 15, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 11, 12} }, {15.5, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 9, 11, 12} }, { 16, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8, 8, 10, 11} }, {16.5, { 0, 1, 2, 3, 4, 5, 5, 6, 6, 6, 7, 8, 8, 10, 11} }, { 17, { 0, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8, 9, 11} }, {17.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 11} }, { 18, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, {18.5, { 0, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 8, 9, 10} }, { 19, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9} }, {19.5, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9} }, { 20, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 9} }, {20.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 9} }, { 21, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6, 6, 7, 9} }, {21.5, { 0, 1, 1, 2, 2, 2, 3, 4, 4, 4, 5, 6, 6, 7, 8} }, { 22, { 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 8} }, {22.5, { 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, { 23, { 0, 0, 1, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 5, 7} }, {23.5, { 0, 0, 0, 2, 2, 2, 3, 3, 3, 3, 5, 5, 5, 5, 7} }, { 24, { 0, 0, 0, 1, 1, 2, 3, 3, 3, 3, 4, 4, 4, 5, 7} 
}, {24.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 5, 7} }, { 25, { 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, {25.5, { 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} }, { 26, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 3, 5} }, {26.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 5} }, { 27, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} }, {27.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, { 28, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 4} }, {28.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 4} }, { 29, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3} }, {29.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3} }, { 30, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} } }; static const qp_table qp_table_420_8bpc_max = { { 4, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 13, 14} }, { 4.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} }, { 5, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 12, 13} }, { 5.5, { 3, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12} }, { 6, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} }, { 6.5, { 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} }, { 7, { 1, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 9, 10} }, { 7.5, { 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 8, 9} }, { 8, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} }, { 8.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8} }, { 9, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7} }, { 9.5, { 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} }, { 10, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6} }, {10.5, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} }, { 11, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5} }, {11.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4, 5} }, { 12, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4} } }; static const qp_table qp_table_444_8bpc_min = { { 6, { 0, 1, 3, 3, 5, 5, 5, 5, 5, 6, 6, 6, 6, 9, 14} }, { 6.5, { 0, 1, 2, 3, 4, 4, 5, 5, 5, 5, 6, 6, 6, 9, 14} }, { 7, { 0, 0, 2, 2, 4, 4, 4, 4, 4, 5, 5, 6, 6, 9, 13} }, { 7.5, { 0, 0, 2, 2, 3, 4, 4, 4, 4, 4, 5, 5, 6, 9, 13} }, { 8, { 0, 
0, 1, 1, 3, 3, 3, 3, 3, 4, 5, 5, 5, 8, 12} }, { 8.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 4, 5, 5, 5, 8, 12} }, { 9, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12} }, { 9.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 12} }, { 10, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, {10.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} }, { 11, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, {11.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, { 12, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} }, {12.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 4, 5, 5, 7, 10} }, { 13, { 0, 0, 1, 1, 2, 3, 3, 3, 3, 3, 4, 5, 5, 7, 9} }, {13.5, { 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, { 14, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 5, 5, 7, 9} }, {14.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 5, 7, 8} }, { 15, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, {15.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} }, { 16, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, {16.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} }, { 17, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, {17.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, { 18, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, {18.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} }, { 19, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5} }, {19.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 3, 4, 5} }, { 20, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, {20.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 2, 3, 5} }, { 21, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, {21.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} }, { 22, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }, {22.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} }, { 23, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} }, {23.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} }, { 24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3} } }; static const qp_table qp_table_444_12bpc_min = { { 6, { 0, 5, 11, 11, 13, 13, 13, 13, 13, 14, 14, 14, 14, 17, 22} }, { 6.5, { 0, 5, 10, 
11, 12, 12, 13, 13, 13, 13, 14, 14, 14, 17, 22} }, { 7, { 0, 5, 10, 10, 12, 12, 12, 12, 12, 13, 13, 14, 14, 17, 21} }, { 7.5, { 0, 5, 9, 10, 11, 12, 12, 12, 12, 12, 13, 13, 14, 17, 21} }, { 8, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 12, 13, 13, 13, 16, 20} }, { 8.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 12, 13, 13, 13, 16, 20} }, { 9, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, { 9.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, { 10, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, {10.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, { 11, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, {11.5, { 0, 4, 8, 9, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, { 12, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, {12.5, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} }, { 13, { 0, 4, 7, 8, 9, 11, 11, 11, 11, 11, 13, 13, 13, 15, 17} }, {13.5, { 0, 3, 6, 7, 9, 10, 10, 11, 11, 11, 12, 13, 13, 15, 17} }, { 14, { 0, 3, 5, 6, 9, 9, 9, 10, 11, 11, 12, 13, 13, 15, 17} }, {14.5, { 0, 2, 5, 6, 8, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, { 15, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, {15.5, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} }, { 16, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 11, 12, 12, 14, 15} }, {16.5, { 0, 2, 3, 5, 7, 8, 9, 10, 11, 11, 11, 12, 12, 14, 15} }, { 17, { 0, 2, 3, 5, 5, 6, 9, 9, 10, 10, 11, 11, 12, 13, 15} }, {17.5, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 15} }, { 18, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, {18.5, { 0, 2, 3, 5, 5, 6, 8, 9, 10, 10, 11, 11, 12, 13, 14} }, { 19, { 0, 1, 2, 4, 5, 5, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, {19.5, { 0, 1, 2, 4, 5, 5, 7, 8, 9, 9, 10, 11, 11, 12, 13} }, { 20, { 0, 1, 2, 3, 4, 5, 7, 8, 8, 8, 9, 10, 10, 11, 13} }, {20.5, { 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 9, 10, 10, 11, 13} }, { 21, { 0, 1, 2, 3, 4, 5, 5, 7, 7, 8, 9, 10, 10, 11, 13} }, {21.5, { 0, 1, 2, 3, 3, 4, 5, 7, 7, 8, 9, 10, 10, 11, 12} }, 
{ 22, { 0, 0, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 9, 10, 12} }, {22.5, { 0, 0, 1, 3, 3, 4, 5, 6, 7, 8, 9, 9, 9, 10, 11} }, { 23, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 7, 9, 9, 9, 9, 11} }, {23.5, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 7, 9, 9, 9, 9, 11} }, { 24, { 0, 0, 1, 2, 3, 4, 5, 6, 6, 7, 8, 8, 8, 9, 11} }, {24.5, { 0, 0, 1, 2, 3, 4, 4, 6, 6, 7, 8, 8, 8, 9, 11} }, { 25, { 0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 8, 8, 8, 8, 10} }, {25.5, { 0, 0, 1, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 8, 10} }, { 26, { 0, 0, 1, 2, 2, 3, 4, 5, 5, 6, 7, 7, 7, 7, 9} }, {26.5, { 0, 0, 1, 2, 2, 3, 4, 5, 5, 5, 7, 7, 7, 7, 9} }, { 27, { 0, 0, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} }, {27.5, { 0, 0, 1, 1, 2, 2, 4, 4, 4, 5, 6, 7, 7, 7, 9} }, { 28, { 0, 0, 0, 1, 1, 2, 3, 4, 4, 4, 6, 6, 6, 7, 9} }, {28.5, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8} }, { 29, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 8} }, {29.5, { 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7} }, { 30, { 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 5, 5, 5, 5, 7} }, {30.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 4, 5, 7} }, { 31, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 4, 5, 7} }, {31.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} }, { 32, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 6} }, {32.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 6} }, { 33, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, {33.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 5} }, { 34, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 5} }, {34.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 2, 3, 5} }, { 35, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }, {35.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 4} }, { 36, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 3} } }; static const qp_table qp_table_420_12bpc_min = { { 4, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 21} }, { 4.5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, { 5, { 0, 4, 8, 9, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 20} }, { 5.5, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} }, { 6, { 0, 4, 7, 8, 10, 11, 11, 
11, 11, 11, 13, 13, 13, 15, 18} },
	/* NOTE(review): the rows below complete a qp_table whose declaration
	 * begins before this chunk; left untouched. */
	{ 6.5, { 0, 4, 6, 8, 9, 10, 11, 11, 11, 11, 13, 13, 13, 15, 18} },
	{   7, { 0, 3, 5, 7, 9, 10, 10, 11, 11, 11, 13, 13, 13, 15, 17} },
	{ 7.5, { 0, 3, 5, 7, 8, 9, 10, 10, 11, 11, 12, 13, 13, 15, 16} },
	{   8, { 0, 2, 4, 6, 7, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} },
	{ 8.5, { 0, 2, 4, 6, 6, 9, 9, 10, 11, 11, 12, 12, 13, 14, 15} },
	{   9, { 0, 2, 4, 6, 6, 9, 9, 10, 10, 11, 11, 12, 13, 13, 14} },
	{ 9.5, { 0, 2, 4, 5, 6, 8, 8, 9, 10, 10, 11, 12, 12, 13, 14} },
	{  10, { 0, 2, 3, 5, 6, 7, 8, 8, 9, 10, 10, 12, 12, 12, 14} },
	{10.5, { 0, 2, 3, 4, 5, 6, 7, 8, 9, 9, 10, 11, 11, 11, 13} },
	{  11, { 0, 2, 3, 4, 5, 5, 6, 8, 8, 9, 9, 10, 11, 11, 12} },
	{11.5, { 0, 1, 2, 3, 4, 5, 5, 7, 8, 8, 9, 10, 10, 10, 12} },
	{  12, { 0, 0, 2, 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 11} },
	{12.5, { 0, 0, 1, 2, 3, 4, 5, 6, 7, 7, 8, 8, 8, 9, 10} },
	{  13, { 0, 0, 0, 1, 3, 3, 5, 5, 6, 7, 7, 8, 8, 8, 10} },
	{13.5, { 0, 0, 0, 1, 2, 3, 4, 4, 5, 6, 7, 7, 7, 8, 10} },
	{  14, { 0, 0, 0, 1, 2, 3, 3, 4, 5, 5, 6, 7, 7, 7, 9} },
	{14.5, { 0, 0, 0, 0, 1, 2, 3, 3, 4, 4, 5, 6, 6, 6, 8} },
	{  15, { 0, 0, 0, 0, 0, 1, 2, 2, 4, 4, 4, 5, 5, 6, 8} },
	{15.5, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 7} },
	{  16, { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 6} },
	{16.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 6} },
	{  17, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 3, 5} },
	{17.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 3, 5} },
	{  18, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 2, 4} }
};

/*
 * DSC rate-control QP lookup tables.
 *
 * Each entry is { target bits-per-pixel, { one QP value per rate-control
 * range } } with 15 rate-control ranges per row.  The table names encode
 * chroma format (444/422/420), component bit depth (8/10/12 bpc) and
 * whether the values are the minimum or maximum QP bound.
 *
 * NOTE(review): the min/max-QP semantics are inferred from the
 * identifiers; confirm against the VESA DSC rate-control tables.
 * Values are data — do not hand-edit.
 */

/* 4:2:2, 12 bpc — minimum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_422_12bpc_min = {
	{   6, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} },
	{ 6.5, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 16, 20} },
	{   7, { 0, 4, 9, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} },
	{ 7.5, { 0, 4, 8, 10, 11, 11, 11, 11, 11, 11, 13, 13, 13, 15, 19} },
	{   8, { 0, 4, 7, 8, 10, 11, 11, 11, 11, 11, 13, 13, 13, 15, 18} },
	{ 8.5, { 0, 3, 6, 8, 9, 10, 10, 11, 11, 11, 12, 13, 13, 15, 18} },
	{   9, { 0, 3, 5, 8, 9, 10, 10, 10, 11, 11, 12, 13, 13, 15, 17} },
	{ 9.5, { 0, 3, 5, 7, 8, 9, 10, 10, 11, 11, 12, 13, 13, 15, 17} },
	{  10, { 0, 2, 4, 6, 7, 9, 9, 10, 11, 11, 12, 13, 13, 15, 16} },
	{10.5, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 13, 13, 15, 16} },
	{  11, { 0, 2, 4, 6, 7, 8, 9, 10, 11, 11, 12, 12, 13, 14, 15} },
	{11.5, { 0, 2, 4, 6, 7, 7, 9, 9, 10, 11, 11, 12, 12, 14, 15} },
	{  12, { 0, 2, 4, 6, 6, 6, 8, 8, 9, 9, 11, 11, 12, 13, 14} },
	{12.5, { 0, 1, 4, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, 13, 14} },
	{  13, { 0, 1, 3, 4, 5, 5, 7, 8, 8, 9, 10, 10, 11, 12, 13} },
	{13.5, { 0, 1, 3, 3, 4, 5, 7, 7, 8, 8, 10, 10, 10, 12, 13} },
	{  14, { 0, 0, 2, 3, 4, 5, 6, 6, 7, 7, 9, 10, 10, 11, 12} },
	{14.5, { 0, 0, 1, 3, 4, 4, 6, 6, 6, 7, 9, 9, 9, 11, 12} },
	{  15, { 0, 0, 1, 3, 3, 4, 5, 6, 6, 6, 8, 9, 9, 10, 12} },
	{15.5, { 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 8, 8, 8, 10, 11} },
	{  16, { 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 8, 8, 8, 9, 11} },
	{16.5, { 0, 0, 0, 2, 2, 3, 4, 5, 5, 5, 6, 7, 7, 9, 10} },
	{  17, { 0, 0, 0, 1, 2, 2, 4, 4, 4, 5, 6, 6, 6, 8, 10} },
	{17.5, { 0, 0, 0, 1, 2, 2, 3, 4, 4, 4, 5, 6, 6, 8, 9} },
	{  18, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 5, 5, 6, 7, 9} },
	{18.5, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 3, 5, 5, 5, 7, 9} },
	{  19, { 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 6, 8} },
	{19.5, { 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 6, 8} },
	{  20, { 0, 0, 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 7} },
	{20.5, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 7} },
	{  21, { 0, 0, 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 4, 6} },
	{21.5, { 0, 0, 0, 0, 0, 0, 1, 2, 2, 2, 3, 3, 3, 4, 6} },
	{  22, { 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 6} },
	{22.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 5} },
	{  23, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 2, 2, 3, 5} },
	{23.5, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 4} },
	{  24, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 4} }
};

/* 4:2:2, 12 bpc — maximum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_422_12bpc_max = {
	{   6, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },
	{ 6.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },
	{   7, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} },
	{ 7.5, { 9, 10, 12, 14, 15, 15, 15, 16, 16, 17, 17, 18, 18, 19, 20} },
	{   8, { 6, 9, 10, 12, 14, 15, 15, 16, 16, 17, 17, 17, 17, 18, 19} },
	{ 8.5, { 6, 8, 9, 11, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18, 19} },
	{   9, { 5, 7, 8, 10, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18} },
	{ 9.5, { 5, 7, 7, 9, 10, 12, 12, 13, 14, 14, 15, 15, 16, 17, 18} },
	{  10, { 4, 6, 6, 8, 9, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} },
	{10.5, { 4, 6, 6, 8, 9, 10, 11, 12, 13, 13, 14, 15, 15, 16, 17} },
	{  11, { 4, 5, 6, 8, 9, 10, 11, 12, 13, 13, 14, 14, 15, 15, 16} },
	{11.5, { 3, 5, 6, 8, 9, 9, 11, 11, 12, 13, 13, 14, 14, 15, 16} },
	{  12, { 3, 5, 6, 8, 8, 8, 10, 10, 11, 11, 13, 13, 14, 14, 15} },
	{12.5, { 3, 4, 6, 7, 8, 8, 9, 10, 10, 11, 12, 13, 13, 14, 15} },
	{  13, { 2, 4, 5, 6, 7, 7, 9, 10, 10, 11, 12, 12, 13, 13, 14} },
	{13.5, { 2, 4, 5, 5, 6, 7, 9, 9, 10, 10, 12, 12, 12, 13, 14} },
	{  14, { 2, 3, 4, 5, 6, 7, 8, 8, 9, 9, 11, 12, 12, 12, 13} },
	{14.5, { 2, 3, 3, 5, 6, 6, 8, 8, 8, 9, 11, 11, 11, 12, 13} },
	{  15, { 2, 3, 3, 5, 5, 6, 7, 8, 8, 8, 10, 11, 11, 11, 13} },
	{15.5, { 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 10, 10, 10, 11, 12} },
	{  16, { 2, 2, 3, 4, 5, 6, 7, 7, 8, 8, 10, 10, 10, 10, 12} },
	{16.5, { 1, 2, 2, 4, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 11} },
	{  17, { 1, 1, 2, 3, 4, 4, 6, 6, 6, 7, 8, 8, 8, 9, 11} },
	{17.5, { 1, 1, 2, 3, 4, 4, 5, 6, 6, 6, 7, 8, 8, 9, 10} },
	{  18, { 1, 1, 1, 2, 3, 3, 5, 5, 5, 6, 7, 7, 8, 8, 10} },
	{18.5, { 1, 1, 1, 2, 3, 3, 5, 5, 5, 5, 7, 7, 7, 8, 10} },
	{  19, { 1, 1, 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 7, 9} },
	{19.5, { 1, 1, 1, 2, 2, 2, 4, 5, 5, 5, 6, 6, 6, 7, 9} },
	{  20, { 1, 1, 1, 2, 2, 2, 4, 5, 5, 5, 6, 6, 6, 6, 8} },
	{20.5, { 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 8} },
	{  21, { 0, 0, 0, 1, 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 7} },
	{21.5, { 0, 0, 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 7} },
	{  22, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5, 7} },
	{22.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 6} },
	{  23, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 3, 4, 6} },
	{23.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 5} },
	{  24, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 3, 5} }
};

/* 4:4:4, 12 bpc — maximum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_444_12bpc_max = {
	{   6, {12, 14, 16, 16, 17, 17, 17, 18, 19, 20, 20, 20, 20, 21, 23} },
	{ 6.5, {12, 14, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 20, 21, 23} },
	{   7, {12, 13, 15, 15, 16, 16, 16, 17, 18, 19, 19, 20, 20, 21, 22} },
	{ 7.5, {12, 13, 14, 15, 15, 16, 16, 17, 18, 18, 19, 19, 20, 21, 22} },
	{   8, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },
	{ 8.5, {12, 12, 13, 14, 15, 15, 15, 16, 17, 18, 18, 19, 19, 20, 21} },
	{   9, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 21} },
	{ 9.5, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 21} },
	{  10, {11, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 19, 19, 20} },
	{10.5, {10, 12, 13, 14, 15, 15, 15, 16, 17, 17, 18, 18, 18, 19, 20} },
	{  11, { 9, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 18, 18, 19} },
	{11.5, { 9, 11, 13, 14, 15, 15, 15, 16, 16, 17, 17, 17, 17, 18, 19} },
	{  12, { 6, 9, 12, 13, 14, 14, 15, 16, 16, 17, 17, 17, 17, 18, 19} },
	{12.5, { 6, 9, 12, 13, 14, 14, 14, 15, 15, 16, 16, 17, 17, 18, 19} },
	{  13, { 5, 9, 12, 13, 13, 14, 14, 15, 15, 16, 16, 16, 16, 17, 18} },
	{13.5, { 5, 8, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18} },
	{  14, { 5, 8, 10, 11, 12, 12, 12, 13, 14, 14, 15, 16, 16, 16, 18} },
	{14.5, { 4, 7, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 17} },
	{  15, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} },
	{15.5, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 14, 15, 15, 16, 17} },
	{  16, { 4, 7, 9, 10, 10, 11, 11, 12, 13, 13, 13, 14, 14, 15, 16} },
	{16.5, { 4, 5, 7, 8, 10, 11, 11, 12, 13, 13, 13, 14, 14, 15, 16} },
	{  17, { 4, 5, 7, 8, 8, 9, 11, 11, 12, 12, 12, 13, 13, 14, 16} },
	{17.5, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 16} },
	{  18, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 15} },
	{18.5, { 3, 5, 7, 8, 8, 9, 10, 11, 12, 12, 12, 13, 13, 14, 15} },
	{  19, { 3, 4, 6, 7, 8, 8, 9, 10, 11, 11, 11, 12, 12, 13, 14} },
	{19.5, { 3, 4, 6, 7, 8, 8, 9, 10, 11, 11, 11, 12, 12, 13, 14} },
	{  20, { 2, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10, 11, 11, 12, 14} },
	{20.5, { 2, 3, 5, 5, 7, 8, 8, 8, 9, 10, 10, 11, 11, 12, 14} },
	{  21, { 2, 3, 5, 5, 7, 7, 7, 8, 8, 9, 10, 11, 11, 12, 14} },
	{21.5, { 2, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11, 11, 12, 13} },
	{  22, { 2, 2, 4, 5, 6, 6, 7, 7, 8, 9, 10, 10, 10, 11, 13} },
	{22.5, { 2, 2, 4, 5, 5, 6, 7, 7, 8, 9, 10, 10, 10, 11, 12} },
	{  23, { 2, 2, 4, 5, 5, 6, 7, 7, 7, 8, 10, 10, 10, 10, 12} },
	{23.5, { 2, 2, 3, 5, 5, 6, 7, 7, 7, 8, 10, 10, 10, 10, 12} },
	{  24, { 2, 2, 3, 4, 4, 5, 7, 7, 7, 8, 9, 9, 9, 10, 12} },
	{24.5, { 1, 2, 3, 4, 4, 5, 6, 7, 7, 8, 9, 9, 9, 10, 12} },
	{  25, { 1, 2, 3, 3, 4, 5, 6, 6, 7, 8, 9, 9, 9, 9, 11} },
	{25.5, { 1, 1, 3, 3, 4, 5, 6, 6, 7, 7, 8, 9, 9, 9, 11} },
	{  26, { 1, 1, 3, 3, 3, 4, 5, 6, 6, 7, 8, 8, 8, 8, 10} },
	{26.5, { 1, 1, 2, 3, 3, 4, 5, 6, 6, 6, 8, 8, 8, 8, 10} },
	{  27, { 1, 1, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 10} },
	{27.5, { 1, 1, 2, 2, 3, 3, 5, 5, 5, 6, 7, 8, 8, 8, 10} },
	{  28, { 0, 1, 1, 2, 2, 3, 4, 5, 5, 5, 7, 7, 7, 8, 10} },
	{28.5, { 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} },
	{  29, { 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6, 6, 7, 7, 9} },
	{29.5, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8} },
	{  30, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 6, 6, 6, 6, 8} },
	{30.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 8} },
	{  31, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 5, 6, 8} },
	{31.5, { 0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 8} },
	{  32, { 0, 0, 0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 7} },
	{32.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 4, 4, 4, 5, 7} },
	{  33, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} },
	{33.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} },
	{  34, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 4, 6} },
	{34.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 3, 3, 3, 3, 4, 6} },
	{  35, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} },
	{35.5, { 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 5} },
	{  36, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 4} }
};

/* 4:2:0, 8 bpc — minimum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_420_8bpc_min = {
	{   4, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 9, 13} },
	{ 4.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },
	{   5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },
	{ 5.5, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} },
	{   6, { 0, 0, 1, 1, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} },
	{ 6.5, { 0, 0, 1, 1, 2, 2, 3, 3, 3, 3, 4, 5, 5, 7, 10} },
	{   7, { 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} },
	{ 7.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 4, 4, 5, 7, 8} },
	{   8, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} },
	{ 8.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} },
	{   9, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6} },
	{ 9.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} },
	{  10, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5} },
	{10.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 5} },
	{  11, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4} },
	{11.5, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4} },
	{  12, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 3} }
};

/* 4:2:2, 8 bpc — minimum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_422_8bpc_min = {
	{   6, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },
	{ 6.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 8, 12} },
	{   7, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} },
	{ 7.5, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 11} },
	{   8, { 0, 0, 1, 2, 3, 3, 3, 3, 3, 3, 5, 5, 5, 7, 10} },
	{ 8.5, { 0, 0, 1, 2, 2, 2, 2, 3, 3, 3, 4, 5, 5, 7, 10} },
	{   9, { 0, 0, 0, 1, 2, 2, 2, 2, 2, 3, 4, 5, 5, 7, 9} },
	{ 9.5, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 9} },
	{  10, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} },
	{10.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 7, 8} },
	{  11, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} },
	{11.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 6, 7} },
	{  12, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 4, 4, 5, 5, 6} },
	{12.5, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 5, 6} },
	{  13, { 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 4, 5} },
	{13.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 4, 5} },
	{  14, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4} },
	{14.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 4} },
	{  15, { 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 4} },
	{15.5, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} },
	{  16, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3} }
};

/* 4:2:2, 10 bpc — maximum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_422_10bpc_max = {
	{   6, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },
	{ 6.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },
	{   7, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} },
	{ 7.5, { 5, 6, 8, 10, 11, 11, 11, 12, 12, 13, 13, 14, 14, 15, 16} },
	{   8, { 4, 6, 7, 9, 10, 11, 11, 12, 12, 13, 13, 13, 13, 14, 15} },
	{ 8.5, { 4, 5, 6, 8, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15} },
	{   9, { 3, 4, 5, 7, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14} },
	{ 9.5, { 3, 4, 4, 6, 6, 8, 8, 9, 10, 10, 11, 11, 12, 13, 14} },
	{  10, { 2, 3, 3, 5, 5, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} },
	{10.5, { 2, 3, 3, 5, 5, 6, 7, 8, 9, 9, 10, 11, 11, 12, 13} },
	{  11, { 2, 3, 3, 5, 5, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12} },
	{11.5, { 2, 3, 3, 5, 5, 5, 7, 7, 8, 9, 9, 10, 10, 11, 12} },
	{  12, { 2, 3, 3, 5, 5, 5, 7, 7, 8, 8, 9, 9, 10, 10, 11} },
	{12.5, { 2, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 9, 9, 10, 11} },
	{  13, { 1, 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10} },
	{13.5, { 1, 2, 3, 3, 4, 5, 6, 6, 7, 7, 8, 8, 8, 9, 10} },
	{  14, { 1, 2, 3, 3, 4, 5, 5, 5, 6, 6, 7, 8, 8, 8, 9} },
	{14.5, { 1, 2, 2, 3, 4, 4, 5, 5, 5, 6, 7, 7, 7, 8, 9} },
	{  15, { 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7, 7, 7, 9} },
	{15.5, { 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 7, 8} },
	{  16, { 1, 1, 2, 2, 3, 4, 4, 4, 5, 5, 6, 6, 6, 6, 8} },
	{16.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 7} },
	{  17, { 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 7} },
	{17.5, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 5, 6} },
	{  18, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 6} },
	{18.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 4, 6} },
	{  19, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 5} },
	{19.5, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 5} },
	{  20, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 4} }
};

/* 4:2:0, 10 bpc — maximum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_420_10bpc_max = {
	{   4, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 17, 18} },
	{ 4.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },
	{   5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 16, 17} },
	{ 5.5, { 6, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14, 14, 15, 16} },
	{   6, { 4, 6, 8, 9, 10, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15} },
	{ 6.5, { 4, 5, 7, 8, 8, 9, 10, 11, 11, 12, 12, 13, 13, 14, 15} },
	{   7, { 3, 4, 6, 7, 7, 8, 9, 10, 10, 11, 12, 12, 13, 13, 14} },
	{ 7.5, { 3, 4, 5, 6, 6, 7, 8, 9, 10, 10, 11, 11, 12, 12, 13} },
	{   8, { 2, 3, 4, 5, 5, 6, 7, 8, 9, 9, 10, 11, 11, 12, 13} },
	{ 8.5, { 1, 3, 3, 4, 4, 6, 7, 8, 9, 9, 10, 10, 11, 11, 12} },
	{   9, { 1, 3, 3, 4, 4, 6, 7, 8, 8, 9, 9, 10, 10, 10, 11} },
	{ 9.5, { 1, 3, 3, 3, 4, 5, 6, 7, 8, 8, 9, 9, 9, 10, 11} },
	{  10, { 1, 2, 3, 3, 4, 4, 5, 6, 7, 8, 8, 9, 9, 9, 11} },
	{10.5, { 1, 1, 3, 3, 3, 4, 5, 5, 7, 7, 8, 8, 8, 8, 10} },
	{  11, { 0, 1, 2, 3, 3, 3, 4, 5, 6, 7, 7, 7, 8, 8, 9} },
	{11.5, { 0, 1, 1, 2, 3, 3, 3, 4, 5, 6, 6, 7, 7, 7, 9} },
	{  12, { 0, 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 6, 8} },
	{12.5, { 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 5, 6, 7} },
	{  13, { 0, 0, 0, 1, 2, 2, 3, 3, 3, 4, 4, 5, 5, 5, 7} },
	{13.5, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 4, 6} },
	{  14, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} },
	{14.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 5} },
	{  15, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 5} }
};

/* 4:2:0, 10 bpc — minimum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_420_10bpc_min = {
	{   4, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 13, 17} },
	{ 4.5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} },
	{   5, { 0, 4, 4, 5, 7, 7, 7, 7, 7, 7, 9, 9, 9, 12, 16} },
	{ 5.5, { 0, 3, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 15} },
	{   6, { 0, 2, 3, 4, 6, 7, 7, 7, 7, 7, 9, 9, 9, 11, 14} },
	{ 6.5, { 0, 2, 3, 4, 5, 6, 6, 7, 7, 7, 8, 9, 9, 11, 14} },
	{   7, { 0, 2, 3, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 11, 13} },
	{ 7.5, { 0, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8, 8, 9, 11, 12} },
	{   8, { 0, 2, 3, 4, 4, 5, 5, 6, 6, 7, 8, 8, 9, 11, 12} },
	{ 8.5, { 0, 2, 2, 3, 3, 5, 5, 6, 6, 7, 8, 8, 9, 10, 11} },
	{   9, { 0, 2, 2, 3, 3, 5, 5, 6, 6, 7, 7, 8, 9, 9, 10} },
	{ 9.5, { 0, 2, 2, 2, 3, 4, 4, 5, 6, 6, 7, 8, 8, 9, 10} },
	{  10, { 0, 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 8, 8, 8, 10} },
	{10.5, { 0, 0, 2, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 7, 9} },
	{  11, { 0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 5, 6, 7, 7, 8} },
	{11.5, { 0, 0, 0, 1, 2, 2, 2, 3, 4, 4, 5, 6, 6, 6, 8} },
	{  12, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 4, 4, 5, 5, 5, 7} },
	{12.5, { 0, 0, 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 6} },
	{  13, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} },
	{13.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 3, 3, 3, 3, 5} },
	{  14, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} },
	{14.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 4} },
	{  15, { 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 4} }
};

/* 4:4:4, 10 bpc — maximum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_444_10bpc_max = {
	{   6, { 8, 10, 12, 12, 13, 13, 13, 14, 15, 16, 16, 16, 16, 17, 19} },
	{ 6.5, { 8, 10, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 16, 17, 19} },
	{   7, { 8, 9, 11, 11, 12, 12, 12, 13, 14, 15, 15, 16, 16, 17, 18} },
	{ 7.5, { 8, 9, 10, 11, 11, 12, 12, 13, 14, 14, 15, 15, 16, 17, 18} },
	{   8, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },
	{ 8.5, { 8, 8, 9, 10, 11, 11, 11, 12, 13, 14, 14, 15, 15, 16, 17} },
	{   9, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 17} },
	{ 9.5, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 17} },
	{  10, { 7, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 15, 15, 16} },
	{10.5, { 6, 8, 9, 10, 11, 11, 11, 12, 13, 13, 14, 14, 14, 15, 16} },
	{  11, { 5, 7, 9, 10, 11, 11, 11, 12, 12, 13, 13, 13, 14, 14, 15} },
	{11.5, { 5, 7, 9, 10, 11, 11, 11, 12, 12, 13, 13, 13, 13, 14, 15} },
	{  12, { 4, 6, 8, 9, 10, 10, 11, 12, 12, 13, 13, 13, 13, 14, 15} },
	{12.5, { 4, 6, 8, 9, 10, 10, 10, 11, 11, 12, 12, 13, 13, 14, 15} },
	{  13, { 3, 6, 8, 9, 9, 10, 10, 11, 11, 12, 12, 12, 12, 13, 14} },
	{13.5, { 3, 5, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 14} },
	{  14, { 3, 5, 6, 7, 8, 8, 8, 9, 10, 10, 11, 12, 12, 12, 14} },
	{14.5, { 2, 4, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 13} },
	{  15, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} },
	{15.5, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 10, 11, 11, 12, 13} },
	{  16, { 2, 4, 5, 6, 6, 7, 7, 8, 9, 9, 9, 10, 10, 11, 12} },
	{16.5, { 2, 3, 4, 5, 6, 7, 7, 8, 9, 9, 9, 10, 10, 11, 12} },
	{  17, { 2, 3, 4, 5, 5, 6, 7, 7, 8, 8, 8, 9, 9, 10, 12} },
	{17.5, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 12} },
	{  18, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 11} },
	{18.5, { 1, 3, 4, 5, 5, 6, 6, 7, 8, 8, 8, 9, 9, 10, 11} },
	{  19, { 1, 2, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 8, 9, 10} },
	{19.5, { 1, 2, 3, 4, 5, 5, 5, 6, 7, 7, 7, 8, 8, 9, 10} },
	{  20, { 1, 2, 3, 3, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8, 10} },
	{20.5, { 1, 2, 3, 3, 4, 5, 5, 5, 5, 6, 6, 7, 7, 8, 10} },
	{  21, { 1, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 7, 7, 8, 10} },
	{21.5, { 1, 2, 3, 3, 3, 3, 4, 5, 5, 5, 6, 7, 7, 8, 9} },
	{  22, { 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 9} },
	{22.5, { 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 6, 6, 6, 7, 8} },
	{  23, { 1, 1, 2, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, 8} },
	{23.5, { 1, 1, 1, 3, 3, 3, 4, 4, 4, 4, 6, 6, 6, 6, 8} },
	{  24, { 1, 1, 1, 2, 2, 3, 4, 4, 4, 4, 5, 5, 5, 6, 8} },
	{24.5, { 0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 5, 5, 5, 6, 8} },
	{  25, { 0, 1, 1, 1, 2, 3, 3, 3, 4, 4, 5, 5, 5, 5, 7} },
	{25.5, { 0, 0, 1, 1, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 7} },
	{  26, { 0, 0, 1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 4, 6} },
	{26.5, { 0, 0, 0, 1, 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 6} },
	{  27, { 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 6} },
	{27.5, { 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 6} },
	{  28, { 0, 0, 0, 0, 0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 5} },
	{28.5, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 5} },
	{  29, { 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 2, 3, 3, 4} },
	{29.5, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4} },
	{  30, { 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 4} }
};

/* 4:2:2, 8 bpc — maximum QP per rate-control range, keyed by bpp */
static const qp_table qp_table_422_8bpc_max = {
	{   6, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} },
	{ 6.5, { 4, 4, 5, 6, 7, 7, 7, 8, 9, 10, 10, 11, 11, 12, 13} },
	{   7, { 3, 4, 5, 6, 7, 7, 7, 8, 9, 9, 10, 10, 11, 11, 12} },
	{ 7.5, { 3, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 10, 10, 11, 12} },
	{   8, { 2, 4, 5, 6, 7, 7, 7, 8, 8, 9, 9, 9, 9, 10, 11} },
	{ 8.5, { 2, 3, 4, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 11} },
	{   9, { 1, 2, 3, 4, 5, 5, 5, 6, 6, 7, 7, 8, 8, 9, 10} },
	{ 9.5, { 1, 2, 2, 3, 3, 4, 4, 5, 6, 6, 7, 7, 8, 9, 10} },
	{  10, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} },
	{10.5, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 7, 7, 8, 9} },
	{  11, { 0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 7, 8} },
	{11.5, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 5, 5, 6, 6, 7, 8} },
	{  12, { 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7} },
	{12.5, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6, 7} },
	{  13, { 0, 0, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 5, 6} },
	{13.5, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4, 4, 4, 5, 6} },
	{  14, { 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5} },
	{14.5, { 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 5} },
	{  15, { 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 5} },
	{15.5, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} },
	{  16, { 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 4} }
};
/* * BSC9132 Silicon/SoC Device Tree Source (post include) * * Copyright 2014 Freescale Semiconductor Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/* Integrated Flash Controller (IFC) */
&ifc {
	#address-cells = <2>;
	#size-cells = <1>;
	compatible = "fsl,ifc";
	/* FIXME: Test whether interrupts are split */
	interrupts = <16 2 0 0 20 2 0 0>;
};

/* controller at 0xa000 */
&pci0 {
	compatible = "fsl,bsc9132-pcie", "fsl,qoriq-pcie-v2.2";
	device_type = "pci";
	#size-cells = <2>;
	#address-cells = <3>;
	bus-range = <0 255>;
	interrupts = <16 2 0 0>;

	pcie@0 {
		reg = <0 0 0 0 0>;
		#interrupt-cells = <1>;
		#size-cells = <2>;
		#address-cells = <3>;
		device_type = "pci";
		interrupts = <16 2 0 0>;
		/* Mask matches only the device/function part of the
		 * child unit address, so all four INTx pins map by pin
		 * number alone. */
		interrupt-map-mask = <0xf800 0 0 7>;
		interrupt-map = <
			/* IDSEL 0x0 */
			0000 0x0 0x0 0x1 &mpic 0x0 0x2 0x0 0x0
			0000 0x0 0x0 0x2 &mpic 0x1 0x2 0x0 0x0
			0000 0x0 0x0 0x3 &mpic 0x2 0x2 0x0 0x0
			0000 0x0 0x0 0x4 &mpic 0x3 0x2 0x0 0x0
			>;
	};
};

/* On-chip peripherals; board/pre-include files provide the ranges */
&soc {
	#address-cells = <1>;
	#size-cells = <1>;
	device_type = "soc";
	compatible = "fsl,bsc9132-immr", "simple-bus";
	bus-frequency = <0>;		// Filled out by uboot.

	ecm-law@0 {
		compatible = "fsl,ecm-law";
		reg = <0x0 0x1000>;
		fsl,num-laws = <12>;
	};

	ecm@1000 {
		compatible = "fsl,bsc9132-ecm", "fsl,ecm";
		reg = <0x1000 0x1000>;
		interrupts = <16 2 0 0>;
	};

	memory-controller@2000 {
		compatible = "fsl,bsc9132-memory-controller";
		reg = <0x2000 0x1000>;
		interrupts = <16 2 1 8>;
	};

/include/ "pq3-i2c-0.dtsi"
	i2c@3000 {
		interrupts = <17 2 0 0>;
	};

/include/ "pq3-i2c-1.dtsi"
	i2c@3100 {
		interrupts = <17 2 0 0>;
	};

/include/ "pq3-duart-0.dtsi"
	serial0: serial@4500 {
		interrupts = <18 2 0 0>;
	};

	serial1: serial@4600 {
		interrupts = <18 2 0 0 >;
	};

/include/ "pq3-espi-0.dtsi"
	spi0: spi@7000 {
		fsl,espi-num-chipselects = <1>;
		interrupts = <22 0x2 0 0>;
	};

/include/ "pq3-gpio-0.dtsi"
	gpio-controller@f000 {
		interrupts = <19 0x2 0 0>;
	};

	L2: l2-cache-controller@20000 {
		compatible = "fsl,bsc9132-l2-cache-controller";
		reg = <0x20000 0x1000>;
		cache-line-size = <32>;	// 32 bytes
		cache-size = <0x40000>;	// L2,256K
		interrupts = <16 2 1 0>;
	};

/include/ "pq3-dma-0.dtsi"
	dma@21300 {
		dma-channel@0 {
			interrupts = <62 2 0 0>;
		};
		dma-channel@80 {
			interrupts = <63 2 0 0>;
		};
		dma-channel@100 {
			interrupts = <64 2 0 0>;
		};
		dma-channel@180 {
			interrupts = <65 2 0 0>;
		};
	};

/include/ "pq3-usb2-dr-0.dtsi"
	usb@22000 {
		compatible = "fsl-usb2-dr","fsl-usb2-dr-v2.2";
		interrupts = <40 0x2 0 0>;
	};

/include/ "pq3-esdhc-0.dtsi"
	sdhc@2e000 {
		fsl,sdhci-auto-cmd12;
		interrupts = <41 0x2 0 0>;
	};

/include/ "pq3-sec4.4-0.dtsi"
	crypto@30000 {
		interrupts = <57 2 0 0>;

		sec_jr0: jr@1000 {
			interrupts = <58 2 0 0>;
		};

		sec_jr1: jr@2000 {
			interrupts = <59 2 0 0>;
		};

		sec_jr2: jr@3000 {
			interrupts = <60 2 0 0>;
		};

		sec_jr3: jr@4000 {
			interrupts = <61 2 0 0>;
		};
	};

/include/ "pq3-mpic.dtsi"
/include/ "pq3-mpic-timer-B.dtsi"

/include/ "pq3-etsec2-0.dtsi"
	enet0: ethernet@b0000 {
		queue-group@b0000 {
			interrupts = <26 2 0 0 27 2 0 0 28 2 0 0>;
		};
	};

/include/ "pq3-etsec2-1.dtsi"
	enet1: ethernet@b1000 {
		queue-group@b1000 {
			interrupts = <33 2 0 0 34 2 0 0 35 2 0 0>;
		};
	};

	global-utilities@e0000 {
		compatible = "fsl,bsc9132-guts";
		reg = <0xe0000 0x1000>;
		fsl,has-rstcr;
	};
};
// SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause)
/*
 * Copyright (c) STMicroelectronics 2019 - All Rights Reserved
 * Copyright (c) 2020 Engicam srl
 * Copyright (c) 2020 Amarula Solutions(India)
 */

/dts-v1/;
#include "stm32mp157.dtsi"
#include "stm32mp157a-microgea-stm32mp1.dtsi"
#include "stm32mp15-pinctrl.dtsi"
#include "stm32mp15xxaa-pinctrl.dtsi"
#include <dt-bindings/gpio/gpio.h>

/ {
	model = "Engicam MicroGEA STM32MP1 MicroDev 2.0 7\" Open Frame";
	compatible = "engicam,microgea-stm32mp1-microdev2.0-of7",
		     "engicam,microgea-stm32mp1", "st,stm32mp157";

	aliases {
		serial0 = &uart4;
		serial1 = &uart8;
	};

	chosen {
		stdout-path = "serial0:115200n8";
	};

	/* Panel backlight, driven by a simple GPIO (no PWM dimming) */
	backlight: backlight {
		compatible = "gpio-backlight";
		gpios = <&gpiod 13 GPIO_ACTIVE_HIGH>;
		default-on;
	};

	/* 3.3 V LCD logic rail, chained after the panel power rail */
	lcd_3v3: regulator-lcd-3v3 {
		compatible = "regulator-fixed";
		regulator-name = "lcd_3v3";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&gpiof 10 GPIO_ACTIVE_HIGH>;
		enable-active-high;
		regulator-always-on;
		power-supply = <&panel_pwr>;
	};

	panel_pwr: regulator-panel-pwr {
		compatible = "regulator-fixed";
		regulator-name = "panel_pwr";
		regulator-min-microvolt = <3300000>;
		regulator-max-microvolt = <3300000>;
		gpio = <&gpiob 10 GPIO_ACTIVE_HIGH>;
		regulator-always-on;
	};

	/* AUO B101AW03 parallel-RGB panel, fed by the LTDC */
	panel {
		compatible = "auo,b101aw03";
		backlight = <&backlight>;
		enable-gpios = <&gpiof 2 GPIO_ACTIVE_HIGH>;
		power-supply = <&lcd_3v3>;

		port {
			panel_in: endpoint {
				remote-endpoint = <&ltdc_ep0_out>;
			};
		};
	};
};

&i2c2 {
	i2c-scl-falling-time-ns = <20>;
	i2c-scl-rising-time-ns = <185>;
	pinctrl-names = "default", "sleep";
	pinctrl-0 = <&i2c2_pins_a>;
	pinctrl-1 = <&i2c2_sleep_pins_a>;
	status = "okay";
};

&ltdc {
	pinctrl-names = "default";
	pinctrl-0 = <&ltdc_pins>;
	status = "okay";

	port {
		ltdc_ep0_out: endpoint {
			remote-endpoint = <&panel_in>;
		};
	};
};

&pinctrl {
	/* Board-specific LTDC parallel-RGB pin mux */
	ltdc_pins: ltdc-0 {
		pins {
			pinmux = <STM32_PINMUX('G', 10, AF14)>,	/* LTDC_B2 */
				 <STM32_PINMUX('H', 12, AF14)>,	/* LTDC_R6 */
				 <STM32_PINMUX('H', 11, AF14)>,	/* LTDC_R5 */
				 <STM32_PINMUX('D', 10, AF14)>,	/* LTDC_B3 */
				 <STM32_PINMUX('D', 9, AF14)>,	/* LTDC_B0 */
				 <STM32_PINMUX('E', 5, AF14)>,	/* LTDC_G0 */
				 <STM32_PINMUX('E', 6, AF14)>,	/* LTDC_G1 */
				 <STM32_PINMUX('E', 13, AF14)>,	/* LTDC_DE */
				 <STM32_PINMUX('E', 15, AF14)>,	/* LTDC_R7 */
				 <STM32_PINMUX('G', 7, AF14)>,	/* LTDC_CLK */
				 <STM32_PINMUX('G', 12, AF14)>,	/* LTDC_B1 */
				 <STM32_PINMUX('H', 2, AF14)>,	/* LTDC_R0 */
				 <STM32_PINMUX('H', 3, AF14)>,	/* LTDC_R1 */
				 <STM32_PINMUX('H', 8, AF14)>,	/* LTDC_R2 */
				 <STM32_PINMUX('H', 9, AF14)>,	/* LTDC_R3 */
				 <STM32_PINMUX('H', 10, AF14)>,	/* LTDC_R4 */
				 <STM32_PINMUX('H', 13, AF14)>,	/* LTDC_G2 */
				 <STM32_PINMUX('H', 14, AF14)>,	/* LTDC_G3 */
				 <STM32_PINMUX('H', 15, AF14)>,	/* LTDC_G4 */
				 <STM32_PINMUX('I', 0, AF14)>,	/* LTDC_G5 */
				 <STM32_PINMUX('I', 1, AF14)>,	/* LTDC_G6 */
				 <STM32_PINMUX('I', 2, AF14)>,	/* LTDC_G7 */
				 <STM32_PINMUX('I', 4, AF14)>,	/* LTDC_B4 */
				 <STM32_PINMUX('I', 5, AF14)>,	/* LTDC_B5 */
				 <STM32_PINMUX('B', 8, AF14)>,	/* LTDC_B6 */
				 <STM32_PINMUX('I', 7, AF14)>,	/* LTDC_B7 */
				 <STM32_PINMUX('I', 9, AF14)>,	/* LTDC_VSYNC */
				 <STM32_PINMUX('I', 10, AF14)>;	/* LTDC_HSYNC */
			bias-disable;
			drive-push-pull;
			slew-rate = <3>;
		};
	};
};

&sdmmc1 {
	bus-width = <4>;
	disable-wp;
	pinctrl-names = "default", "opendrain", "sleep";
	pinctrl-0 = <&sdmmc1_b4_pins_a>;
	pinctrl-1 = <&sdmmc1_b4_od_pins_a>;
	pinctrl-2 = <&sdmmc1_b4_sleep_pins_a>;
	st,neg-edge;
	vmmc-supply = <&vdd>;
	status = "okay";
};

/* Console UART */
&uart4 {
	pinctrl-names = "default", "sleep", "idle";
	pinctrl-0 = <&uart4_pins_a>;
	pinctrl-1 = <&uart4_sleep_pins_a>;
	pinctrl-2 = <&uart4_idle_pins_a>;
	/delete-property/dmas;
	/delete-property/dma-names;
	status = "okay";
};

/* J31: RS232 */
&uart8 {
	pinctrl-names = "default";
	pinctrl-0 = <&uart8_pins_a>;
	/delete-property/dmas;
	/delete-property/dma-names;
	status = "okay";
};
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#ifndef __IVPU_COREDUMP_H__
#define __IVPU_COREDUMP_H__

#include <drm/drm_print.h>

#include "ivpu_drv.h"
#include "ivpu_fw_log.h"

#ifdef CONFIG_DEV_COREDUMP
/* Capture a device coredump for @vdev (implemented in ivpu_coredump.c). */
void ivpu_dev_coredump(struct ivpu_device *vdev);
#else
/*
 * Fallback when DEV_COREDUMP is not built in: instead of producing a
 * coredump, dump the firmware log through the DRM info printer so some
 * diagnostic output is still available.
 */
static inline void ivpu_dev_coredump(struct ivpu_device *vdev)
{
	struct drm_printer p = drm_info_printer(vdev->drm.dev);

	ivpu_fw_log_print(vdev, false, &p);
}
#endif

#endif /* __IVPU_COREDUMP_H__ */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/kernel.h>

#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"

#include "cmdstream.xml.h"

/*
 * Extract a bit field from a command word using the generated
 * field##__MASK / field##__SHIFT definitions from cmdstream.xml.h.
 */
#define EXTRACT(val, field) (((val) & field##__MASK) >> field##__SHIFT)

/* Walker state while validating one userspace-submitted command stream. */
struct etna_validation_state {
	struct etnaviv_gpu *gpu;
	/* Remaining relocation entries; consumed in submit_offset order. */
	const struct drm_etnaviv_gem_submit_reloc *relocs;
	unsigned int num_relocs;
	/* Start of the command buffer, for computing byte offsets. */
	u32 *start;
};

/*
 * Address-carrying ("sensitive") state registers: writes to these from a
 * userspace stream must go through a relocation so the kernel controls the
 * GPU addresses. Offsets are state-word indices (register address >> 2).
 */
static const struct {
	u16 offset;
	u16 size;
} etnaviv_sensitive_states[] __initconst = {
#define ST(start, num) { (start) >> 2, (num) }
	/* 2D */
	ST(0x1200, 1),
	ST(0x1228, 1),
	ST(0x1238, 1),
	ST(0x1284, 1),
	ST(0x128c, 1),
	ST(0x1304, 1),
	ST(0x1310, 1),
	ST(0x1318, 1),
	ST(0x12800, 4),
	ST(0x128a0, 4),
	ST(0x128c0, 4),
	ST(0x12970, 4),
	ST(0x12a00, 8),
	ST(0x12b40, 8),
	ST(0x12b80, 8),
	ST(0x12ce0, 8),
	/* 3D */
	ST(0x0644, 1),
	ST(0x064c, 1),
	ST(0x0680, 8),
	ST(0x086c, 1),
	ST(0x1028, 1),
	ST(0x1410, 1),
	ST(0x1430, 1),
	ST(0x1458, 1),
	ST(0x1460, 8),
	ST(0x1480, 8),
	ST(0x1500, 8),
	ST(0x1520, 8),
	ST(0x1540, 8),
	ST(0x1608, 1),
	ST(0x1610, 1),
	ST(0x1658, 1),
	ST(0x165c, 1),
	ST(0x1664, 1),
	ST(0x1668, 1),
	ST(0x16a4, 1),
	ST(0x16c0, 8),
	ST(0x16e0, 8),
	ST(0x1740, 8),
	ST(0x17c0, 8),
	ST(0x17e0, 8),
	ST(0x2400, 14 * 16),
	ST(0x3824, 1),
	ST(0x10800, 32 * 16),
	ST(0x14600, 16),
	ST(0x14800, 8 * 8),
#undef ST
};

/* One bit per possible LOAD_STATE offset; set bits mark sensitive state. */
#define ETNAVIV_STATES_SIZE (VIV_FE_LOAD_STATE_HEADER_OFFSET__MASK + 1u)
static DECLARE_BITMAP(etnaviv_states, ETNAVIV_STATES_SIZE);

/* Build the sensitive-state bitmap once at driver init. */
void __init etnaviv_validate_init(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(etnaviv_sensitive_states); i++)
		bitmap_set(etnaviv_states, etnaviv_sensitive_states[i].offset,
			   etnaviv_sensitive_states[i].size);
}

/*
 * Warn (once) about relocations aimed at non-sensitive state and skip them,
 * keeping the reloc cursor in sync with the stream walker.
 */
static void etnaviv_warn_if_non_sensitive(struct etna_validation_state *state,
	unsigned int buf_offset, unsigned int state_addr)
{
	if (state->num_relocs && state->relocs->submit_offset < buf_offset) {
		dev_warn_once(state->gpu->dev,
			      "%s: relocation for non-sensitive state 0x%x at offset %u\n",
			      __func__, state_addr,
			      state->relocs->submit_offset);
		while (state->num_relocs &&
		       state->relocs->submit_offset < buf_offset) {
			state->relocs++;
			state->num_relocs--;
		}
	}
}

/*
 * Check one LOAD_STATE command: every touched sensitive state word must be
 * covered by a relocation at exactly its buffer offset; otherwise reject.
 * Returns false if the stream touches restricted state without a reloc.
 */
static bool etnaviv_validate_load_state(struct etna_validation_state *state,
	u32 *ptr, unsigned int state_offset, unsigned int num)
{
	unsigned int size = min(ETNAVIV_STATES_SIZE, state_offset + num);
	unsigned int st_offset = state_offset, buf_offset;

	for_each_set_bit_from(st_offset, etnaviv_states, size) {
		buf_offset = (ptr - state->start +
			      st_offset - state_offset) * 4;

		etnaviv_warn_if_non_sensitive(state, buf_offset,
					      st_offset * 4);
		if (state->num_relocs &&
		    state->relocs->submit_offset == buf_offset) {
			state->relocs++;
			state->num_relocs--;
			continue;
		}

		dev_warn_ratelimited(state->gpu->dev,
				     "%s: load state touches restricted state 0x%x at offset %u\n",
				     __func__, st_offset * 4, buf_offset);
		return false;
	}

	/* Flush relocs that fall between this command and the next one. */
	if (state->num_relocs) {
		buf_offset = (ptr - state->start + num) * 4;
		etnaviv_warn_if_non_sensitive(state, buf_offset, st_offset * 4 +
					      state->relocs->submit_offset -
					      buf_offset);
	}

	return true;
}

/* Fixed lengths (in words) of the non-variable FE opcodes; 0 = forbidden. */
static uint8_t cmd_length[32] = {
	[FE_OPCODE_DRAW_PRIMITIVES] = 4,
	[FE_OPCODE_DRAW_INDEXED_PRIMITIVES] = 6,
	[FE_OPCODE_DRAW_INSTANCED] = 4,
	[FE_OPCODE_NOP] = 2,
	[FE_OPCODE_STALL] = 2,
};

/*
 * Validate a whole command stream from userspace. Only a whitelisted set of
 * opcodes is permitted, and all sensitive state writes must be relocated.
 * Returns true if the stream is safe to execute.
 */
bool etnaviv_cmd_validate_one(struct etnaviv_gpu *gpu, u32 *stream,
			      unsigned int size,
			      struct drm_etnaviv_gem_submit_reloc *relocs,
			      unsigned int reloc_size)
{
	struct etna_validation_state state;
	u32 *buf = stream;
	u32 *end = buf + size;

	state.gpu = gpu;
	state.relocs = relocs;
	state.num_relocs = reloc_size;
	state.start = stream;

	while (buf < end) {
		u32 cmd = *buf;
		unsigned int len, n, off;
		unsigned int op = cmd >> 27;

		switch (op) {
		case FE_OPCODE_LOAD_STATE:
			n = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_COUNT);
			len = ALIGN(1 + n, 2);
			/* Truncated command: fall out and report overflow. */
			if (buf + len > end)
				break;

			off = EXTRACT(cmd, VIV_FE_LOAD_STATE_HEADER_OFFSET);
			if (!etnaviv_validate_load_state(&state, buf + 1,
							 off, n))
				return false;
			break;

		case FE_OPCODE_DRAW_2D:
			n = EXTRACT(cmd, VIV_FE_DRAW_2D_HEADER_COUNT);
			/* A count of 0 encodes 256 rectangles. */
			if (n == 0)
				n = 256;
			len = 2 + n * 2;
			break;

		default:
			len = cmd_length[op];
			if (len == 0) {
				dev_err(gpu->dev, "%s: op %u not permitted at offset %tu\n",
					__func__, op, buf - state.start);
				return false;
			}
			break;
		}

		buf += len;
	}

	if (buf > end) {
		dev_err(gpu->dev, "%s: commands overflow end of buffer: %tu > %u\n",
			__func__, buf - state.start, size);
		return false;
	}

	return true;
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Support for Intel Camera Imaging ISP subsystem.
 * Copyright (c) 2015, Intel Corporation.
 */

#ifndef __IA_CSS_OB2_TYPES_H
#define __IA_CSS_OB2_TYPES_H

/* @file
 * CSS-API header file for Optical Black algorithm parameters.
 */

/* Optical Black configuration
 *
 * ISP2.6.1: OB2 is used.
 *
 * Per-Bayer-channel black levels subtracted by the OB2 block.
 * All values are u0.16 fixed point (range [0,65535]); 0 disables the
 * correction for that channel.
 */

#include "ia_css_frac.h"

struct ia_css_ob2_config {
	ia_css_u0_16 level_gr; /** Black level for GR pixels.
					u0.16, [0,65535], default/ineffective 0 */
	ia_css_u0_16 level_r;  /** Black level for R pixels.
					u0.16, [0,65535], default/ineffective 0 */
	ia_css_u0_16 level_b;  /** Black level for B pixels.
					u0.16, [0,65535], default/ineffective 0 */
	ia_css_u0_16 level_gb; /** Black level for GB pixels.
					u0.16, [0,65535], default/ineffective 0 */
};

#endif /* __IA_CSS_OB2_TYPES_H */
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (c) 1997, 1999 by Ralf Baechle * Copyright (c) 1999 Silicon Graphics, Inc. */ #ifndef _ASM_BCACHE_H #define _ASM_BCACHE_H #include <linux/types.h> /* Some R4000 / R4400 / R4600 / R5000 machines may have a non-dma-coherent, chipset implemented caches. On machines with other CPUs the CPU does the cache thing itself. */ struct bcache_ops { void (*bc_enable)(void); void (*bc_disable)(void); void (*bc_wback_inv)(unsigned long page, unsigned long size); void (*bc_inv)(unsigned long page, unsigned long size); void (*bc_prefetch_enable)(void); void (*bc_prefetch_disable)(void); bool (*bc_prefetch_is_enabled)(void); }; extern void indy_sc_init(void); #ifdef CONFIG_BOARD_SCACHE extern struct bcache_ops *bcops; static inline void bc_enable(void) { bcops->bc_enable(); } static inline void bc_disable(void) { bcops->bc_disable(); } static inline void bc_wback_inv(unsigned long page, unsigned long size) { bcops->bc_wback_inv(page, size); } static inline void bc_inv(unsigned long page, unsigned long size) { bcops->bc_inv(page, size); } static inline void bc_prefetch_enable(void) { if (bcops->bc_prefetch_enable) bcops->bc_prefetch_enable(); } static inline void bc_prefetch_disable(void) { if (bcops->bc_prefetch_disable) bcops->bc_prefetch_disable(); } static inline bool bc_prefetch_is_enabled(void) { if (bcops->bc_prefetch_is_enabled) return bcops->bc_prefetch_is_enabled(); return false; } #else /* !defined(CONFIG_BOARD_SCACHE) */ /* Not R4000 / R4400 / R4600 / R5000. 
*/ #define bc_enable() do { } while (0) #define bc_disable() do { } while (0) #define bc_wback_inv(page, size) do { } while (0) #define bc_inv(page, size) do { } while (0) #define bc_prefetch_enable() do { } while (0) #define bc_prefetch_disable() do { } while (0) #define bc_prefetch_is_enabled() 0 #endif /* !defined(CONFIG_BOARD_SCACHE) */ #endif /* _ASM_BCACHE_H */
// SPDX-License-Identifier: GPL-2.0
/*
 * 3-axis accelerometer driver supporting following Bosch-Sensortec chips:
 *  - BMI088
 *  - BMI085
 *  - BMI090L
 *
 * Copyright (c) 2018-2020, Topic Embedded Products
 */

#include <linux/module.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "bmi088-accel.h"

/* SPI writes use the generic regmap framing; just forward to spi_write(). */
static int bmi088_regmap_spi_write(void *context, const void *data, size_t count)
{
	struct spi_device *spi = context;

	/* Write register is same as generic SPI */
	return spi_write(spi, data, count);
}

/*
 * SPI reads on this chip need the register address with the read bit (bit 7)
 * set, followed by one dummy byte before data is clocked out.
 */
static int bmi088_regmap_spi_read(void *context, const void *reg,
				  size_t reg_size, void *val, size_t val_size)
{
	struct spi_device *spi = context;
	u8 addr[2];

	/* NOTE(review): assumes reg_size == 1 (single-byte register addresses) */
	addr[0] = *(u8 *)reg;
	addr[0] |= BIT(7); /* Set RW = '1' */
	addr[1] = 0; /* Read requires a dummy byte transfer */

	return spi_write_then_read(spi, addr, sizeof(addr), val, val_size);
}

static const struct regmap_bus bmi088_regmap_bus = {
	.write = bmi088_regmap_spi_write,
	.read = bmi088_regmap_spi_read,
};

/* Set up a regmap over SPI and hand off to the bus-agnostic core driver. */
static int bmi088_accel_probe(struct spi_device *spi)
{
	struct regmap *regmap;
	const struct spi_device_id *id = spi_get_device_id(spi);

	regmap = devm_regmap_init(&spi->dev, &bmi088_regmap_bus,
				  spi, &bmi088_regmap_conf);
	if (IS_ERR(regmap)) {
		dev_err(&spi->dev, "Failed to initialize spi regmap\n");
		return PTR_ERR(regmap);
	}

	return bmi088_accel_core_probe(&spi->dev, regmap, spi->irq,
				       id->driver_data);
}

static void bmi088_accel_remove(struct spi_device *spi)
{
	bmi088_accel_core_remove(&spi->dev);
}

static const struct of_device_id bmi088_of_match[] = {
	{ .compatible = "bosch,bmi085-accel" },
	{ .compatible = "bosch,bmi088-accel" },
	{ .compatible = "bosch,bmi090l-accel" },
	{}
};
MODULE_DEVICE_TABLE(of, bmi088_of_match);

/* driver_data selects the chip variant in the core driver. */
static const struct spi_device_id bmi088_accel_id[] = {
	{"bmi085-accel",  BOSCH_BMI085},
	{"bmi088-accel",  BOSCH_BMI088},
	{"bmi090l-accel", BOSCH_BMI090L},
	{}
};
MODULE_DEVICE_TABLE(spi, bmi088_accel_id);

static struct spi_driver bmi088_accel_driver = {
	.driver = {
		.name	= "bmi088_accel_spi",
		.pm	= pm_ptr(&bmi088_accel_pm_ops),
		.of_match_table = bmi088_of_match,
	},
	.probe		= bmi088_accel_probe,
	.remove		= bmi088_accel_remove,
	.id_table	= bmi088_accel_id,
};
module_spi_driver(bmi088_accel_driver);

MODULE_AUTHOR("Niek van Agt <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("BMI088 accelerometer driver (SPI)");
MODULE_IMPORT_NS("IIO_BMI088");
/* SPDX-License-Identifier: GPL-2.0 */ #ifndef _PROBE_ROMS_H_ #define _PROBE_ROMS_H_ struct pci_dev; extern void __iomem *pci_map_biosrom(struct pci_dev *pdev); extern void pci_unmap_biosrom(void __iomem *rom); extern size_t pci_biosrom_size(struct pci_dev *pdev); #endif
// SPDX-License-Identifier: GPL-2.0 /* * Xen leaves the responsibility for maintaining p2m mappings to the * guests themselves, but it must also access and update the p2m array * during suspend/resume when all the pages are reallocated. * * The logical flat p2m table is mapped to a linear kernel memory area. * For accesses by Xen a three-level tree linked via mfns only is set up to * allow the address space to be sparse. * * Xen * | * p2m_top_mfn * / \ * p2m_mid_mfn p2m_mid_mfn * / / * p2m p2m p2m ... * * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. * * The p2m_top_mfn level is limited to 1 page, so the maximum representable * pseudo-physical address space is: * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages * * P2M_PER_PAGE depends on the architecture, as a mfn is always * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to * 512 and 1024 entries respectively. * * In short, these structures contain the Machine Frame Number (MFN) of the PFN. * * However not all entries are filled with MFNs. Specifically for all other * leaf entries, or for the top root, or middle one, for which there is a void * entry, we assume it is "missing". So (for example) * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. * We have a dedicated page p2m_missing with all entries being * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns. * * We also have the possibility of setting 1-1 mappings on certain regions, so * that: * pfn_to_mfn(0xc0000)=0xc0000 * * The benefit of this is, that we can assume for non-RAM regions (think * PCI BARs, or ACPI spaces), we can create mappings easily because we * get the PFN value to match the MFN. * * For this to work efficiently we have one new page p2m_identity. All entries * in p2m_identity are set to INVALID_P2M_ENTRY type (Xen toolstack only * recognizes that and MFNs, no other fancy value). 
* * On lookup we spot that the entry points to p2m_identity and return the * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. * If the entry points to an allocated page, we just proceed as before and * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in * appropriate functions (pfn_to_mfn). * * The reason for having the IDENTITY_FRAME_BIT instead of just returning the * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a * non-identity pfn. To protect ourselves against we elect to set (and get) the * IDENTITY_FRAME_BIT on all identity mapped PFNs. */ #include <linux/init.h> #include <linux/export.h> #include <linux/list.h> #include <linux/hash.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/acpi.h> #include <asm/cache.h> #include <asm/setup.h> #include <linux/uaccess.h> #include <asm/xen/page.h> #include <asm/xen/hypercall.h> #include <asm/xen/hypervisor.h> #include <xen/balloon.h> #include <xen/grant_table.h> #include <xen/hvc-console.h> #include "xen-ops.h" #define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) #define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) #define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) #define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE) unsigned long *xen_p2m_addr __read_mostly; EXPORT_SYMBOL_GPL(xen_p2m_addr); unsigned long xen_p2m_size __read_mostly; EXPORT_SYMBOL_GPL(xen_p2m_size); unsigned long xen_max_p2m_pfn __read_mostly; EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); #ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT #define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT #else #define P2M_LIMIT 0 #endif static DEFINE_SPINLOCK(p2m_update_lock); static unsigned long *p2m_mid_missing_mfn; static unsigned long *p2m_top_mfn; static unsigned long **p2m_top_mfn_p; static unsigned long *p2m_missing; static unsigned long *p2m_identity; static pte_t 
*p2m_missing_pte;
static pte_t *p2m_identity_pte;

/*
 * Hint at last populated PFN.
 *
 * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack
 * can avoid scanning the whole P2M (which may be sized to account for
 * hotplugged memory).
 */
static unsigned long xen_p2m_last_pfn;

/* Index of @pfn's entry in the top-level mfn array. */
static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

/* Index of @pfn's entry within its mid-level mfn page. */
static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

/* Point every top-level slot at the shared "missing" mid page (by mfn). */
static void p2m_top_mfn_init(unsigned long *top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = virt_to_mfn(p2m_mid_missing_mfn);
}

/* Same as above, but storing kernel virtual addresses instead of mfns. */
static void p2m_top_mfn_p_init(unsigned long **top)
{
	unsigned i;

	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing_mfn;
}

/* Point every mid-level slot at @leaf (by mfn). */
static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf)
{
	unsigned i;

	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = virt_to_mfn(leaf);
}

/* Fill a leaf p2m page with "missing" entries. */
static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

/* Fill a leaf p2m page with identity-mapped entries starting at @pfn. */
static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)
{
	unsigned i;

	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = IDENTITY_FRAME(pfn + i);
}

/*
 * Allocate one page for p2m bookkeeping, from memblock before the slab
 * allocator is up (early boot) and from the page allocator afterwards.
 */
static void * __ref alloc_p2m_page(void)
{
	if (unlikely(!slab_is_available())) {
		void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

		if (!ptr)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		return ptr;
	}

	return (void *)__get_free_page(GFP_KERNEL);
}

/* Counterpart to alloc_p2m_page(); picks the matching free routine. */
static void __ref free_p2m_page(void *p)
{
	if (unlikely(!slab_is_available())) {
		memblock_free(p, PAGE_SIZE);
		return;
	}

	free_page((unsigned long)p);
}

/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
* * - After resume we're called from within stop_machine, but the mfn * tree should already be completely allocated. */ void __ref xen_build_mfn_list_list(void) { unsigned long pfn, mfn; pte_t *ptep; unsigned int level, topidx, mididx; unsigned long *mid_mfn_p; if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) return; /* Pre-initialize p2m_top_mfn to be completely missing */ if (p2m_top_mfn == NULL) { p2m_mid_missing_mfn = alloc_p2m_page(); p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); p2m_top_mfn_p = alloc_p2m_page(); p2m_top_mfn_p_init(p2m_top_mfn_p); p2m_top_mfn = alloc_p2m_page(); p2m_top_mfn_init(p2m_top_mfn); } else { /* Reinitialise, mfn's all change after migration */ p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); } for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN; pfn += P2M_PER_PAGE) { topidx = p2m_top_index(pfn); mididx = p2m_mid_index(pfn); mid_mfn_p = p2m_top_mfn_p[topidx]; ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); BUG_ON(!ptep || level != PG_LEVEL_4K); mfn = pte_mfn(*ptep); ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); /* Don't bother allocating any mfn mid levels if * they're just missing, just update the stored mfn, * since all could have changed over a migrate. 
*/ if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) { BUG_ON(mididx); BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; continue; } if (mid_mfn_p == p2m_mid_missing_mfn) { mid_mfn_p = alloc_p2m_page(); p2m_mid_mfn_init(mid_mfn_p, p2m_missing); p2m_top_mfn_p[topidx] = mid_mfn_p; } p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); mid_mfn_p[mididx] = mfn; } } void xen_setup_mfn_list_list(void) { BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL; else HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = virt_to_mfn(p2m_top_mfn); HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; HYPERVISOR_shared_info->arch.p2m_generation = 0; HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; HYPERVISOR_shared_info->arch.p2m_cr3 = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); } /* Set up p2m_top to point to the domain-builder provided p2m pages */ void __init xen_build_dynamic_phys_to_machine(void) { unsigned long pfn; xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list; xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE); for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++) xen_p2m_addr[pfn] = INVALID_P2M_ENTRY; xen_max_p2m_pfn = xen_p2m_size; } #define P2M_TYPE_IDENTITY 0 #define P2M_TYPE_MISSING 1 #define P2M_TYPE_PFN 2 #define P2M_TYPE_UNKNOWN 3 static int xen_p2m_elem_type(unsigned long pfn) { unsigned long mfn; if (pfn >= xen_p2m_size) return P2M_TYPE_IDENTITY; mfn = xen_p2m_addr[pfn]; if (mfn == INVALID_P2M_ENTRY) return P2M_TYPE_MISSING; if (mfn & IDENTITY_FRAME_BIT) return P2M_TYPE_IDENTITY; return P2M_TYPE_PFN; } static void __init xen_rebuild_p2m_list(unsigned long *p2m) { unsigned int i, chunk; unsigned long pfn; unsigned long *mfns; pte_t *ptep; pmd_t *pmdp; int type; p2m_missing = alloc_p2m_page(); 
p2m_init(p2m_missing); p2m_identity = alloc_p2m_page(); p2m_init(p2m_identity); p2m_missing_pte = alloc_p2m_page(); paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT); p2m_identity_pte = alloc_p2m_page(); paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT); for (i = 0; i < PTRS_PER_PTE; i++) { set_pte(p2m_missing_pte + i, pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO)); set_pte(p2m_identity_pte + i, pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO)); } for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) { /* * Try to map missing/identity PMDs or p2m-pages if possible. * We have to respect the structure of the mfn_list_list * which will be built just afterwards. * Chunk size to test is one p2m page if we are in the middle * of a mfn_list_list mid page and the complete mid page area * if we are at index 0 of the mid page. Please note that a * mid page might cover more than one PMD, e.g. on 32 bit PAE * kernels. */ chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ? P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE; type = xen_p2m_elem_type(pfn); i = 0; if (type != P2M_TYPE_PFN) for (i = 1; i < chunk; i++) if (xen_p2m_elem_type(pfn + i) != type) break; if (i < chunk) /* Reset to minimal chunk size. */ chunk = P2M_PER_PAGE; if (type == P2M_TYPE_PFN || i < chunk) { /* Use initial p2m page contents. */ mfns = alloc_p2m_page(); copy_page(mfns, xen_p2m_addr + pfn); ptep = populate_extra_pte((unsigned long)(p2m + pfn)); set_pte(ptep, pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL)); continue; } if (chunk == P2M_PER_PAGE) { /* Map complete missing or identity p2m-page. */ mfns = (type == P2M_TYPE_MISSING) ? p2m_missing : p2m_identity; ptep = populate_extra_pte((unsigned long)(p2m + pfn)); set_pte(ptep, pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO)); continue; } /* Complete missing or identity PMD(s) can be mapped. */ ptep = (type == P2M_TYPE_MISSING) ? 
p2m_missing_pte : p2m_identity_pte; for (i = 0; i < PMDS_PER_MID_PAGE; i++) { pmdp = populate_extra_pmd( (unsigned long)(p2m + pfn) + i * PMD_SIZE); set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); } } } void __init xen_vmalloc_p2m_tree(void) { static struct vm_struct vm; unsigned long p2m_limit; xen_p2m_last_pfn = xen_max_p2m_pfn; p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; vm.flags = VM_ALLOC; vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), PMD_SIZE * PMDS_PER_MID_PAGE); vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); xen_max_p2m_pfn = vm.size / sizeof(unsigned long); xen_rebuild_p2m_list(vm.addr); xen_p2m_addr = vm.addr; xen_p2m_size = xen_max_p2m_pfn; xen_inv_extra_mem(); } unsigned long get_phys_to_machine(unsigned long pfn) { pte_t *ptep; unsigned int level; if (unlikely(pfn >= xen_p2m_size)) { if (pfn < xen_max_p2m_pfn) return xen_chk_extra_mem(pfn); return IDENTITY_FRAME(pfn); } ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); BUG_ON(!ptep || level != PG_LEVEL_4K); /* * The INVALID_P2M_ENTRY is filled in both p2m_*identity * and in p2m_*missing, so returning the INVALID_P2M_ENTRY * would be wrong. */ if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity))) return IDENTITY_FRAME(pfn); return xen_p2m_addr[pfn]; } EXPORT_SYMBOL_GPL(get_phys_to_machine); /* * Allocate new pmd(s). It is checked whether the old pmd is still in place. * If not, nothing is changed. This is okay as the only reason for allocating * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual * pmd. */ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) { pte_t *ptechk; pte_t *pte_newpg[PMDS_PER_MID_PAGE]; pmd_t *pmdp; unsigned int level; unsigned long flags; unsigned long vaddr; int i; /* Do all allocations first to bail out in error case. 
*/ for (i = 0; i < PMDS_PER_MID_PAGE; i++) { pte_newpg[i] = alloc_p2m_page(); if (!pte_newpg[i]) { for (i--; i >= 0; i--) free_p2m_page(pte_newpg[i]); return NULL; } } vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1); for (i = 0; i < PMDS_PER_MID_PAGE; i++) { copy_page(pte_newpg[i], pte_pg); paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT); pmdp = lookup_pmd_address(vaddr); BUG_ON(!pmdp); spin_lock_irqsave(&p2m_update_lock, flags); ptechk = lookup_address(vaddr, &level); if (ptechk == pte_pg) { HYPERVISOR_shared_info->arch.p2m_generation++; wmb(); /* Tools are synchronizing via p2m_generation. */ set_pmd(pmdp, __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); wmb(); /* Tools are synchronizing via p2m_generation. */ HYPERVISOR_shared_info->arch.p2m_generation++; pte_newpg[i] = NULL; } spin_unlock_irqrestore(&p2m_update_lock, flags); if (pte_newpg[i]) { paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT); free_p2m_page(pte_newpg[i]); } vaddr += PMD_SIZE; } return lookup_address(addr, &level); } /* * Fully allocate the p2m structure for a given pfn. We need to check * that both the top and mid levels are allocated, and make sure the * parallel mfn tree is kept in sync. We may race with other cpus, so * the new pages are installed with cmpxchg; if we lose the race then * simply free the page we allocated and use the one that's there. 
*/ int xen_alloc_p2m_entry(unsigned long pfn) { unsigned topidx; unsigned long *top_mfn_p, *mid_mfn; pte_t *ptep, *pte_pg; unsigned int level; unsigned long flags; unsigned long addr = (unsigned long)(xen_p2m_addr + pfn); unsigned long p2m_pfn; ptep = lookup_address(addr, &level); BUG_ON(!ptep || level != PG_LEVEL_4K); pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { /* PMD level is missing, allocate a new one */ ptep = alloc_p2m_pmd(addr, pte_pg); if (!ptep) return -ENOMEM; } if (p2m_top_mfn && pfn < MAX_P2M_PFN) { topidx = p2m_top_index(pfn); top_mfn_p = &p2m_top_mfn[topidx]; mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]); BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); if (mid_mfn == p2m_mid_missing_mfn) { /* Separately check the mid mfn level */ unsigned long missing_mfn; unsigned long mid_mfn_mfn; mid_mfn = alloc_p2m_page(); if (!mid_mfn) return -ENOMEM; p2m_mid_mfn_init(mid_mfn, p2m_missing); missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); mid_mfn_mfn = virt_to_mfn(mid_mfn); /* try_cmpxchg() updates missing_mfn on failure. */ if (try_cmpxchg(top_mfn_p, &missing_mfn, mid_mfn_mfn)) { p2m_top_mfn_p[topidx] = mid_mfn; } else { free_p2m_page(mid_mfn); mid_mfn = mfn_to_virt(missing_mfn); } } } else { mid_mfn = NULL; } p2m_pfn = pte_pfn(READ_ONCE(*ptep)); if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) || p2m_pfn == PFN_DOWN(__pa(p2m_missing))) { /* p2m leaf page is missing */ unsigned long *p2m; p2m = alloc_p2m_page(); if (!p2m) return -ENOMEM; if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) p2m_init(p2m); else p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); spin_lock_irqsave(&p2m_update_lock, flags); if (pte_pfn(*ptep) == p2m_pfn) { HYPERVISOR_shared_info->arch.p2m_generation++; wmb(); /* Tools are synchronizing via p2m_generation. */ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL)); wmb(); /* Tools are synchronizing via p2m_generation. 
*/ HYPERVISOR_shared_info->arch.p2m_generation++; if (mid_mfn) mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m); p2m = NULL; } spin_unlock_irqrestore(&p2m_update_lock, flags); if (p2m) free_p2m_page(p2m); } /* Expanded the p2m? */ if (pfn >= xen_p2m_last_pfn) { xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE); HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; } return 0; } EXPORT_SYMBOL(xen_alloc_p2m_entry); unsigned long __init set_phys_range_identity(unsigned long pfn_s, unsigned long pfn_e) { unsigned long pfn; if (unlikely(pfn_s >= xen_p2m_size)) return 0; if (pfn_s > pfn_e) return 0; if (pfn_e > xen_p2m_size) pfn_e = xen_p2m_size; for (pfn = pfn_s; pfn < pfn_e; pfn++) xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn); return pfn - pfn_s; } bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) { pte_t *ptep; unsigned int level; /* Only invalid entries allowed above the highest p2m covered frame. */ if (unlikely(pfn >= xen_p2m_size)) return mfn == INVALID_P2M_ENTRY; /* * The interface requires atomic updates on p2m elements. * xen_safe_write_ulong() is using an atomic store via asm(). 
*/ if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) return true; ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); BUG_ON(!ptep || level != PG_LEVEL_4K); if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing))) return mfn == INVALID_P2M_ENTRY; if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity))) return mfn == IDENTITY_FRAME(pfn); return false; } bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) { if (unlikely(!__set_phys_to_machine(pfn, mfn))) { int ret; ret = xen_alloc_p2m_entry(pfn); if (ret < 0) return false; return __set_phys_to_machine(pfn, mfn); } return true; } int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count) { int i, ret = 0; pte_t *pte; if (xen_feature(XENFEAT_auto_translated_physmap)) return 0; if (kmap_ops) { ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, kmap_ops, count); if (ret) goto out; } for (i = 0; i < count; i++) { unsigned long mfn, pfn; struct gnttab_unmap_grant_ref unmap[2]; int rc; /* Do not add to override if the map failed. */ if (map_ops[i].status != GNTST_okay || (kmap_ops && kmap_ops[i].status != GNTST_okay)) continue; if (map_ops[i].flags & GNTMAP_contains_pte) { pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + (map_ops[i].host_addr & ~PAGE_MASK)); mfn = pte_mfn(*pte); } else { mfn = PFN_DOWN(map_ops[i].dev_bus_addr); } pfn = page_to_pfn(pages[i]); WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) continue; /* * Signal an error for this slot. This in turn requires * immediate unmapping. 
*/ map_ops[i].status = GNTST_general_error; unmap[0].host_addr = map_ops[i].host_addr; unmap[0].handle = map_ops[i].handle; map_ops[i].handle = INVALID_GRANT_HANDLE; if (map_ops[i].flags & GNTMAP_device_map) unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; else unmap[0].dev_bus_addr = 0; if (kmap_ops) { kmap_ops[i].status = GNTST_general_error; unmap[1].host_addr = kmap_ops[i].host_addr; unmap[1].handle = kmap_ops[i].handle; kmap_ops[i].handle = INVALID_GRANT_HANDLE; if (kmap_ops[i].flags & GNTMAP_device_map) unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; else unmap[1].dev_bus_addr = 0; } /* * Pre-populate both status fields, to be recognizable in * the log message below. */ unmap[0].status = 1; unmap[1].status = 1; rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap, 1 + !!kmap_ops); if (rc || unmap[0].status != GNTST_okay || unmap[1].status != GNTST_okay) pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", rc, unmap[0].status, unmap[1].status); } out: return ret; } int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, struct gnttab_unmap_grant_ref *kunmap_ops, struct page **pages, unsigned int count) { int i, ret = 0; if (xen_feature(XENFEAT_auto_translated_physmap)) return 0; for (i = 0; i < count; i++) { unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); unsigned long pfn = page_to_pfn(pages[i]); if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) set_phys_to_machine(pfn, INVALID_P2M_ENTRY); else ret = -EINVAL; } if (kunmap_ops) ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, kunmap_ops, count) ?: ret; return ret; } /* Remapped non-RAM areas */ #define NR_NONRAM_REMAP 4 static struct nonram_remap { phys_addr_t maddr; phys_addr_t paddr; size_t size; } xen_nonram_remap[NR_NONRAM_REMAP] __ro_after_init; static unsigned int nr_nonram_remap __ro_after_init; /* * Do the real remapping of non-RAM regions as specified in the * xen_nonram_remap[] array. * In case of an error just crash the system. 
 */
void __init xen_do_remap_nonram(void)
{
	unsigned int i;
	unsigned int remapped = 0;
	const struct nonram_remap *remap = xen_nonram_remap;
	unsigned long pfn, mfn, end_pfn;

	for (i = 0; i < nr_nonram_remap; i++) {
		end_pfn = PFN_UP(remap->paddr + remap->size);
		pfn = PFN_DOWN(remap->paddr);
		mfn = PFN_DOWN(remap->maddr);
		/* Install a linear pfn -> mfn mapping for the whole region. */
		while (pfn < end_pfn) {
			if (!set_phys_to_machine(pfn, mfn))
				panic("Failed to set p2m mapping for pfn=%lx mfn=%lx\n",
				      pfn, mfn);

			pfn++;
			mfn++;
			remapped++;
		}

		remap++;
	}

	pr_info("Remapped %u non-RAM page(s)\n", remapped);
}

#ifdef CONFIG_ACPI
/*
 * Xen variant of acpi_os_ioremap() taking potentially remapped non-RAM
 * regions into account.
 * Any attempt to map an area crossing a remap boundary will produce a
 * WARN() splat.
 * phys is related to remap->maddr on input and will be rebased to remap->paddr.
 */
static void __iomem *xen_acpi_os_ioremap(acpi_physical_address phys,
					 acpi_size size)
{
	unsigned int i;
	const struct nonram_remap *remap = xen_nonram_remap;

	for (i = 0; i < nr_nonram_remap; i++) {
		if (phys + size > remap->maddr &&
		    phys < remap->maddr + remap->size) {
			WARN_ON(phys < remap->maddr ||
				phys + size > remap->maddr + remap->size);
			/* Rebase from machine address to (remapped) physical. */
			phys += remap->paddr - remap->maddr;
			break;
		}
	}

	return x86_acpi_os_ioremap(phys, size);
}
#endif /* CONFIG_ACPI */

/*
 * Add a new non-RAM remap entry.
 * In case of no free entry found, just crash the system.
 */
void __init xen_add_remap_nonram(phys_addr_t maddr, phys_addr_t paddr,
				 unsigned long size)
{
	/* maddr and paddr must share the same offset within a page. */
	BUG_ON((maddr & ~PAGE_MASK) != (paddr & ~PAGE_MASK));

	if (nr_nonram_remap == NR_NONRAM_REMAP) {
		xen_raw_console_write("Number of required E820 entry remapping actions exceed maximum value\n");
		BUG();
	}

#ifdef CONFIG_ACPI
	/* Switch to the Xen acpi_os_ioremap() variant. */
	if (nr_nonram_remap == 0)
		acpi_os_ioremap = xen_acpi_os_ioremap;
#endif

	xen_nonram_remap[nr_nonram_remap].maddr = maddr;
	xen_nonram_remap[nr_nonram_remap].paddr = paddr;
	xen_nonram_remap[nr_nonram_remap].size = size;
	nr_nonram_remap++;
}

#ifdef CONFIG_XEN_DEBUG_FS
#include <linux/debugfs.h>

/* Dump the p2m as runs of identical entry types: [start->end] type. */
static int p2m_dump_show(struct seq_file *m, void *v)
{
	static const char * const type_name[] = {
				[P2M_TYPE_IDENTITY] = "identity",
				[P2M_TYPE_MISSING] = "missing",
				[P2M_TYPE_PFN] = "pfn",
				[P2M_TYPE_UNKNOWN] = "abnormal"};
	unsigned long pfn, first_pfn;
	int type, prev_type;

	prev_type = xen_p2m_elem_type(0);
	first_pfn = 0;

	for (pfn = 0; pfn < xen_p2m_size; pfn++) {
		type = xen_p2m_elem_type(pfn);
		if (type != prev_type) {
			seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
				   type_name[prev_type]);
			prev_type = type;
			first_pfn = pfn;
		}
	}
	/* Emit the trailing run. */
	seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn,
		   type_name[prev_type]);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(p2m_dump);

static struct dentry *d_mmu_debug;

static int __init xen_p2m_debugfs(void)
{
	struct dentry *d_xen = xen_init_debugfs();

	d_mmu_debug = debugfs_create_dir("mmu", d_xen);

	debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops);
	return 0;
}
fs_initcall(xen_p2m_debugfs);
#endif /* CONFIG_XEN_DEBUG_FS */
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * USB CDC EEM network interface driver
 * Copyright (C) 2009 Oberthur Technologies
 * by Omar Laazimani, Olivier Condemine
 */

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ctype.h>
#include <linux/ethtool.h>
#include <linux/workqueue.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>


/*
 * This driver is an implementation of the CDC "Ethernet Emulation
 * Model" (EEM) specification, which encapsulates Ethernet frames
 * for transport over USB using a simpler USB device model than the
 * previous CDC "Ethernet Control Model" (ECM, or "CDC Ethernet").
 *
 * For details, see https://usb.org/sites/default/files/CDC_EEM10.pdf
 *
 * This version has been tested with GIGAntIC WuaoW SIM Smart Card on 2.6.24,
 * 2.6.27 and 2.6.30rc2 kernel.
 * It has also been validated on Openmoko Om 2008.12 (based on 2.6.24 kernel).
 * build on 23-April-2009
 */

#define EEM_HEAD	2		/* 2 byte header */

/*-------------------------------------------------------------------------*/

/* Completion for a link-command URB: release the skb and the URB itself. */
static void eem_linkcmd_complete(struct urb *urb)
{
	dev_kfree_skb(urb->context);
	usb_free_urb(urb);
}

/*
 * Submit an EEM link command (e.g. an echo response) on the bulk OUT
 * endpoint.  Best effort: on any failure the skb is dropped and a
 * warning logged; no error is propagated to the caller.
 */
static void eem_linkcmd(struct usbnet *dev, struct sk_buff *skb)
{
	struct urb		*urb;
	int			status;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto fail;

	usb_fill_bulk_urb(urb, dev->udev, dev->out,
			skb->data, skb->len, eem_linkcmd_complete, skb);

	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		usb_free_urb(urb);
fail:
		dev_kfree_skb(skb);
		netdev_warn(dev->net, "link cmd failure\n");
		return;
	}
}

/*
 * usbnet bind callback: discover endpoints and reserve extra
 * hard_header_len for the EEM header, the appended Ethernet FCS and a
 * possible VLAN tag.
 */
static int eem_bind(struct usbnet *dev, struct usb_interface *intf)
{
	int status = 0;

	status = usbnet_get_endpoints(dev, intf);
	if (status < 0)
		return status;

	/* no jumbogram (16K) support for now */
	dev->net->hard_header_len += EEM_HEAD + ETH_FCS_LEN + VLAN_HLEN;
	dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;

	return 0;
}

/*
 * EEM permits packing multiple Ethernet frames into USB transfers
 * (a "bundle"), but for TX we don't try to do that.
 */
static struct sk_buff *eem_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
				       gfp_t flags)
{
	struct sk_buff	*skb2 = NULL;
	u16		len = skb->len;
	u32		crc = 0;
	int		padlen = 0;

	/* When ((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket) is
	 * zero, stick two bytes of zero length EEM packet on the end.
	 * Else the framework would add invalid single byte padding,
	 * since it can't know whether ZLPs will be handled right by
	 * all the relevant hardware and software.
	 */
	if (!((len + EEM_HEAD + ETH_FCS_LEN) % dev->maxpacket))
		padlen += 2;

	/* Reuse the skb in place when possible instead of reallocating. */
	if (!skb_cloned(skb)) {
		int	headroom = skb_headroom(skb);
		int	tailroom = skb_tailroom(skb);

		/* Enough room already at both ends? */
		if ((tailroom >= ETH_FCS_LEN + padlen) &&
		    (headroom >= EEM_HEAD))
			goto done;

		/* Enough total room: shift the payload to make it fit. */
		if ((headroom + tailroom)
				> (EEM_HEAD + ETH_FCS_LEN + padlen)) {
			skb->data = memmove(skb->head +
					EEM_HEAD,
					skb->data,
					skb->len);
			skb_set_tail_pointer(skb, len);
			goto done;
		}
	}

	/* Otherwise copy into a new skb with the needed head/tailroom;
	 * the original is always consumed here.
	 */
	skb2 = skb_copy_expand(skb, EEM_HEAD, ETH_FCS_LEN + padlen, flags);
	dev_kfree_skb_any(skb);
	if (!skb2)
		return NULL;

	skb = skb2;

done:
	/* we don't use the "no Ethernet CRC" option */
	crc = crc32_le(~0, skb->data, skb->len);
	crc = ~crc;

	put_unaligned_le32(crc, skb_put(skb, 4));

	/* EEM packet header format:
	 * b0..13:	length of ethernet frame
	 * b14:		bmCRC (1 == valid Ethernet CRC)
	 * b15:		bmType (0 == data)
	 */
	len = skb->len;
	put_unaligned_le16(BIT(14) | len, skb_push(skb, 2));

	/* Bundle a zero length EEM packet if needed */
	if (padlen)
		put_unaligned_le16(0, skb_put(skb, 2));

	return skb;
}

static int eem_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/*
	 * Our task here is to strip off framing, leaving skb with one
	 * data frame for the usbnet framework code to process. But we
	 * may have received multiple EEM payloads, or command payloads.
	 * So we must process _everything_ as if it's a header, except
	 * maybe the last data payload
	 *
	 * REVISIT the framework needs updating so that when we consume
	 * all payloads (the last or only message was a command, or a
	 * zero length EEM packet) that is not accounted as an rx_error.
	 */
	do {
		struct sk_buff	*skb2 = NULL;
		u16		header;
		u16		len = 0;

		/* incomplete EEM header? */
		if (skb->len < EEM_HEAD)
			return 0;

		/*
		 * EEM packet header format:
		 * b0..14:	EEM type dependent (Data or Command)
		 * b15:		bmType
		 */
		header = get_unaligned_le16(skb->data);
		skb_pull(skb, EEM_HEAD);

		/*
		 * The bmType bit helps to denote when EEM
		 * packet is data or command :
		 *	bmType = 0	: EEM data payload
		 *	bmType = 1	: EEM (link) command
		 */
		if (header & BIT(15)) {
			u16	bmEEMCmd;

			/*
			 * EEM (link) command packet:
			 * b0..10:	bmEEMCmdParam
			 * b11..13:	bmEEMCmd
			 * b14:		bmReserved (must be 0)
			 * b15:		1 (EEM command)
			 */
			if (header & BIT(14)) {
				netdev_dbg(dev->net, "reserved command %04x\n",
					   header);
				continue;
			}

			bmEEMCmd = (header >> 11) & 0x7;
			switch (bmEEMCmd) {

			/* Responding to echo requests is mandatory. */
			case 0:		/* Echo command */
				len = header & 0x7FF;

				/* bogus command? */
				if (skb->len < len)
					return 0;

				/* Echo the payload back, prefixed with an
				 * "echo response" command header.
				 */
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (unlikely(!skb2))
					goto next;
				skb_trim(skb2, len);
				put_unaligned_le16(BIT(15) | (1 << 11) | len,
						skb_push(skb2, 2));
				eem_linkcmd(dev, skb2);
				break;

			/*
			 * Host may choose to ignore hints.
			 *  - suspend: peripheral ready to suspend
			 *  - response: suggest N millisec polling
			 *  - response complete: suggest N sec polling
			 *
			 * Suspend is reported and maybe heeded.
			 */
			case 2:		/* Suspend hint */
				usbnet_device_suggests_idle(dev);
				continue;
			case 3:		/* Response hint */
			case 4:		/* Response complete hint */
				continue;

			/*
			 * Hosts should never receive host-to-peripheral
			 * or reserved command codes; or responses to an
			 * echo command we didn't send.
			 */
			case 1:		/* Echo response */
			case 5:		/* Tickle */
			default:	/* reserved */
				netdev_warn(dev->net,
					    "unexpected link command %d\n",
					    bmEEMCmd);
				continue;
			}

		} else {
			u32	crc, crc2;
			int	is_last;

			/* zero length EEM packet? */
			if (header == 0)
				continue;

			/*
			 * EEM data packet header :
			 * b0..13:	length of ethernet frame
			 * b14:		bmCRC
			 * b15:		0 (EEM data)
			 */
			len = header & 0x3FFF;

			/* bogus EEM payload? */
			if (skb->len < len)
				return 0;

			/* bogus ethernet frame? */
			if (len < (ETH_HLEN + ETH_FCS_LEN))
				goto next;

			/*
			 * Treat the last payload differently: framework
			 * code expects our "fixup" to have stripped off
			 * headers, so "skb" is a data packet (or error).
			 * Else if it's not the last payload, keep "skb"
			 * for further processing.
			 */
			is_last = (len == skb->len);
			if (is_last)
				skb2 = skb;
			else {
				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (unlikely(!skb2))
					return 0;
			}

			/*
			 * The bmCRC helps to denote when the CRC field in
			 * the Ethernet frame contains a calculated CRC:
			 *	bmCRC = 1	: CRC is calculated
			 *	bmCRC = 0	: CRC = 0xDEADBEEF
			 */
			if (header & BIT(14)) {
				crc = get_unaligned_le32(skb2->data
						+ len - ETH_FCS_LEN);
				crc2 = ~crc32_le(~0, skb2->data, skb2->len
						- ETH_FCS_LEN);
			} else {
				/* Sentinel is stored big-endian so it reads
				 * back as the literal 0xdeadbeef.
				 */
				crc = get_unaligned_be32(skb2->data
						+ len - ETH_FCS_LEN);
				crc2 = 0xdeadbeef;
			}
			skb_trim(skb2, len - ETH_FCS_LEN);

			/* For the last payload, return the CRC verdict and
			 * let usbnet hand "skb" itself up the stack.
			 */
			if (is_last)
				return crc == crc2;

			if (unlikely(crc != crc2)) {
				dev->net->stats.rx_errors++;
				dev_kfree_skb_any(skb2);
			} else
				usbnet_skb_return(dev, skb2);
		}

next:
		skb_pull(skb, len);
	} while (skb->len);

	return 1;
}

static const struct driver_info eem_info = {
	.description =	"CDC EEM Device",
	.flags =	FLAG_ETHER | FLAG_POINTTOPOINT,
	.bind =		eem_bind,
	.rx_fixup =	eem_rx_fixup,
	.tx_fixup =	eem_tx_fixup,
};

/*-------------------------------------------------------------------------*/

static const struct usb_device_id products[] = {
{
	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_EEM,
			USB_CDC_PROTO_EEM),
	.driver_info = (unsigned long) &eem_info,
},
{
	/* EMPTY == end of list */
},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver eem_driver = {
	.name =		"cdc_eem",
	.id_table =	products,
	.probe =	usbnet_probe,
	.disconnect =	usbnet_disconnect,
	.suspend =	usbnet_suspend,
	.resume =	usbnet_resume,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(eem_driver);

MODULE_AUTHOR("Omar Laazimani <[email protected]>");
MODULE_DESCRIPTION("USB CDC EEM");
MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0
/*
 * Serial port driver for NXP LPC18xx/43xx UART
 *
 * Copyright (C) 2015 Joachim Eastwood <[email protected]>
 *
 * Based on 8250_mtk.c:
 * Copyright (c) 2014 MundoReader S.L.
 * Matthias Brugger <[email protected]>
 */

#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "8250.h"

/* Additional LPC18xx/43xx 8250 registers and bits */
#define LPC18XX_UART_RS485CTRL		(0x04c / sizeof(u32))
#define  LPC18XX_UART_RS485CTRL_NMMEN	BIT(0)
#define  LPC18XX_UART_RS485CTRL_DCTRL	BIT(4)
#define  LPC18XX_UART_RS485CTRL_OINV	BIT(5)
#define LPC18XX_UART_RS485DLY		(0x054 / sizeof(u32))
#define LPC18XX_UART_RS485DLY_MAX	255

/*
 * Per-device driver state.
 * @dma:      8250 DMA configuration (rx/tx channel parameters)
 * @clk_uart: baud/functional clock
 * @clk_reg:  register interface clock
 * @line:     tty line number returned by serial8250_register_8250_port()
 */
struct lpc18xx_uart_data {
	struct uart_8250_dma dma;
	struct clk *clk_uart;
	struct clk *clk_reg;
	int line;
};

/*
 * Program the LPC18xx auto-RS485 control and direction-delay registers
 * from the generic serial_rs485 configuration.  The requested
 * delay_rts_after_send (ms) is converted to baud clocks, clamped to the
 * register maximum, and written back so userspace sees the actual delay.
 */
static int lpc18xx_rs485_config(struct uart_port *port, struct ktermios *termios,
				struct serial_rs485 *rs485)
{
	struct uart_8250_port *up = up_to_u8250p(port);
	u32 rs485_ctrl_reg = 0;
	u32 rs485_dly_reg = 0;
	unsigned baud_clk;

	if (rs485->flags & SER_RS485_ENABLED) {
		/* Normal multidrop mode + automatic direction control */
		rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_NMMEN |
				  LPC18XX_UART_RS485CTRL_DCTRL;

		if (rs485->flags & SER_RS485_RTS_ON_SEND)
			rs485_ctrl_reg |= LPC18XX_UART_RS485CTRL_OINV;
	}

	if (rs485->delay_rts_after_send) {
		/*
		 * NOTE(review): dl_read() returning 0 (divisor latch not yet
		 * programmed) would divide by zero here — verify callers
		 * guarantee a configured baud rate before RS485 setup.
		 */
		baud_clk = port->uartclk / up->dl_read(up);
		rs485_dly_reg = DIV_ROUND_UP(rs485->delay_rts_after_send
						* baud_clk, MSEC_PER_SEC);

		if (rs485_dly_reg > LPC18XX_UART_RS485DLY_MAX)
			rs485_dly_reg = LPC18XX_UART_RS485DLY_MAX;

		/* Calculate the resulting delay in ms */
		rs485->delay_rts_after_send = (rs485_dly_reg * MSEC_PER_SEC)
						/ baud_clk;
	}

	serial_out(up, LPC18XX_UART_RS485CTRL, rs485_ctrl_reg);
	serial_out(up, LPC18XX_UART_RS485DLY, rs485_dly_reg);

	return 0;
}

/* Register write hook: 32-bit MMIO with a quirk for the FCR DMA bit. */
static void lpc18xx_uart_serial_out(struct uart_port *p, int offset, int value)
{
	/*
	 * For DMA mode one must ensure that the UART_FCR_DMA_SELECT
	 * bit is set when FIFO is enabled. Even if DMA is not used
	 * setting this bit doesn't seem to affect anything.
	 */
	if (offset == UART_FCR && (value & UART_FCR_ENABLE_FIFO))
		value |= UART_FCR_DMA_SELECT;

	offset = offset << p->regshift;
	writel(value, p->membase + offset);
}

static const struct serial_rs485 lpc18xx_rs485_supported = {
	.flags = SER_RS485_ENABLED | SER_RS485_RTS_ON_SEND | SER_RS485_RTS_AFTER_SEND,
	.delay_rts_after_send = 1,
	/* Delay RTS before send is not supported */
};

/*
 * Probe: map registers, acquire and enable the "reg" and "uartclk"
 * clocks (in that order), then register an 8250 port with DMA and the
 * LPC18xx-specific RS485/serial_out hooks.  Clocks are torn down in
 * reverse order on failure.
 */
static int lpc18xx_serial_probe(struct platform_device *pdev)
{
	struct lpc18xx_uart_data *data;
	struct uart_8250_port uart;
	struct resource *res;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "memory resource not found");
		return -EINVAL;
	}

	memset(&uart, 0, sizeof(uart));

	uart.port.membase = devm_ioremap(&pdev->dev, res->start,
					 resource_size(res));
	if (!uart.port.membase)
		return -ENOMEM;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->clk_uart = devm_clk_get(&pdev->dev, "uartclk");
	if (IS_ERR(data->clk_uart)) {
		dev_err(&pdev->dev, "uart clock not found\n");
		return PTR_ERR(data->clk_uart);
	}

	data->clk_reg = devm_clk_get(&pdev->dev, "reg");
	if (IS_ERR(data->clk_reg)) {
		dev_err(&pdev->dev, "reg clock not found\n");
		return PTR_ERR(data->clk_reg);
	}

	ret = clk_prepare_enable(data->clk_reg);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable reg clock\n");
		return ret;
	}

	ret = clk_prepare_enable(data->clk_uart);
	if (ret) {
		dev_err(&pdev->dev, "unable to enable uart clock\n");
		goto dis_clk_reg;
	}

	data->dma.rx_param = data;
	data->dma.tx_param = data;

	spin_lock_init(&uart.port.lock);
	uart.port.dev = &pdev->dev;
	uart.port.mapbase = res->start;
	uart.port.type = PORT_16550A;
	uart.port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE | UPF_SKIP_TEST;
	uart.port.uartclk = clk_get_rate(data->clk_uart);
	uart.port.private_data = data;
	uart.port.rs485_config = lpc18xx_rs485_config;
	uart.port.rs485_supported = lpc18xx_rs485_supported;
	uart.port.serial_out = lpc18xx_uart_serial_out;

	ret = uart_read_port_properties(&uart.port);
	if (ret)
		goto dis_uart_clk;

	/* Force 32-bit, word-aligned register access regardless of DT. */
	uart.port.iotype = UPIO_MEM32;
	uart.port.regshift = 2;

	uart.dma = &data->dma;
	uart.dma->rxconf.src_maxburst = 1;
	uart.dma->txconf.dst_maxburst = 1;

	ret = serial8250_register_8250_port(&uart);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register 8250 port\n");
		goto dis_uart_clk;
	}

	data->line = ret;
	platform_set_drvdata(pdev, data);

	return 0;

dis_uart_clk:
	clk_disable_unprepare(data->clk_uart);
dis_clk_reg:
	clk_disable_unprepare(data->clk_reg);
	return ret;
}

/* Remove: unregister the port, then disable clocks in reverse order. */
static void lpc18xx_serial_remove(struct platform_device *pdev)
{
	struct lpc18xx_uart_data *data = platform_get_drvdata(pdev);

	serial8250_unregister_port(data->line);
	clk_disable_unprepare(data->clk_uart);
	clk_disable_unprepare(data->clk_reg);
}

static const struct of_device_id lpc18xx_serial_match[] = {
	{ .compatible = "nxp,lpc1850-uart" },
	{ },
};
MODULE_DEVICE_TABLE(of, lpc18xx_serial_match);

static struct platform_driver lpc18xx_serial_driver = {
	.probe  = lpc18xx_serial_probe,
	.remove = lpc18xx_serial_remove,
	.driver = {
		.name = "lpc18xx-uart",
		.of_match_table = lpc18xx_serial_match,
	},
};
module_platform_driver(lpc18xx_serial_driver);

MODULE_AUTHOR("Joachim Eastwood <[email protected]>");
MODULE_DESCRIPTION("Serial port driver NXP LPC18xx/43xx devices");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2023 Intel Corporation
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_notif_wait_h__
#define __iwl_notif_wait_h__

#include <linux/wait.h>

#include "iwl-trans.h"

/**
 * struct iwl_notif_wait_data - shared state for all notification waiters
 * @notif_waits: list of pending struct iwl_notification_wait entries
 * @notif_wait_lock: protects @notif_waits
 * @notif_waitq: waitqueue that blocked waiters sleep on
 */
struct iwl_notif_wait_data {
	struct list_head notif_waits;
	spinlock_t notif_wait_lock;
	wait_queue_head_t notif_waitq;
};

/* Maximum number of command IDs a single waiter can listen for */
#define MAX_NOTIF_CMDS	5

/**
 * struct iwl_notification_wait - notification wait entry
 * @list: list head for global list
 * @fn: Function called with the notification. If the function
 *	returns true, the wait is over, if it returns false then
 *	the waiter stays blocked. If no function is given, any
 *	of the listed commands will unblock the waiter.
 * @fn_data: pointer to pass to the @fn's data argument
 * @cmds: command IDs
 * @n_cmds: number of command IDs
 * @triggered: waiter should be woken up
 * @aborted: wait was aborted
 *
 * This structure is not used directly, to wait for a
 * notification declare it on the stack, and call
 * iwl_init_notification_wait() with appropriate
 * parameters. Then do whatever will cause the ucode
 * to notify the driver, and to wait for that then
 * call iwl_wait_notification().
 *
 * Each notification is one-shot. If at some point we
 * need to support multi-shot notifications (which
 * can't be allocated on the stack) we need to modify
 * the code for them.
 */
struct iwl_notification_wait {
	struct list_head list;

	bool (*fn)(struct iwl_notif_wait_data *notif_data,
		   struct iwl_rx_packet *pkt, void *data);
	void *fn_data;

	u16 cmds[MAX_NOTIF_CMDS];
	u8 n_cmds;
	bool triggered, aborted;
};


/* caller functions */
void iwl_notification_wait_init(struct iwl_notif_wait_data *notif_data);
/* Returns true if the packet matched a waiter that should be woken. */
bool iwl_notification_wait(struct iwl_notif_wait_data *notif_data,
			   struct iwl_rx_packet *pkt);
void iwl_abort_notification_waits(struct iwl_notif_wait_data *notif_data);

/* Wake up all waiters; called after one or more waiters were triggered. */
static inline void
iwl_notification_notify(struct iwl_notif_wait_data *notif_data)
{
	wake_up_all(&notif_data->notif_waitq);
}

/* Match a received packet against waiters and wake them if it triggered. */
static inline void
iwl_notification_wait_notify(struct iwl_notif_wait_data *notif_data,
			     struct iwl_rx_packet *pkt)
{
	if (iwl_notification_wait(notif_data, pkt))
		iwl_notification_notify(notif_data);
}

/* user functions */
void __acquires(wait_entry)
iwl_init_notification_wait(struct iwl_notif_wait_data *notif_data,
			   struct iwl_notification_wait *wait_entry,
			   const u16 *cmds, int n_cmds,
			   bool (*fn)(struct iwl_notif_wait_data *notif_data,
				      struct iwl_rx_packet *pkt, void *data),
			   void *fn_data);

int __must_check __releases(wait_entry)
iwl_wait_notification(struct iwl_notif_wait_data *notif_data,
		      struct iwl_notification_wait *wait_entry,
		      unsigned long timeout);

void __releases(wait_entry)
iwl_remove_notification(struct iwl_notif_wait_data *notif_data,
			struct iwl_notification_wait *wait_entry);

#endif /* __iwl_notif_wait_h__ */
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00mac Abstract: rt2x00 generic mac80211 routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, struct data_queue *queue, struct sk_buff *frag_skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb); struct ieee80211_tx_info *rts_info; struct sk_buff *skb; unsigned int data_length; int retval = 0; if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) data_length = sizeof(struct ieee80211_cts); else data_length = sizeof(struct ieee80211_rts); skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom); if (unlikely(!skb)) { rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n"); return -ENOMEM; } skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); skb_put(skb, data_length); /* * Copy TX information over from original frame to * RTS/CTS frame. Note that we set the no encryption flag * since we don't want this frame to be encrypted. * RTS frames should be acked, while CTS-to-self frames * should not. The ready for TX flag is cleared to prevent * it being automatically send when the descriptor is * written to the hardware. */ memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); rts_info = IEEE80211_SKB_CB(skb); rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT; if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) rts_info->flags |= IEEE80211_TX_CTL_NO_ACK; else rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK; /* Disable hardware encryption */ rts_info->control.hw_key = NULL; /* * RTS/CTS frame should use the length of the frame plus any * encryption overhead that will be added by the hardware. 
*/ data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, frag_skb->data, data_length, tx_info, (struct ieee80211_cts *)(skb->data)); else ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif, frag_skb->data, data_length, tx_info, (struct ieee80211_rts *)(skb->data)); retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); if (retval) { dev_kfree_skb_any(skb); rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); } return retval; } void rt2x00mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct rt2x00_dev *rt2x00dev = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); enum data_queue_qid qid = skb_get_queue_mapping(skb); struct data_queue *queue = NULL; /* * Mac80211 might be calling this function while we are trying * to remove the device or perhaps suspending it. * Note that we can only stop the TX queues inside the TX path * due to possible race conditions in mac80211. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) goto exit_free_skb; /* * Use the ATIM queue if appropriate and present. */ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) qid = QID_ATIM; queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); if (unlikely(!queue)) { rt2x00_err(rt2x00dev, "Attempt to send packet over invalid queue %d\n" "Please file bug report to %s\n", qid, DRV_PROJECT); goto exit_free_skb; } /* * If CTS/RTS is required. create and queue that frame first. * Make sure we have at least enough entries available to send * this CTS/RTS frame as well as the data frame. * Note that when the driver has set the set_rts_threshold() * callback function it doesn't need software generation of * either RTS or CTS-to-self frame and handles everything * inside the hardware. 
*/ if (!rt2x00dev->ops->hw->set_rts_threshold && (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT))) { if (rt2x00queue_available(queue) <= 1) { /* * Recheck for full queue under lock to avoid race * conditions with rt2x00lib_txdone(). */ spin_lock(&queue->tx_lock); if (rt2x00queue_threshold(queue)) rt2x00queue_pause_queue(queue); spin_unlock(&queue->tx_lock); goto exit_free_skb; } if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) goto exit_free_skb; } if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) goto exit_free_skb; return; exit_free_skb: ieee80211_free_txskb(hw, skb); } EXPORT_SYMBOL_GPL(rt2x00mac_tx); int rt2x00mac_start(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { /* * This is special case for ieee80211_restart_hw(), otherwise * mac80211 never call start() two times in row without stop(); */ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); rt2x00lib_stop(rt2x00dev); } return rt2x00lib_start(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00mac_start); void rt2x00mac_stop(struct ieee80211_hw *hw, bool suspend) { struct rt2x00_dev *rt2x00dev = hw->priv; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; rt2x00lib_stop(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00mac_stop); void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct rt2x00_dev *rt2x00dev = hw->priv; if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART) clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); } EXPORT_SYMBOL_GPL(rt2x00mac_reconfig_complete); int rt2x00mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); struct data_queue *queue = rt2x00dev->bcn; struct queue_entry *entry = NULL; 
unsigned int i; /* * Don't allow interfaces to be added * the device has disappeared. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) return -ENODEV; /* * Loop through all beacon queues to find a free * entry. Since there are as much beacon entries * as the maximum interfaces, this search shouldn't * fail. */ for (i = 0; i < queue->limit; i++) { entry = &queue->entries[i]; if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags)) break; } if (unlikely(i == queue->limit)) return -ENOBUFS; /* * We are now absolutely sure the interface can be created, * increase interface count and start initialization. */ if (vif->type == NL80211_IFTYPE_AP) rt2x00dev->intf_ap_count++; else rt2x00dev->intf_sta_count++; mutex_init(&intf->beacon_skb_mutex); intf->beacon = entry; /* * The MAC address must be configured after the device * has been initialized. Otherwise the device can reset * the MAC registers. * The BSSID address must only be configured in AP mode, * however we should not send an empty BSSID address for * STA interfaces at this time, since this can cause * invalid behavior in the device. */ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, vif->addr, NULL); /* * Some filters depend on the current working mode. We can force * an update during the next configure_filter() run by mac80211 by * resetting the current packet_filter state. */ rt2x00dev->packet_filter = 0; return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); void rt2x00mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); /* * Don't allow interfaces to be remove while * either the device has disappeared or when * no interface is present. 
*/ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) || (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count)) return; if (vif->type == NL80211_IFTYPE_AP) rt2x00dev->intf_ap_count--; else rt2x00dev->intf_sta_count--; /* * Release beacon entry so it is available for * new interfaces again. */ clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags); /* * Make sure the bssid and mac address registers * are cleared to prevent false ACKing of frames. */ rt2x00lib_config_intf(rt2x00dev, intf, NL80211_IFTYPE_UNSPECIFIED, NULL, NULL); } EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) { struct rt2x00_dev *rt2x00dev = hw->priv; struct ieee80211_conf *conf = &hw->conf; /* * mac80211 might be calling this function while we are trying * to remove the device or perhaps suspending it. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; /* * Some configuration parameters (e.g. channel and antenna values) can * only be set when the radio is enabled, but do require the RX to * be off. During this period we should keep link tuning enabled, * if for any reason the link tuner must be reset, this will be * handled by rt2x00lib_config(). */ rt2x00queue_stop_queue(rt2x00dev->rx); /* Do not race with link tuner. */ mutex_lock(&rt2x00dev->conf_mutex); /* * When we've just turned on the radio, we want to reprogram * everything to ensure a consistent state */ rt2x00lib_config(rt2x00dev, conf, changed); /* * After the radio has been enabled we need to configure * the antenna to the default settings. rt2x00lib_config_antenna() * should determine if any action should be taken based on * checking if diversity has been enabled or no antenna changes * have been made since the last configuration change. 
	 */
	rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);

	mutex_unlock(&rt2x00dev->conf_mutex);

	/* Turn RX back on */
	rt2x00queue_start_queue(rt2x00dev->rx);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_config);

/*
 * mac80211 configure_filter callback: sanitize the requested RX filter
 * flags and hand them to the chipset-specific config_filter op.
 */
void rt2x00mac_configure_filter(struct ieee80211_hw *hw,
				unsigned int changed_flags,
				unsigned int *total_flags,
				u64 multicast)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * Mask off any flags we are going to ignore
	 * from the total_flags field.
	 */
	*total_flags &=
	    FIF_ALLMULTI |
	    FIF_FCSFAIL |
	    FIF_PLCPFAIL |
	    FIF_CONTROL |
	    FIF_PSPOLL |
	    FIF_OTHER_BSS;

	/*
	 * Apply some rules to the filters:
	 * - Some filters imply different filters to be set.
	 * - Some things we can't filter out at all.
	 * - Multicast filter seems to kill broadcast traffic so never use it.
	 */
	*total_flags |= FIF_ALLMULTI;

	/*
	 * If the device has a single filter for all control frames,
	 * FIF_CONTROL and FIF_PSPOLL flags imply each other.
	 * And if the device has more than one filter for control frames
	 * of different types, but has no separate filter for PS Poll frames,
	 * FIF_CONTROL flag implies FIF_PSPOLL.
	 */
	if (!rt2x00_has_cap_control_filters(rt2x00dev)) {
		if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL)
			*total_flags |= FIF_CONTROL | FIF_PSPOLL;
	}
	if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) {
		if (*total_flags & FIF_CONTROL)
			*total_flags |= FIF_PSPOLL;
	}

	rt2x00dev->packet_filter = *total_flags;

	rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags);
}
EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter);

/*
 * Interface iterator for rt2x00mac_set_tim(): flag beacon-carrying
 * interfaces (AP/IBSS/mesh) for a delayed beacon update.
 */
static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
				   struct ieee80211_vif *vif)
{
	struct rt2x00_intf *intf = vif_to_intf(vif);

	if (vif->type != NL80211_IFTYPE_AP &&
	    vif->type != NL80211_IFTYPE_ADHOC &&
	    vif->type != NL80211_IFTYPE_MESH_POINT)
		return;

	set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
}

int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
		      bool set)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
		return 0;

	ieee80211_iterate_active_interfaces_atomic(
		rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
		rt2x00mac_set_tim_iter, rt2x00dev);

	/* queue work to update the beacon template */
	ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work);
	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_tim);

#ifdef CONFIG_RT2X00_LIB_CRYPTO
/*
 * Split a TKIP key blob into the encryption key and the TX/RX MIC keys,
 * copying only the parts that are actually present in @key.
 */
static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len)
{
	if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
		memcpy(crypto->key,
		       &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
		       sizeof(crypto->key));

	if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
		memcpy(crypto->tx_mic,
		       &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
		       sizeof(crypto->tx_mic));

	if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
		memcpy(crypto->rx_mic,
		       &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
		       sizeof(crypto->rx_mic));
}

int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
		      struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		      struct ieee80211_key_conf *key)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	int (*set_key) (struct rt2x00_dev *rt2x00dev,
			struct rt2x00lib_crypto *crypto,
			struct ieee80211_key_conf *key);
	struct rt2x00lib_crypto crypto;
	static const u8 bcast_addr[ETH_ALEN] =
		{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, };
	struct rt2x00_sta *sta_priv = NULL;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return 0;

	/* The hardware can't do MFP */
	if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || (sta && sta->mfp))
		return -EOPNOTSUPP;

	/*
	 * To support IBSS RSN, don't program group keys in IBSS, the
	 * hardware will then not attempt to decrypt the frames.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC &&
	    !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return -EOPNOTSUPP;

	if (key->keylen > 32)
		return -ENOSPC;

	memset(&crypto, 0, sizeof(crypto));

	crypto.bssidx = rt2x00lib_get_bssidx(rt2x00dev, vif);
	crypto.cipher = rt2x00crypto_key_to_cipher(key);
	if (crypto.cipher == CIPHER_NONE)
		return -EOPNOTSUPP;
	if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
		return -EOPNOTSUPP;

	crypto.cmd = cmd;

	if (sta) {
		crypto.address = sta->addr;
		sta_priv = sta_to_rt2x00_sta(sta);
		crypto.wcid = sta_priv->wcid;
	} else
		crypto.address = bcast_addr;

	if (crypto.cipher == CIPHER_TKIP)
		memcpy_tkip(&crypto, &key->key[0], key->keylen);
	else
		memcpy(crypto.key, &key->key[0], key->keylen);

	/*
	 * Each BSS has a maximum of 4 shared keys.
	 * Shared key index values:
	 *	0) BSS0 key0
	 *	1) BSS0 key1
	 *	...
	 *	4) BSS1 key0
	 *	...
	 *	8) BSS2 key0
	 *	...
	 * Both pairwise and shared key indices are determined by
	 * driver. This is required because the hardware requires
	 * keys to be assigned in correct order (When key 1 is
	 * provided but key 0 is not, then the key is not found
	 * by the hardware during RX).
	 */
	if (cmd == SET_KEY)
		key->hw_key_idx = 0;

	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
		set_key = rt2x00dev->ops->lib->config_pairwise_key;
	else
		set_key = rt2x00dev->ops->lib->config_shared_key;

	if (!set_key)
		return -EOPNOTSUPP;

	return set_key(rt2x00dev, &crypto, key);
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_key);
#endif /* CONFIG_RT2X00_LIB_CRYPTO */

/* mac80211 sw_scan_start callback: pause link tuning during a scan. */
void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     const u8 *mac_addr)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
	rt2x00link_stop_tuner(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start);

/* mac80211 sw_scan_complete callback: resume link tuning after a scan. */
void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags);
	rt2x00link_start_tuner(rt2x00dev);
}
EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete);

int rt2x00mac_get_stats(struct ieee80211_hw *hw,
			struct ieee80211_low_level_stats *stats)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;

	/*
	 * The dot11ACKFailureCount, dot11RTSFailureCount and
	 * dot11RTSSuccessCount are updated in interrupt time.
	 * dot11FCSErrorCount is updated in the link tuner.
	 */
	memcpy(stats, &rt2x00dev->low_level_stats, sizeof(*stats));

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_stats);

void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
				struct ieee80211_vif *vif,
				struct ieee80211_bss_conf *bss_conf,
				u64 changes)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_intf *intf = vif_to_intf(vif);

	/*
	 * mac80211 might be calling this function while we are trying
	 * to remove the device or perhaps suspending it.
	 */
	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return;

	/*
	 * Update the BSSID.
	 */
	if (changes & BSS_CHANGED_BSSID)
		rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
				      bss_conf->bssid);

	/*
	 * Start/stop beaconing.
	 */
	if (changes & BSS_CHANGED_BEACON_ENABLED) {
		mutex_lock(&intf->beacon_skb_mutex);

		/*
		 * Clear the 'enable_beacon' flag and clear beacon because
		 * the beacon queue has been stopped after hardware reset.
		 */
		if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags) &&
		    intf->enable_beacon) {
			intf->enable_beacon = false;
			rt2x00queue_clear_beacon(rt2x00dev, vif);
		}

		if (!bss_conf->enable_beacon && intf->enable_beacon) {
			rt2x00dev->intf_beaconing--;
			intf->enable_beacon = false;

			if (rt2x00dev->intf_beaconing == 0) {
				/*
				 * Last beaconing interface disabled
				 * -> stop beacon queue.
				 */
				rt2x00queue_stop_queue(rt2x00dev->bcn);
			}

			/*
			 * Clear beacon in the H/W for this vif. This is needed
			 * to disable beaconing on this particular interface
			 * and keep it running on other interfaces.
			 */
			rt2x00queue_clear_beacon(rt2x00dev, vif);
		} else if (bss_conf->enable_beacon && !intf->enable_beacon) {
			rt2x00dev->intf_beaconing++;
			intf->enable_beacon = true;

			/*
			 * Upload beacon to the H/W. This is only required on
			 * USB devices. PCI devices fetch beacons periodically.
			 */
			if (rt2x00_is_usb(rt2x00dev))
				rt2x00queue_update_beacon(rt2x00dev, vif);

			if (rt2x00dev->intf_beaconing == 1) {
				/*
				 * First beaconing interface enabled
				 * -> start beacon queue.
				 */
				rt2x00queue_start_queue(rt2x00dev->bcn);
			}
		}

		mutex_unlock(&intf->beacon_skb_mutex);
	}

	/*
	 * When the association status has changed we must reset the link
	 * tuner counter. This is because some drivers determine if they
	 * should perform link tuning based on the number of seconds
	 * while associated or not associated.
	 */
	if (changes & BSS_CHANGED_ASSOC) {
		rt2x00dev->link.count = 0;

		if (vif->cfg.assoc)
			rt2x00dev->intf_associated++;
		else
			rt2x00dev->intf_associated--;

		rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated);
	}

	/*
	 * When the erp information has changed, we should perform
	 * additional configuration steps. For all other changes we are done.
	 */
	if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE |
		       BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BASIC_RATES |
		       BSS_CHANGED_BEACON_INT | BSS_CHANGED_HT))
		rt2x00lib_config_erp(rt2x00dev, intf, bss_conf, changes);
}
EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed);

int rt2x00mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		      unsigned int link_id, u16 queue_idx,
		      const struct ieee80211_tx_queue_params *params)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
	if (unlikely(!queue))
		return -EINVAL;

	/*
	 * The passed variables are stored as real value ((2^n)-1).
	 * Ralink registers require to know the bit number 'n'.
	 */
	if (params->cw_min > 0)
		queue->cw_min = fls(params->cw_min);
	else
		queue->cw_min = 5; /* cw_min: 2^5 = 32. */

	if (params->cw_max > 0)
		queue->cw_max = fls(params->cw_max);
	else
		queue->cw_max = 10; /* cw_max: 2^10 = 1024. */

	queue->aifs = params->aifs;
	queue->txop = params->txop;

	rt2x00_dbg(rt2x00dev,
		   "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d\n",
		   queue_idx, queue->cw_min, queue->cw_max, queue->aifs,
		   queue->txop);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_conf_tx);

/* mac80211 rfkill_poll callback: report HW rfkill state to the wiphy. */
void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	bool active = !!rt2x00dev->ops->lib->rfkill_poll(rt2x00dev);

	wiphy_rfkill_set_hw_state(hw->wiphy, !active);
}
EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);

/* mac80211 flush callback: drain (or drop) all pending TX queues. */
void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
		     u32 queues, bool drop)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
		return;

	set_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags);

	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_flush_queue(queue, drop);

	clear_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags);
}
EXPORT_SYMBOL_GPL(rt2x00mac_flush);

int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct link_ant *ant = &rt2x00dev->link.ant;
	struct antenna_setup *def = &rt2x00dev->default_ant;
	struct antenna_setup setup;

	/*
	 * The antenna value is not supposed to be 0,
	 * or exceed the maximum number of antenna's.
	 */
	if (!tx_ant || (tx_ant & ~3) || !rx_ant || (rx_ant & ~3))
		return -EINVAL;

	/*
	 * When the client tried to configure the antenna to or from
	 * diversity mode, we must reset the default antenna as well
	 * as that controls the diversity switch.
	 */
	if (ant->flags & ANTENNA_TX_DIVERSITY && tx_ant != 3)
		ant->flags &= ~ANTENNA_TX_DIVERSITY;
	if (ant->flags & ANTENNA_RX_DIVERSITY && rx_ant != 3)
		ant->flags &= ~ANTENNA_RX_DIVERSITY;

	/*
	 * If diversity is being enabled, check if we need hardware
	 * or software diversity. In the latter case, reset the value,
	 * and make sure we update the antenna flags to have the
	 * link tuner pick up the diversity tuning.
	 */
	if (tx_ant == 3 && def->tx == ANTENNA_SW_DIVERSITY) {
		tx_ant = ANTENNA_SW_DIVERSITY;
		ant->flags |= ANTENNA_TX_DIVERSITY;
	}

	if (rx_ant == 3 && def->rx == ANTENNA_SW_DIVERSITY) {
		rx_ant = ANTENNA_SW_DIVERSITY;
		ant->flags |= ANTENNA_RX_DIVERSITY;
	}

	setup.tx = tx_ant;
	setup.rx = rx_ant;
	setup.rx_chain_num = 0;
	setup.tx_chain_num = 0;

	rt2x00lib_config_antenna(rt2x00dev, setup);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_set_antenna);

int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct link_ant *ant = &rt2x00dev->link.ant;
	struct antenna_setup *active = &rt2x00dev->link.ant.active;

	/*
	 * When software diversity is active, we must report this to the
	 * client and not the current active antenna state.
	 */
	if (ant->flags & ANTENNA_TX_DIVERSITY)
		*tx_ant = ANTENNA_HW_DIVERSITY;
	else
		*tx_ant = active->tx;

	if (ant->flags & ANTENNA_RX_DIVERSITY)
		*rx_ant = ANTENNA_HW_DIVERSITY;
	else
		*rx_ant = active->rx;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_antenna);

/* ethtool-style ring statistics: sum TX queue fill levels and limits. */
void rt2x00mac_get_ringparam(struct ieee80211_hw *hw,
			     u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		*tx += queue->length;
		*tx_max += queue->limit;
	}

	*rx = rt2x00dev->rx->length;
	*rx_max = rt2x00dev->rx->limit;
}
EXPORT_SYMBOL_GPL(rt2x00mac_get_ringparam);

/* Return true when any TX data queue still holds frames. */
bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;

	tx_queue_for_each(rt2x00dev, queue) {
		if (!rt2x00queue_empty(queue))
			return true;
	}

	return false;
}
EXPORT_SYMBOL_GPL(rt2x00mac_tx_frames_pending);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2014 Tomasz Figa <[email protected]>
 *
 * Based on Exynos Audio Subsystem Clock Controller driver:
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Author: Padmavathi Venna <[email protected]>
 *
 * Driver for Audio Subsystem Clock Controller of S5PV210-compatible SoCs.
 */

#include <linux/io.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/of_address.h>
#include <linux/syscore_ops.h>
#include <linux/init.h>
#include <linux/platform_device.h>

#include <dt-bindings/clock/s5pv210-audss.h>

/* Protects RMW access to the AUDSS mux/divider/gate registers. */
static DEFINE_SPINLOCK(lock);
static void __iomem *reg_base;
static struct clk_hw_onecell_data *clk_data;

/* AUDSS register offsets */
#define ASS_CLK_SRC 0x0
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8

#ifdef CONFIG_PM_SLEEP
/* {register offset, saved value} pairs preserved across suspend/resume */
static unsigned long reg_save[][2] = {
	{ASS_CLK_SRC,  0},
	{ASS_CLK_DIV,  0},
	{ASS_CLK_GATE, 0},
};

/* Save AUDSS clock registers before entering suspend. */
static int s5pv210_audss_clk_suspend(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(reg_save); i++)
		reg_save[i][1] = readl(reg_base + reg_save[i][0]);

	return 0;
}

/* Restore AUDSS clock registers on resume. */
static void s5pv210_audss_clk_resume(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(reg_save); i++)
		writel(reg_save[i][1], reg_base + reg_save[i][0]);
}

static struct syscore_ops s5pv210_audss_clk_syscore_ops = {
	.suspend	= s5pv210_audss_clk_suspend,
	.resume		= s5pv210_audss_clk_resume,
};
#endif /* CONFIG_PM_SLEEP */

/* register s5pv210_audss clocks */
static int s5pv210_audss_clk_probe(struct platform_device *pdev)
{
	int i, ret = 0;
	const char *mout_audss_p[2];
	const char *mout_i2s_p[3];
	const char *hclk_p;
	struct clk_hw **clk_table;
	struct clk *hclk, *pll_ref, *pll_in, *cdclk, *sclk_audio;

	reg_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(reg_base))
		return PTR_ERR(reg_base);

	clk_data = devm_kzalloc(&pdev->dev,
				struct_size(clk_data, hws, AUDSS_MAX_CLKS),
				GFP_KERNEL);
	if (!clk_data)
		return -ENOMEM;

	clk_data->num = AUDSS_MAX_CLKS;
	clk_table = clk_data->hws;

	/* Mandatory parent clocks; probe fails without them. */
	hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(hclk)) {
		dev_err(&pdev->dev, "failed to get hclk clock\n");
		return PTR_ERR(hclk);
	}

	pll_in = devm_clk_get(&pdev->dev, "fout_epll");
	if (IS_ERR(pll_in)) {
		dev_err(&pdev->dev, "failed to get fout_epll clock\n");
		return PTR_ERR(pll_in);
	}

	sclk_audio = devm_clk_get(&pdev->dev, "sclk_audio0");
	if (IS_ERR(sclk_audio)) {
		dev_err(&pdev->dev, "failed to get sclk_audio0 clock\n");
		return PTR_ERR(sclk_audio);
	}

	/* iiscdclk0 is an optional external I2S codec clock */
	cdclk = devm_clk_get(&pdev->dev, "iiscdclk0");
	/* xxti is optional too; fall back to the name if not provided. */
	pll_ref = devm_clk_get(&pdev->dev, "xxti");

	if (!IS_ERR(pll_ref))
		mout_audss_p[0] = __clk_get_name(pll_ref);
	else
		mout_audss_p[0] = "xxti";
	mout_audss_p[1] = __clk_get_name(pll_in);
	clk_table[CLK_MOUT_AUDSS] = clk_hw_register_mux(NULL, "mout_audss",
				mout_audss_p, ARRAY_SIZE(mout_audss_p),
				CLK_SET_RATE_NO_REPARENT,
				reg_base + ASS_CLK_SRC, 0, 1, 0, &lock);

	mout_i2s_p[0] = "mout_audss";
	if (!IS_ERR(cdclk))
		mout_i2s_p[1] = __clk_get_name(cdclk);
	else
		mout_i2s_p[1] = "iiscdclk0";
	mout_i2s_p[2] = __clk_get_name(sclk_audio);
	clk_table[CLK_MOUT_I2S_A] = clk_hw_register_mux(NULL, "mout_i2s_audss",
				mout_i2s_p, ARRAY_SIZE(mout_i2s_p),
				CLK_SET_RATE_NO_REPARENT,
				reg_base + ASS_CLK_SRC, 2, 2, 0, &lock);

	clk_table[CLK_DOUT_AUD_BUS] = clk_hw_register_divider(NULL,
				"dout_aud_bus", "mout_audss", 0,
				reg_base + ASS_CLK_DIV, 0, 4, 0, &lock);
	clk_table[CLK_DOUT_I2S_A] = clk_hw_register_divider(NULL,
				"dout_i2s_audss", "mout_i2s_audss", 0,
				reg_base + ASS_CLK_DIV, 4, 4, 0, &lock);

	clk_table[CLK_I2S] = clk_hw_register_gate(NULL, "i2s_audss",
				"dout_i2s_audss", CLK_SET_RATE_PARENT,
				reg_base + ASS_CLK_GATE, 6, 0, &lock);

	hclk_p = __clk_get_name(hclk);

	/* Bus clock gates; CLK_IGNORE_UNUSED keeps them running when idle. */
	clk_table[CLK_HCLK_I2S] = clk_hw_register_gate(NULL, "hclk_i2s_audss",
				hclk_p, CLK_IGNORE_UNUSED,
				reg_base + ASS_CLK_GATE, 5, 0, &lock);
	clk_table[CLK_HCLK_UART] = clk_hw_register_gate(NULL, "hclk_uart_audss",
				hclk_p, CLK_IGNORE_UNUSED,
				reg_base + ASS_CLK_GATE, 4, 0, &lock);
	clk_table[CLK_HCLK_HWA] = clk_hw_register_gate(NULL, "hclk_hwa_audss",
				hclk_p, CLK_IGNORE_UNUSED,
				reg_base + ASS_CLK_GATE, 3, 0, &lock);
	clk_table[CLK_HCLK_DMA] = clk_hw_register_gate(NULL, "hclk_dma_audss",
				hclk_p, CLK_IGNORE_UNUSED,
				reg_base + ASS_CLK_GATE, 2, 0, &lock);
	clk_table[CLK_HCLK_BUF] = clk_hw_register_gate(NULL, "hclk_buf_audss",
				hclk_p, CLK_IGNORE_UNUSED,
				reg_base + ASS_CLK_GATE, 1, 0, &lock);
	clk_table[CLK_HCLK_RP] = clk_hw_register_gate(NULL, "hclk_rp_audss",
				hclk_p, CLK_IGNORE_UNUSED,
				reg_base + ASS_CLK_GATE, 0, 0, &lock);

	for (i = 0; i < clk_data->num; i++) {
		if (IS_ERR(clk_table[i])) {
			dev_err(&pdev->dev, "failed to register clock %d\n", i);
			ret = PTR_ERR(clk_table[i]);
			goto unregister;
		}
	}

	ret = of_clk_add_hw_provider(pdev->dev.of_node, of_clk_hw_onecell_get,
				     clk_data);
	if (ret) {
		dev_err(&pdev->dev, "failed to add clock provider\n");
		goto unregister;
	}

#ifdef CONFIG_PM_SLEEP
	register_syscore_ops(&s5pv210_audss_clk_syscore_ops);
#endif

	return 0;

unregister:
	/* Roll back every clock that was registered successfully. */
	for (i = 0; i < clk_data->num; i++) {
		if (!IS_ERR(clk_table[i]))
			clk_hw_unregister(clk_table[i]);
	}

	return ret;
}

static const struct of_device_id s5pv210_audss_clk_of_match[] = {
	{ .compatible = "samsung,s5pv210-audss-clock", },
	{},
};

static struct platform_driver s5pv210_audss_clk_driver = {
	.driver	= {
		.name = "s5pv210-audss-clk",
		.suppress_bind_attrs = true,
		.of_match_table = s5pv210_audss_clk_of_match,
	},
	.probe = s5pv210_audss_clk_probe,
};

static int __init s5pv210_audss_clk_init(void)
{
	return platform_driver_register(&s5pv210_audss_clk_driver);
}
core_initcall(s5pv210_audss_clk_init);
// SPDX-License-Identifier: GPL-2.0
/*
 * ip30-irq.c: Highlevel interrupt handling for IP30 architecture.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/tick.h>
#include <linux/types.h>

#include <asm/irq_cpu.h>
#include <asm/sgi/heart.h>

#include "ip30-common.h"

/* Per-hwirq chip data: which CPU's IMR the interrupt is routed to. */
struct heart_irq_data {
	u64	*irq_mask;
	int	cpu;
};

/* Bitmap of HEART hwirqs that are allocated or reserved. */
static DECLARE_BITMAP(heart_irq_map, HEART_NUM_IRQS);

/* Shadow of each CPU's HEART interrupt mask register (IMR). */
static DEFINE_PER_CPU(unsigned long, irq_enable_mask);

/* Atomically allocate a free HEART hwirq number, or -ENOSPC. */
static inline int heart_alloc_int(void)
{
	int bit;

again:
	bit = find_first_zero_bit(heart_irq_map, HEART_NUM_IRQS);
	if (bit >= HEART_NUM_IRQS)
		return -ENOSPC;

	/* Raced with another allocator: retry from scratch. */
	if (test_and_set_bit(bit, heart_irq_map))
		goto again;

	return bit;
}

/* Chained handler for the HEART level-4 (error) interrupt line. */
static void ip30_error_irq(struct irq_desc *desc)
{
	u64 pending, mask, cause, error_irqs, err_reg;
	int cpu = smp_processor_id();
	int i;

	pending = heart_read(&heart_regs->isr);
	mask = heart_read(&heart_regs->imr[cpu]);
	cause = heart_read(&heart_regs->cause);
	error_irqs = (pending & HEART_L4_INT_MASK & mask);

	/* Bail if there's nothing to process (how did we get here, then?) */
	if (unlikely(!error_irqs))
		return;

	/* Prevent any of the error IRQs from firing again. */
	heart_write(mask & ~(pending), &heart_regs->imr[cpu]);

	/* Ack all error IRQs. */
	heart_write(HEART_L4_INT_MASK, &heart_regs->clear_isr);

	/*
	 * If we also have a cause value, then something happened, so loop
	 * through the error IRQs and report a "heart attack" for each one
	 * and print the value of the HEART cause register. This is really
	 * primitive right now, but it should hopefully work until a more
	 * robust error handling routine can be put together.
	 *
	 * Refer to heart.h for the HC_* macros to work out the cause
	 * that got us here.
	 */
	if (cause) {
		pr_alert("IP30: CPU%d: HEART ATTACK! ISR = 0x%.16llx, IMR = 0x%.16llx, CAUSE = 0x%.16llx\n",
			 cpu, pending, mask, cause);

		if (cause & HC_COR_MEM_ERR) {
			err_reg = heart_read(&heart_regs->mem_err_addr);
			pr_alert(" HEART_MEMERR_ADDR = 0x%.16llx\n", err_reg);
		}

		/* i = 63; i >= 51; i-- */
		for (i = HEART_ERR_MASK_END; i >= HEART_ERR_MASK_START; i--)
			if ((pending >> i) & 1)
				pr_alert(" HEART Error IRQ #%d\n", i);

		/* XXX: Seems possible to loop forever here, so panic(). */
		panic("IP30: Fatal Error !\n");
	}

	/* Unmask the error IRQs. */
	heart_write(mask, &heart_regs->imr[cpu]);
}

/* Chained handler for the HEART level-0/1/2 (normal + IPI) lines. */
static void ip30_normal_irq(struct irq_desc *desc)
{
	int cpu = smp_processor_id();
	struct irq_domain *domain;
	u64 pend, mask;
	int ret;

	pend = heart_read(&heart_regs->isr);
	mask = (heart_read(&heart_regs->imr[cpu]) &
		(HEART_L0_INT_MASK | HEART_L1_INT_MASK | HEART_L2_INT_MASK));

	pend &= mask;
	if (unlikely(!pend))
		return;

#ifdef CONFIG_SMP
	/* IPIs are handled inline; ack the bit, then dispatch. */
	if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_RESCHED_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_1),
			    &heart_regs->clear_isr);
		scheduler_ipi();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_0)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else if (pend & BIT_ULL(HEART_L2_INT_CALL_CPU_1)) {
		heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_1),
			    &heart_regs->clear_isr);
		generic_smp_call_function_interrupt();
	} else
#endif
	{
		/* Everything else goes through the HEART irq domain. */
		domain = irq_desc_get_handler_data(desc);
		ret = generic_handle_domain_irq(domain, __ffs(pend));
		if (ret)
			spurious_interrupt();
	}
}

static void ip30_ack_heart_irq(struct irq_data *d)
{
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}

static void ip30_mask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}

static void ip30_mask_and_ack_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	clear_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
	heart_write(BIT_ULL(d->hwirq), &heart_regs->clear_isr);
}

static void ip30_unmask_heart_irq(struct irq_data *d)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);
	unsigned long *mask = &per_cpu(irq_enable_mask, hd->cpu);

	set_bit(d->hwirq, mask);
	heart_write(*mask, &heart_regs->imr[hd->cpu]);
}

/* Re-route the hwirq to the first online CPU in @mask. */
static int ip30_set_heart_irq_affinity(struct irq_data *d,
				       const struct cpumask *mask, bool force)
{
	struct heart_irq_data *hd = irq_data_get_irq_chip_data(d);

	if (!hd)
		return -EINVAL;

	/* Mask on the old CPU before switching, unmask on the new one. */
	if (irqd_is_started(d))
		ip30_mask_and_ack_heart_irq(d);

	hd->cpu = cpumask_first_and(mask, cpu_online_mask);

	if (irqd_is_started(d))
		ip30_unmask_heart_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(hd->cpu));

	return 0;
}

static struct irq_chip heart_irq_chip = {
	.name			= "HEART",
	.irq_ack		= ip30_ack_heart_irq,
	.irq_mask		= ip30_mask_heart_irq,
	.irq_mask_ack		= ip30_mask_and_ack_heart_irq,
	.irq_unmask		= ip30_unmask_heart_irq,
	.irq_set_affinity	= ip30_set_heart_irq_affinity,
};

/* irq_domain alloc: bind one freshly-allocated HEART hwirq to @virq. */
static int heart_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs, void *arg)
{
	struct irq_alloc_info *info = arg;
	struct heart_irq_data *hd;
	int hwirq;

	if (nr_irqs > 1 || !info)
		return -EINVAL;

	hd = kzalloc(sizeof(*hd), GFP_KERNEL);
	if (!hd)
		return -ENOMEM;

	hwirq = heart_alloc_int();
	if (hwirq < 0) {
		kfree(hd);
		return -EAGAIN;
	}
	irq_domain_set_info(domain, virq, hwirq, &heart_irq_chip, hd,
			    handle_level_irq, NULL, NULL);

	return 0;
}

/* irq_domain free: release the hwirq bit and its chip data. */
static void heart_domain_free(struct irq_domain *domain,
			      unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *irqd;

	if (nr_irqs > 1)
		return;

	irqd = irq_domain_get_irq_data(domain, virq);
	if (irqd) {
		clear_bit(irqd->hwirq, heart_irq_map);
		kfree(irqd->chip_data);
	}
}

static const struct irq_domain_ops heart_domain_ops = {
	.alloc = heart_domain_alloc,
	.free  = heart_domain_free,
};

/* Enable and ack the per-CPU resched/call IPI bits for this CPU. */
void __init ip30_install_ipi(void)
{
	int cpu = smp_processor_id();
	unsigned long *mask = &per_cpu(irq_enable_mask, cpu);

	set_bit(HEART_L2_INT_RESCHED_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_RESCHED_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	set_bit(HEART_L2_INT_CALL_CPU_0 + cpu, mask);
	heart_write(BIT_ULL(HEART_L2_INT_CALL_CPU_0 + cpu),
		    &heart_regs->clear_isr);
	heart_write(*mask, &heart_regs->imr[cpu]);
}

void __init arch_init_irq(void)
{
	struct irq_domain *domain;
	struct fwnode_handle *fn;
	unsigned long *mask;
	int i;

	mips_cpu_irq_init();

	/* Mask all IRQs. */
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[0]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[1]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[2]);
	heart_write(HEART_CLR_ALL_MASK, &heart_regs->imr[3]);

	/* Ack everything. */
	heart_write(HEART_ACK_ALL_MASK, &heart_regs->clear_isr);

	/* Enable specific HEART error IRQs for each CPU. */
	mask = &per_cpu(irq_enable_mask, 0);
	*mask |= HEART_CPU0_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[0]);
	mask = &per_cpu(irq_enable_mask, 1);
	*mask |= HEART_CPU1_ERR_MASK;
	heart_write(*mask, &heart_regs->imr[1]);

	/*
	 * Some HEART bits are reserved by hardware or by software convention.
	 * Mark these as reserved right away so they won't be accidentally
	 * used later.
	 */
	set_bit(HEART_L0_INT_GENERIC, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_0, heart_irq_map);
	set_bit(HEART_L0_INT_FLOW_CTRL_HWTR_1, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_RESCHED_CPU_1, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_0, heart_irq_map);
	set_bit(HEART_L2_INT_CALL_CPU_1, heart_irq_map);
	set_bit(HEART_L3_INT_TIMER, heart_irq_map);

	/* Reserve the error interrupts (#51 to #63). */
	for (i = HEART_L4_INT_XWID_ERR_9; i <= HEART_L4_INT_HEART_EXCP; i++)
		set_bit(i, heart_irq_map);

	fn = irq_domain_alloc_named_fwnode("HEART");
	WARN_ON(fn == NULL);
	if (!fn)
		return;

	domain = irq_domain_create_linear(fn, HEART_NUM_IRQS,
					  &heart_domain_ops, NULL);
	WARN_ON(domain == NULL);
	if (!domain)
		return;

	irq_set_default_host(domain);

	/* All four MIPS CPU lines are chained into the HEART handlers. */
	irq_set_percpu_devid(IP30_HEART_L0_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L0_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L1_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L1_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_L2_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_L2_IRQ, ip30_normal_irq,
					 domain);
	irq_set_percpu_devid(IP30_HEART_ERR_IRQ);
	irq_set_chained_handler_and_data(IP30_HEART_ERR_IRQ, ip30_error_irq,
					 domain);
}
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013--2024 Intel Corporation
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/gfp_types.h>
#include <linux/math64.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include "ipu6.h"
#include "ipu6-bus.h"
#include "ipu6-cpd.h"
#include "ipu6-dma.h"

/* 15 entries + header */
#define MAX_PKG_DIR_ENT_CNT	16
/* 2 qword per entry/header */
#define PKG_DIR_ENT_LEN		2
/* PKG_DIR size in bytes */
#define PKG_DIR_SIZE		((MAX_PKG_DIR_ENT_CNT) *	\
				 (PKG_DIR_ENT_LEN) * sizeof(u64))
/* _IUPKDR_ */
#define PKG_DIR_HDR_MARK	0x5f4955504b44525fULL

/* $CPD */
#define CPD_HDR_MARK		0x44504324

#define MAX_MANIFEST_SIZE	(SZ_2K * sizeof(u32))
#define MAX_METADATA_SIZE	SZ_64K

#define MAX_COMPONENT_ID	127
#define MAX_COMPONENT_VERSION	0xffff

/* Fixed indices of the three top-level CPD entries. */
#define MANIFEST_IDX	0
#define METADATA_IDX	1
#define MODULEDATA_IDX	2
/*
 * PKG_DIR Entry (type == id)
 * 63:56        55      54:48   47:32   31:24   23:0
 * Rsvd         Rsvd    Type    Version Rsvd    Size
 */
#define PKG_DIR_SIZE_MASK	GENMASK_ULL(23, 0)
#define PKG_DIR_VERSION_MASK	GENMASK_ULL(47, 32)
#define PKG_DIR_TYPE_MASK	GENMASK_ULL(54, 48)

/* Return the idx'th CPD entry following the CPD header at @cpd. */
static inline const struct ipu6_cpd_ent *ipu6_cpd_get_entry(const void *cpd,
							    u8 idx)
{
	const struct ipu6_cpd_hdr *cpd_hdr = cpd;
	const struct ipu6_cpd_ent *ent;

	ent = (const struct ipu6_cpd_ent *)((const u8 *)cpd + cpd_hdr->hdr_len);
	return ent + idx;
}

#define ipu6_cpd_get_manifest(cpd) ipu6_cpd_get_entry(cpd, MANIFEST_IDX)
#define ipu6_cpd_get_metadata(cpd) ipu6_cpd_get_entry(cpd, METADATA_IDX)
#define ipu6_cpd_get_moduledata(cpd) ipu6_cpd_get_entry(cpd, MODULEDATA_IDX)

/*
 * Return the idx'th component header inside the metadata blob, or an
 * ERR_PTR when idx exceeds MAX_COMPONENT_ID or the component count
 * implied by metadata_size.
 */
static const struct ipu6_cpd_metadata_cmpnt_hdr *
ipu6_cpd_metadata_get_cmpnt(struct ipu6_device *isp, const void *metadata,
			    unsigned int metadata_size, u8 idx)
{
	size_t extn_size = sizeof(struct ipu6_cpd_metadata_extn);
	size_t cmpnt_count = metadata_size - extn_size;

	cmpnt_count = div_u64(cmpnt_count, isp->cpd_metadata_cmpnt_size);

	if (idx > MAX_COMPONENT_ID || idx >= cmpnt_count) {
		dev_err(&isp->pdev->dev, "Component index out of range (%d)\n",
			idx);
		return ERR_PTR(-EINVAL);
	}

	return metadata + extn_size + idx * isp->cpd_metadata_cmpnt_size;
}

/* Component version for entry idx, or a negative errno on bad idx. */
static u32 ipu6_cpd_metadata_cmpnt_version(struct ipu6_device *isp,
					   const void *metadata,
					   unsigned int metadata_size, u8 idx)
{
	const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt;

	cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx);
	if (IS_ERR(cmpnt))
		return PTR_ERR(cmpnt);

	return cmpnt->ver;
}

/* Component id for entry idx, or a negative errno on bad idx. */
static int ipu6_cpd_metadata_get_cmpnt_id(struct ipu6_device *isp,
					  const void *metadata,
					  unsigned int metadata_size, u8 idx)
{
	const struct ipu6_cpd_metadata_cmpnt_hdr *cmpnt;

	cmpnt = ipu6_cpd_metadata_get_cmpnt(isp, metadata, metadata_size, idx);
	if (IS_ERR(cmpnt))
		return PTR_ERR(cmpnt);

	return cmpnt->id;
}

/*
 * Walk the module-data directory and fill @pkg_dir: one header qword
 * pair followed by one {DMA address, size|type|version} pair per
 * component found in the metadata.
 */
static int ipu6_cpd_parse_module_data(struct ipu6_device *isp,
				      const void *module_data,
				      unsigned int module_data_size,
				      dma_addr_t dma_addr_module_data,
				      u64 *pkg_dir, const void *metadata,
				      unsigned int metadata_size)
{
	const struct ipu6_cpd_module_data_hdr *module_data_hdr;
	const struct ipu6_cpd_hdr *dir_hdr;
	const struct ipu6_cpd_ent *dir_ent;
	unsigned int i;
	u8 len;

	if (!module_data)
		return -EINVAL;

	module_data_hdr = module_data;
	dir_hdr = module_data + module_data_hdr->hdr_len;
	len = dir_hdr->hdr_len;
	dir_ent = (const struct ipu6_cpd_ent *)(((u8 *)dir_hdr) + len);

	pkg_dir[0] = PKG_DIR_HDR_MARK;
	/* pkg_dir entry count = component count + pkg_dir header */
	pkg_dir[1] = dir_hdr->ent_cnt + 1;

	for (i = 0; i < dir_hdr->ent_cnt; i++, dir_ent++) {
		u64 *p = &pkg_dir[PKG_DIR_ENT_LEN * (1 + i)];
		int ver, id;

		*p++ = dma_addr_module_data + dir_ent->offset;
		id = ipu6_cpd_metadata_get_cmpnt_id(isp, metadata,
						    metadata_size, i);
		if (id < 0 || id > MAX_COMPONENT_ID) {
			dev_err(&isp->pdev->dev,
				"Invalid CPD component id\n");
			return -EINVAL;
		}

		ver = ipu6_cpd_metadata_cmpnt_version(isp, metadata,
						      metadata_size, i);
		if (ver < 0 || ver > MAX_COMPONENT_VERSION) {
			dev_err(&isp->pdev->dev,
				"Invalid CPD component version\n");
			return -EINVAL;
		}

		*p = FIELD_PREP(PKG_DIR_SIZE_MASK, dir_ent->len) |
			FIELD_PREP(PKG_DIR_TYPE_MASK, id) |
			FIELD_PREP(PKG_DIR_VERSION_MASK, ver);
	}

	return 0;
}

/*
 * Build the firmware package directory in DMA memory from the CPD
 * image at @src: PKG_DIR table, then manifest, then metadata.
 */
int ipu6_cpd_create_pkg_dir(struct ipu6_bus_device *adev, const void *src)
{
	dma_addr_t dma_addr_src = sg_dma_address(adev->fw_sgt.sgl);
	const struct ipu6_cpd_ent *ent, *man_ent, *met_ent;
	struct ipu6_device *isp = adev->isp;
	unsigned int man_sz, met_sz;
	void *pkg_dir_pos;
	int ret;

	man_ent = ipu6_cpd_get_manifest(src);
	man_sz = man_ent->len;

	met_ent = ipu6_cpd_get_metadata(src);
	met_sz = met_ent->len;

	adev->pkg_dir_size = PKG_DIR_SIZE + man_sz + met_sz;
	adev->pkg_dir = ipu6_dma_alloc(adev, adev->pkg_dir_size,
				       &adev->pkg_dir_dma_addr,
				       GFP_KERNEL, 0);
	if (!adev->pkg_dir)
		return -ENOMEM;

	/*
	 * pkg_dir entry/header:
	 * qword | 63:56 | 55 | 54:48 | 47:32 | 31:24 | 23:0
	 * N         Address/Offset/"_IUPKDR_"
	 * N + 1 | rsvd | rsvd | type | ver | rsvd | size
	 *
	 * We can ignore other fields that size in N + 1 qword as they
	 * are 0 anyway. Just setting size for now.
	 */
	ent = ipu6_cpd_get_moduledata(src);

	ret = ipu6_cpd_parse_module_data(isp, src + ent->offset,
					 ent->len, dma_addr_src + ent->offset,
					 adev->pkg_dir, src + met_ent->offset,
					 met_ent->len);
	if (ret) {
		dev_err(&isp->pdev->dev, "Failed to parse module data\n");
		ipu6_dma_free(adev, adev->pkg_dir_size,
			      adev->pkg_dir, adev->pkg_dir_dma_addr, 0);
		return ret;
	}

	/* Copy manifest after pkg_dir */
	pkg_dir_pos = adev->pkg_dir + PKG_DIR_ENT_LEN * MAX_PKG_DIR_ENT_CNT;
	memcpy(pkg_dir_pos, src + man_ent->offset, man_sz);

	/* Copy metadata after manifest */
	pkg_dir_pos += man_sz;
	memcpy(pkg_dir_pos, src + met_ent->offset, met_sz);

	ipu6_dma_sync_single(adev, adev->pkg_dir_dma_addr,
			     adev->pkg_dir_size);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(ipu6_cpd_create_pkg_dir, "INTEL_IPU6");

/* Release the DMA memory allocated by ipu6_cpd_create_pkg_dir(). */
void ipu6_cpd_free_pkg_dir(struct ipu6_bus_device *adev)
{
	ipu6_dma_free(adev, adev->pkg_dir_size, adev->pkg_dir,
		      adev->pkg_dir_dma_addr, 0);
}
EXPORT_SYMBOL_NS_GPL(ipu6_cpd_free_pkg_dir, "INTEL_IPU6");

/*
 * Bounds-check a CPD header and all of its entries against @cpd_size
 * (the CPD region itself) and @data_size (the enclosing blob).
 */
static int ipu6_cpd_validate_cpd(struct ipu6_device *isp, const void *cpd,
				 unsigned long cpd_size,
				 unsigned long data_size)
{
	const struct ipu6_cpd_hdr *cpd_hdr = cpd;
	const struct ipu6_cpd_ent *ent;
	unsigned int i;
	u8 len;

	len = cpd_hdr->hdr_len;

	/* Ensure cpd hdr is within moduledata */
	if (cpd_size < len) {
		dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
		return -EINVAL;
	}

	/* Sanity check for CPD header */
	if ((cpd_size - len) / sizeof(*ent) < cpd_hdr->ent_cnt) {
		dev_err(&isp->pdev->dev, "Invalid CPD header\n");
		return -EINVAL;
	}

	/* Ensure that all entries are within moduledata */
	ent = (const struct ipu6_cpd_ent *)(((const u8 *)cpd_hdr) + len);
	for (i = 0; i < cpd_hdr->ent_cnt; i++, ent++) {
		if (data_size < ent->offset ||
		    data_size - ent->offset < ent->len) {
			dev_err(&isp->pdev->dev, "Invalid CPD entry (%d)\n", i);
			return -EINVAL;
		}
	}

	return 0;
}

/* Validate the module-data header and the CPD nested inside it. */
static int ipu6_cpd_validate_moduledata(struct ipu6_device *isp,
					const void *moduledata,
					u32 moduledata_size)
{
	const struct ipu6_cpd_module_data_hdr *mod_hdr = moduledata;
	int ret;

	/* Ensure moduledata hdr is within moduledata */
	if (moduledata_size < sizeof(*mod_hdr) ||
	    moduledata_size < mod_hdr->hdr_len) {
		dev_err(&isp->pdev->dev, "Invalid CPD moduledata size\n");
		return -EINVAL;
	}

	dev_info(&isp->pdev->dev, "FW version: %x\n", mod_hdr->fw_pkg_date);
	ret = ipu6_cpd_validate_cpd(isp, moduledata + mod_hdr->hdr_len,
				    moduledata_size - mod_hdr->hdr_len,
				    moduledata_size);
	if (ret) {
		dev_err(&isp->pdev->dev, "Invalid CPD in moduledata\n");
		return ret;
	}

	return 0;
}

/* Validate the metadata extension header, image type and layout. */
static int ipu6_cpd_validate_metadata(struct ipu6_device *isp,
				      const void *metadata, u32 meta_size)
{
	const struct ipu6_cpd_metadata_extn *extn = metadata;

	/* Sanity check for metadata size */
	if (meta_size < sizeof(*extn) || meta_size > MAX_METADATA_SIZE) {
		dev_err(&isp->pdev->dev, "Invalid CPD metadata\n");
		return -EINVAL;
	}

	/* Validate extension and image types */
	if (extn->extn_type != IPU6_CPD_METADATA_EXTN_TYPE_IUNIT ||
	    extn->img_type != IPU6_CPD_METADATA_IMAGE_TYPE_MAIN_FIRMWARE) {
		dev_err(&isp->pdev->dev,
			"Invalid CPD metadata descriptor img_type (%d)\n",
			extn->img_type);
		return -EINVAL;
	}

	/* Validate metadata size multiple of metadata components */
	if ((meta_size - sizeof(*extn)) % isp->cpd_metadata_cmpnt_size) {
		dev_err(&isp->pdev->dev, "Invalid CPD metadata size\n");
		return -EINVAL;
	}

	return 0;
}

/* Full validation of a CPD firmware file before it is used. */
int ipu6_cpd_validate_cpd_file(struct ipu6_device *isp, const void *cpd_file,
			       unsigned long cpd_file_size)
{
	const struct ipu6_cpd_hdr *hdr = cpd_file;
	const struct ipu6_cpd_ent *ent;
	int ret;

	ret = ipu6_cpd_validate_cpd(isp, cpd_file, cpd_file_size,
				    cpd_file_size);
	if (ret) {
		dev_err(&isp->pdev->dev, "Invalid CPD in file\n");
		return ret;
	}

	/* Check for CPD file marker */
	if (hdr->hdr_mark != CPD_HDR_MARK) {
		dev_err(&isp->pdev->dev, "Invalid CPD header\n");
		return -EINVAL;
	}

	/* Sanity check for manifest size */
	ent = ipu6_cpd_get_manifest(cpd_file);
	if (ent->len > MAX_MANIFEST_SIZE) {
		dev_err(&isp->pdev->dev, "Invalid CPD manifest size\n");
		return -EINVAL;
	}

	/* Validate metadata */
	ent = ipu6_cpd_get_metadata(cpd_file);
	ret = ipu6_cpd_validate_metadata(isp, cpd_file + ent->offset,
					 ent->len);
	if (ret) {
		dev_err(&isp->pdev->dev, "Invalid CPD metadata\n");
		return ret;
	}

	/* Validate moduledata */
	ent = ipu6_cpd_get_moduledata(cpd_file);
	ret = ipu6_cpd_validate_moduledata(isp, cpd_file + ent->offset,
					   ent->len);
	if (ret)
		dev_err(&isp->pdev->dev, "Invalid CPD moduledata\n");

	return ret;
}
// SPDX-License-Identifier: GPL-2.0 OR MIT
//
// Copyright (C) 2023 chargebyte GmbH

/* Board variant: everything common to the Tarragon family comes from the
 * shared .dtsi; this file only names the variant and enables the
 * peripherals specific to the "Slave" board.
 */
#include "imx6ull-tarragon-common.dtsi"

/ {
	model = "chargebyte Tarragon Slave";
	compatible = "chargebyte,imx6ull-tarragon-slave", "fsl,imx6ull";
};

/* QCA7000 HomePlug device on ECSPI2, chip select 0.  Interrupt, reset and
 * bootloader-strap pins are claimed via the pinctrl groups defined in the
 * common .dtsi (presumably — the groups themselves are not visible here).
 */
&ecspi2 {
	status = "okay";

	qca700x_cp: ethernet@0 {
		reg = <0x0>;
		compatible = "qca,qca7000";
		pinctrl-names = "default";
		pinctrl-0 = <&pinctrl_qca700x_cp_int
			     &pinctrl_qca700x_cp_rst
			     &pinctrl_qca700x_cp_btld>;
		interrupt-parent = <&gpio2>;
		interrupts = <19 IRQ_TYPE_EDGE_RISING>;
		spi-cpha;
		spi-cpol;
		spi-max-frequency = <12000000>;
	};
};

/* Enable the first Fast Ethernet controller */
&fec1 {
	status = "okay";
};
/*
 * arch/xtensa/platforms/iss/simdisk.c
 *
 * Block-device driver backed by host files via ISS simcalls: each simdisk
 * unit maps a file on the simulator host to a Linux block device, with a
 * /proc/simdisk/<n> entry to attach/detach the backing file at runtime.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001-2013 Tensilica Inc.
 * Authors Victor Prupis
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/proc_fs.h>
#include <linux/uaccess.h>
#include <platform/simcall.h>

#define SIMDISK_MAJOR 240
#define SIMDISK_MINORS 1
#define MAX_SIMDISK_COUNT 10

struct simdisk {
	const char *filename;		/* backing file path; NULL when detached */
	spinlock_t lock;		/* guards fd/users/filename and the simcall I/O */
	struct gendisk *gd;
	struct proc_dir_entry *procfile;	/* /proc/simdisk/<which> */
	int users;			/* open count; non-zero blocks detach */
	unsigned long size;		/* backing file size in bytes */
	int fd;				/* host-side fd from simc_open(); -1 if none */
};

static int simdisk_count = CONFIG_BLK_DEV_SIMDISK_COUNT;
module_param(simdisk_count, int, S_IRUGO);
MODULE_PARM_DESC(simdisk_count, "Number of simdisk units.");

/* Number of filenames supplied via the "filename" module parameter */
static int n_files;
static const char *filename[MAX_SIMDISK_COUNT] = {
#ifdef CONFIG_SIMDISK0_FILENAME
	CONFIG_SIMDISK0_FILENAME,
#ifdef CONFIG_SIMDISK1_FILENAME
	CONFIG_SIMDISK1_FILENAME,
#endif
#endif
};

/*
 * "filename" module parameter setter: each occurrence appends one backing
 * filename, up to MAX_SIMDISK_COUNT entries.  Returns -EINVAL when full.
 */
static int simdisk_param_set_filename(const char *val,
				      const struct kernel_param *kp)
{
	if (n_files < ARRAY_SIZE(filename))
		filename[n_files++] = val;
	else
		return -EINVAL;
	return 0;
}

static const struct kernel_param_ops simdisk_param_ops_filename = {
	.set = simdisk_param_set_filename,
};
module_param_cb(filename, &simdisk_param_ops_filename, &n_files, 0);
MODULE_PARM_DESC(filename, "Backing storage filename.");

static int simdisk_major = SIMDISK_MAJOR;

/*
 * Copy @nsect sectors between @buffer and the backing host file starting at
 * @sector, in the direction given by @write.  Requests reaching past the
 * end of the backing file are rejected with a log message and silently
 * dropped (no error is propagated to the bio).
 */
static void simdisk_transfer(struct simdisk *dev, unsigned long sector,
		unsigned long nsect, char *buffer, int write)
{
	unsigned long offset = sector << SECTOR_SHIFT;
	unsigned long nbytes = nsect << SECTOR_SHIFT;

	if (offset > dev->size || dev->size - offset < nbytes) {
		pr_notice("Beyond-end %s (%ld %ld)\n",
				write ? "write" : "read", offset, nbytes);
		return;
	}

	spin_lock(&dev->lock);
	while (nbytes > 0) {
		unsigned long io;

		simc_lseek(dev->fd, offset, SEEK_SET);
		/* Touch the buffer before the simcall — presumably to fault
		 * the page in for the host-side access; TODO confirm.
		 */
		READ_ONCE(*buffer);
		if (write)
			io = simc_write(dev->fd, buffer, nbytes);
		else
			io = simc_read(dev->fd, buffer, nbytes);
		/* simcalls return -1 on error with the host errno */
		if (io == -1) {
			pr_err("SIMDISK: IO error %d\n", errno);
			break;
		}
		/* Partial transfers are continued from where they stopped */
		buffer += io;
		offset += io;
		nbytes -= io;
	}
	spin_unlock(&dev->lock);
}

/*
 * bio-based submit path: walk each segment of the bio and feed it to
 * simdisk_transfer(), then complete the bio unconditionally.
 */
static void simdisk_submit_bio(struct bio *bio)
{
	struct simdisk *dev = bio->bi_bdev->bd_disk->private_data;
	struct bio_vec bvec;
	struct bvec_iter iter;
	sector_t sector = bio->bi_iter.bi_sector;

	bio_for_each_segment(bvec, bio, iter) {
		char *buffer = bvec_kmap_local(&bvec);
		unsigned len = bvec.bv_len >> SECTOR_SHIFT;

		simdisk_transfer(dev, sector, len, buffer,
				bio_data_dir(bio) == WRITE);
		sector += len;
		kunmap_local(buffer);
	}

	bio_endio(bio);
}

/* Track open count so detach can refuse while the device is in use */
static int simdisk_open(struct gendisk *disk, blk_mode_t mode)
{
	struct simdisk *dev = disk->private_data;

	spin_lock(&dev->lock);
	++dev->users;
	spin_unlock(&dev->lock);
	return 0;
}

static void simdisk_release(struct gendisk *disk)
{
	struct simdisk *dev = disk->private_data;

	spin_lock(&dev->lock);
	--dev->users;
	spin_unlock(&dev->lock);
}

static const struct block_device_operations simdisk_ops = {
	.owner		= THIS_MODULE,
	.submit_bio	= simdisk_submit_bio,
	.open		= simdisk_open,
	.release	= simdisk_release,
};

/* Array of simdisk_count units, allocated in simdisk_init() */
static struct simdisk *sddev;
static struct proc_dir_entry *simdisk_procdir;

/*
 * Attach a backing file to @dev: open it on the host, size the gendisk
 * from the file length, and take ownership of a kstrdup'd copy of
 * @filename (freed again on error or on detach).
 *
 * Returns 0 on success, -ENOMEM/-EBUSY/-ENODEV on failure.
 */
static int simdisk_attach(struct simdisk *dev, const char *filename)
{
	int err = 0;

	filename = kstrdup(filename, GFP_KERNEL);
	if (filename == NULL)
		return -ENOMEM;

	spin_lock(&dev->lock);

	if (dev->fd != -1) {
		err = -EBUSY;
		goto out;
	}
	dev->fd = simc_open(filename, O_RDWR, 0);
	if (dev->fd == -1) {
		pr_err("SIMDISK: Can't open %s: %d\n", filename, errno);
		err = -ENODEV;
		goto out;
	}
	/* Seek to end to learn the backing file's size */
	dev->size = simc_lseek(dev->fd, 0, SEEK_END);
	set_capacity(dev->gd, dev->size >> SECTOR_SHIFT);
	dev->filename = filename;
	pr_info("SIMDISK: %s=%s\n", dev->gd->disk_name, dev->filename);

out:
	if (err)
		kfree(filename);
	spin_unlock(&dev->lock);

	return err;
}

/*
 * Detach the backing file.  Fails with -EBUSY while the device is open and
 * -EIO if the host-side close fails; a device with no backing file is a
 * successful no-op.
 */
static int simdisk_detach(struct simdisk *dev)
{
	int err = 0;

	spin_lock(&dev->lock);

	if (dev->users != 0) {
		err = -EBUSY;
	} else if (dev->fd != -1) {
		if (simc_close(dev->fd)) {
			pr_err("SIMDISK: error closing %s: %d\n",
					dev->filename, errno);
			err = -EIO;
		} else {
			pr_info("SIMDISK: %s detached from %s\n",
					dev->gd->disk_name, dev->filename);
			dev->fd = -1;
			kfree(dev->filename);
			dev->filename = NULL;
		}
	}
	spin_unlock(&dev->lock);
	return err;
}

/* /proc/simdisk/<n> read: current backing filename followed by '\n',
 * or just "\n" when detached.
 */
static ssize_t proc_read_simdisk(struct file *file, char __user *buf,
			size_t size, loff_t *ppos)
{
	struct simdisk *dev = pde_data(file_inode(file));
	const char *s = dev->filename;

	if (s) {
		ssize_t len = strlen(s);
		char *temp = kmalloc(len + 2, GFP_KERNEL);

		if (!temp)
			return -ENOMEM;

		len = scnprintf(temp, len + 2, "%s\n", s);
		len = simple_read_from_buffer(buf, size, ppos, temp, len);

		kfree(temp);
		return len;
	}
	return simple_read_from_buffer(buf, size, ppos, "\n", 1);
}

/* /proc/simdisk/<n> write: detach the current file, then attach the
 * written (newline-stripped) path; an empty write just detaches.
 */
static ssize_t proc_write_simdisk(struct file *file, const char __user *buf,
			size_t count, loff_t *ppos)
{
	char *tmp = memdup_user_nul(buf, count);
	struct simdisk *dev = pde_data(file_inode(file));
	int err;

	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	err = simdisk_detach(dev);
	if (err != 0)
		goto out_free;

	/* Strip a single trailing newline from the supplied path */
	if (count > 0 && tmp[count - 1] == '\n')
		tmp[count - 1] = 0;

	if (tmp[0])
		err = simdisk_attach(dev, tmp);

	if (err == 0)
		err = count;
out_free:
	kfree(tmp);
	return err;
}

static const struct proc_ops simdisk_proc_ops = {
	.proc_read	= proc_read_simdisk,
	.proc_write	= proc_write_simdisk,
	.proc_lseek	= default_llseek,
};

/*
 * Initialize one unit: allocate the gendisk, register it, and create the
 * /proc/simdisk/<which> control file.  Capacity starts at 0 until a file
 * is attached.
 */
static int __init simdisk_setup(struct simdisk *dev, int which,
		struct proc_dir_entry *procdir)
{
	struct queue_limits lim = {
		.features		= BLK_FEAT_ROTATIONAL,
	};
	char tmp[2] = { '0' + which, 0 };
	int err;

	dev->fd = -1;
	dev->filename = NULL;
	spin_lock_init(&dev->lock);
	dev->users = 0;

	dev->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
	if (IS_ERR(dev->gd)) {
		err = PTR_ERR(dev->gd);
		goto out;
	}
	dev->gd->major = simdisk_major;
	dev->gd->first_minor = which;
	dev->gd->minors = SIMDISK_MINORS;
	dev->gd->fops = &simdisk_ops;
	dev->gd->private_data = dev;
	snprintf(dev->gd->disk_name, 32, "simdisk%d", which);
	set_capacity(dev->gd, 0);
	err = add_disk(dev->gd);
	if (err)
		goto out_cleanup_disk;

	dev->procfile = proc_create_data(tmp, 0644, procdir,
					 &simdisk_proc_ops, dev);

	return 0;

out_cleanup_disk:
	put_disk(dev->gd);
out:
	return err;
}

static int __init simdisk_init(void)
{
	int i;

	if (register_blkdev(simdisk_major, "simdisk") < 0) {
		pr_err("SIMDISK: register_blkdev: %d\n", simdisk_major);
		return -EIO;
	}
	pr_info("SIMDISK: major: %d\n", simdisk_major);

	/* At least one unit per parameter-supplied filename, capped at max */
	if (n_files > simdisk_count)
		simdisk_count = n_files;
	if (simdisk_count > MAX_SIMDISK_COUNT)
		simdisk_count = MAX_SIMDISK_COUNT;

	sddev = kmalloc_array(simdisk_count, sizeof(*sddev), GFP_KERNEL);
	if (sddev == NULL)
		goto out_unregister;

	simdisk_procdir = proc_mkdir("simdisk", 0);
	if (simdisk_procdir == NULL)
		goto out_free_unregister;

	/* Set up each unit; attach a backing file where one was configured.
	 * Per-unit setup/attach failures are deliberately ignored here.
	 */
	for (i = 0; i < simdisk_count; ++i) {
		if (simdisk_setup(sddev + i, i, simdisk_procdir) == 0) {
			if (filename[i] != NULL && filename[i][0] != 0 &&
			    (n_files == 0 || i < n_files))
				simdisk_attach(sddev + i, filename[i]);
		}
	}

	return 0;

out_free_unregister:
	kfree(sddev);
out_unregister:
	unregister_blkdev(simdisk_major, "simdisk");
	return -ENOMEM;
}
module_init(simdisk_init);

/* Undo simdisk_setup() for one unit (detach, remove disk and proc entry) */
static void simdisk_teardown(struct simdisk *dev, int which,
		struct proc_dir_entry *procdir)
{
	char tmp[2] = { '0' + which, 0 };

	simdisk_detach(dev);
	if (dev->gd) {
		del_gendisk(dev->gd);
		put_disk(dev->gd);
	}
	remove_proc_entry(tmp, procdir);
}

static void __exit simdisk_exit(void)
{
	int i;

	for (i = 0; i < simdisk_count; ++i)
		simdisk_teardown(sddev + i, i, simdisk_procdir);
	remove_proc_entry("simdisk", 0);
	kfree(sddev);
	unregister_blkdev(simdisk_major, "simdisk");
}
module_exit(simdisk_exit);

MODULE_ALIAS_BLOCKDEV_MAJOR(SIMDISK_MAJOR);

MODULE_LICENSE("GPL");
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2022 Intel Corporation */
#include <linux/crypto.h>
#include <crypto/acompress.h>
#include <crypto/internal/acompress.h>
#include <crypto/scatterwalk.h>
#include <linux/dma-mapping.h>
#include <linux/workqueue.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "qat_bl.h"
#include "qat_comp_req.h"
#include "qat_compression.h"
#include "qat_algs_send.h"

/* Serializes (un)registration of the acomp algorithms across devices */
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

enum direction {
	DECOMPRESSION = 0,
	COMPRESSION = 1,
};

struct qat_compression_req;

struct qat_compression_ctx {
	u8 comp_ctx[QAT_COMP_CTX_SIZE];		/* firmware compression context */
	struct qat_compression_instance *inst;	/* bound QAT instance */
	/* optional post-processing hook invoked from the completion path */
	int (*qat_comp_callback)(struct qat_compression_req *qat_req, void *resp);
};

struct qat_dst {
	bool is_null;		/* destination sgl was allocated by the driver */
	int resubmitted;	/* set once a NULL-dst request has been retried */
};

struct qat_compression_req {
	u8 req[QAT_COMP_REQ_SIZE];	/* firmware request descriptor */
	struct qat_compression_ctx *qat_compression_ctx;
	struct acomp_req *acompress_req;
	struct qat_request_buffs buf;	/* DMA-mapped src/dst buffer lists */
	enum direction dir;
	int actual_dlen;	/* caller-visible dst length (excl. overflow buffer) */
	struct qat_alg_req alg_req;
	struct work_struct resubmit;	/* deferred retry for NULL-dst overflow */
	struct qat_dst dst;
};

/* Fill in the transport request and hand it to the DC tx ring (with backlog) */
static int qat_alg_send_dc_message(struct qat_compression_req *qat_req,
				   struct qat_compression_instance *inst,
				   struct crypto_async_request *base)
{
	struct qat_alg_req *alg_req = &qat_req->alg_req;

	alg_req->fw_req = (u32 *)&qat_req->req;
	alg_req->tx_ring = inst->dc_tx;
	alg_req->base = base;
	alg_req->backlog = &inst->backlog;

	return qat_alg_send_message(alg_req);
}

/*
 * Workqueue handler: retry a decompression request whose driver-allocated
 * destination buffer overflowed.  Reallocates/remaps the destination at
 * CRYPTO_ACOMP_DST_MAX, patches the firmware descriptor and resubmits.
 * On failure (or a second -ENOSPC) the buffers are freed and the acomp
 * request is completed with the error.
 */
static void qat_comp_resubmit(struct work_struct *work)
{
	struct qat_compression_req *qat_req =
		container_of(work, struct qat_compression_req, resubmit);
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct qat_request_buffs *qat_bufs = &qat_req->buf;
	struct qat_compression_instance *inst = ctx->inst;
	struct acomp_req *areq = qat_req->acompress_req;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	unsigned int dlen = CRYPTO_ACOMP_DST_MAX;
	u8 *req = qat_req->req;
	dma_addr_t dfbuf;
	int ret;

	areq->dlen = dlen;

	dev_dbg(&GET_DEV(accel_dev), "[%s][%s] retry NULL dst request - dlen = %d\n",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp" : "decomp", dlen);

	ret = qat_bl_realloc_map_new_dst(accel_dev, &areq->dst, dlen, qat_bufs,
					 qat_algs_alloc_flags(&areq->base));
	if (ret)
		goto err;

	/* Only one retry is ever attempted; see the completion path */
	qat_req->dst.resubmitted = true;

	dfbuf = qat_req->buf.bloutp;
	qat_comp_override_dst(req, dfbuf, dlen);

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret != -ENOSPC)
		return;

err:
	qat_bl_free_bufl(accel_dev, qat_bufs);
	acomp_request_complete(areq, ret);
}

/*
 * Common completion handler for responses from the device.  Decodes the
 * firmware status, handles the NULL-destination overflow retry, checks the
 * compress-and-verify (CnV) flag and the overflow spill buffer, and finally
 * frees the DMA buffer lists and completes the acomp request.
 */
static void qat_comp_generic_callback(struct qat_compression_req *qat_req,
				      void *resp)
{
	struct acomp_req *areq = qat_req->acompress_req;
	struct qat_compression_ctx *ctx = qat_req->qat_compression_ctx;
	struct adf_accel_dev *accel_dev = ctx->inst->accel_dev;
	struct crypto_acomp *tfm = crypto_acomp_reqtfm(areq);
	struct qat_compression_instance *inst = ctx->inst;
	int consumed, produced;
	s8 cmp_err, xlt_err;
	int res = -EBADMSG;
	int status;
	u8 cnv;

	status = qat_comp_get_cmp_status(resp);
	status |= qat_comp_get_xlt_status(resp);
	cmp_err = qat_comp_get_cmp_err(resp);
	xlt_err = qat_comp_get_xlt_err(resp);

	consumed = qat_comp_get_consumed_ctr(resp);
	produced = qat_comp_get_produced_ctr(resp);

	dev_dbg(&GET_DEV(accel_dev),
		"[%s][%s][%s] slen = %8d dlen = %8d consumed = %8d produced = %8d cmp_err = %3d xlt_err = %3d",
		crypto_tfm_alg_driver_name(crypto_acomp_tfm(tfm)),
		qat_req->dir == COMPRESSION ? "comp " : "decomp",
		status ? "ERR" : "OK ",
		areq->slen, areq->dlen, consumed, produced, cmp_err, xlt_err);

	areq->dlen = 0;

	/* Driver-allocated destination that overflowed: retry once with a
	 * bigger buffer via the workqueue, unless already retried.
	 */
	if (qat_req->dir == DECOMPRESSION && qat_req->dst.is_null) {
		if (cmp_err == ERR_CODE_OVERFLOW_ERROR) {
			if (qat_req->dst.resubmitted) {
				dev_dbg(&GET_DEV(accel_dev),
					"Output does not fit destination buffer\n");
				res = -EOVERFLOW;
				goto end;
			}

			INIT_WORK(&qat_req->resubmit, qat_comp_resubmit);
			adf_misc_wq_queue_work(&qat_req->resubmit);
			/* Completion deferred to the resubmitted request */
			return;
		}
	}

	if (unlikely(status != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
		goto end;

	if (qat_req->dir == COMPRESSION) {
		/* Only compress-and-verified output is trusted */
		cnv = qat_comp_get_cmp_cnv_flag(resp);
		if (unlikely(!cnv)) {
			dev_err(&GET_DEV(accel_dev),
				"Verified compression not supported\n");
			goto end;
		}

		/* Output spilled into the shared overflow buffer: scrub it
		 * and fail the request (res stays -EBADMSG).
		 */
		if (unlikely(produced > qat_req->actual_dlen)) {
			memset(inst->dc_data->ovf_buff, 0,
			       inst->dc_data->ovf_buff_sz);
			dev_dbg(&GET_DEV(accel_dev),
				"Actual buffer overflow: produced=%d, dlen=%d\n",
				produced, qat_req->actual_dlen);
			goto end;
		}
	}

	res = 0;
	areq->dlen = produced;

	if (ctx->qat_comp_callback)
		res = ctx->qat_comp_callback(qat_req, resp);

end:
	qat_bl_free_bufl(accel_dev, &qat_req->buf);
	acomp_request_complete(areq, res);
}

/* Ring callback: recover the request from the opaque field, complete it,
 * then give backlogged requests a chance to be sent.
 */
void qat_comp_alg_callback(void *resp)
{
	struct qat_compression_req *qat_req =
		(void *)(__force long)qat_comp_get_opaque(resp);
	struct qat_instance_backlog *backlog = qat_req->alg_req.backlog;

	qat_comp_generic_callback(qat_req, resp);

	qat_alg_send_backlog(backlog);
}

/* tfm init: bind a compression instance on the tfm's NUMA node and build
 * the firmware deflate context template.
 */
static int qat_comp_alg_init_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst;
	int node;

	if (tfm->node == NUMA_NO_NODE)
		node = numa_node_id();
	else
		node = tfm->node;

	memset(ctx, 0, sizeof(*ctx));
	inst = qat_compression_get_instance_node(node);
	if (!inst)
		return -EINVAL;
	ctx->inst = inst;

	ctx->inst->build_deflate_ctx(ctx->comp_ctx);

	return 0;
}

/* tfm exit: release the instance reference and wipe the context */
static void qat_comp_alg_exit_tfm(struct crypto_acomp *acomp_tfm)
{
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);

	qat_compression_put_instance(ctx->inst);
	memset(ctx, 0, sizeof(*ctx));
}

/*
 * Build and submit a (de)compression request.
 * @shdr/@sftr and @dhdr/@dftr are source/destination header and footer
 * byte counts excluded from the transfer (both 0 for raw deflate below).
 *
 * If the caller passed no destination sgl, one is allocated here (see the
 * comment in the body) and the NULL-dst retry machinery is armed.  For
 * compression, the instance's overflow buffer is appended to the
 * destination so the device can spill rather than fault.
 *
 * Returns the transport send result; buffers are unwound on -ENOSPC.
 */
static int qat_comp_alg_compress_decompress(struct acomp_req *areq, enum direction dir,
					    unsigned int shdr, unsigned int sftr,
					    unsigned int dhdr, unsigned int dftr)
{
	struct qat_compression_req *qat_req = acomp_request_ctx(areq);
	struct crypto_acomp *acomp_tfm = crypto_acomp_reqtfm(areq);
	struct crypto_tfm *tfm = crypto_acomp_tfm(acomp_tfm);
	struct qat_compression_ctx *ctx = crypto_tfm_ctx(tfm);
	struct qat_compression_instance *inst = ctx->inst;
	gfp_t f = qat_algs_alloc_flags(&areq->base);
	struct qat_sgl_to_bufl_params params = {0};
	int slen = areq->slen - shdr - sftr;
	int dlen = areq->dlen - dhdr - dftr;
	dma_addr_t sfbuf, dfbuf;
	u8 *req = qat_req->req;
	size_t ovf_buff_sz;
	int ret;

	params.sskip = shdr;
	params.dskip = dhdr;

	if (!areq->src || !slen)
		return -EINVAL;

	if (areq->dst && !dlen)
		return -EINVAL;

	qat_req->dst.is_null = false;

	/* Handle acomp requests that require the allocation of a destination
	 * buffer. The size of the destination buffer is double the source
	 * buffer (rounded up to the size of a page) to fit the decompressed
	 * output or an expansion on the data for compression.
	 */
	if (!areq->dst) {
		qat_req->dst.is_null = true;

		dlen = round_up(2 * slen, PAGE_SIZE);
		areq->dst = sgl_alloc(dlen, f, NULL);
		if (!areq->dst)
			return -ENOMEM;

		dlen -= dhdr + dftr;
		areq->dlen = dlen;
		qat_req->dst.resubmitted = false;
	}

	if (dir == COMPRESSION) {
		params.extra_dst_buff = inst->dc_data->ovf_buff_p;
		ovf_buff_sz = inst->dc_data->ovf_buff_sz;
		params.sz_extra_dst_buff = ovf_buff_sz;
	}

	ret = qat_bl_sgl_to_bufl(ctx->inst->accel_dev, areq->src, areq->dst,
				 &qat_req->buf, &params, f);
	if (unlikely(ret))
		return ret;

	sfbuf = qat_req->buf.blp;
	dfbuf = qat_req->buf.bloutp;
	qat_req->qat_compression_ctx = ctx;
	qat_req->acompress_req = areq;
	qat_req->dir = dir;

	if (dir == COMPRESSION) {
		/* Firmware sees dst + overflow buffer; the caller only the dst */
		qat_req->actual_dlen = dlen;
		dlen += ovf_buff_sz;
		qat_comp_create_compression_req(ctx->comp_ctx, req,
						(u64)(__force long)sfbuf, slen,
						(u64)(__force long)dfbuf, dlen,
						(u64)(__force long)qat_req);
	} else {
		qat_comp_create_decompression_req(ctx->comp_ctx, req,
						  (u64)(__force long)sfbuf, slen,
						  (u64)(__force long)dfbuf, dlen,
						  (u64)(__force long)qat_req);
	}

	ret = qat_alg_send_dc_message(qat_req, inst, &areq->base);
	if (ret == -ENOSPC)
		qat_bl_free_bufl(inst->accel_dev, &qat_req->buf);

	return ret;
}

static int qat_comp_alg_compress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, COMPRESSION, 0, 0, 0, 0);
}

static int qat_comp_alg_decompress(struct acomp_req *req)
{
	return qat_comp_alg_compress_decompress(req, DECOMPRESSION, 0, 0, 0, 0);
}

static struct acomp_alg qat_acomp[] = { {
	.base = {
		.cra_name = "deflate",
		.cra_driver_name = "qat_deflate",
		.cra_priority = 4001,
		.cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY,
		.cra_ctxsize = sizeof(struct qat_compression_ctx),
		.cra_module = THIS_MODULE,
	},
	.init = qat_comp_alg_init_tfm,
	.exit = qat_comp_alg_exit_tfm,
	.compress = qat_comp_alg_compress,
	.decompress = qat_comp_alg_decompress,
	.dst_free = sgl_free,
	.reqsize = sizeof(struct qat_compression_req),
}};

/* Register the algorithms when the first QAT device comes up */
int qat_comp_algs_register(void)
{
	int ret = 0;

	mutex_lock(&algs_lock);
	if (++active_devs == 1)
		ret = crypto_register_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
	return ret;
}

/* Unregister when the last QAT device goes away */
void qat_comp_algs_unregister(void)
{
	mutex_lock(&algs_lock);
	if (--active_devs == 0)
		crypto_unregister_acomps(qat_acomp, ARRAY_SIZE(qat_acomp));
	mutex_unlock(&algs_lock);
}